Merge
authorlana
Wed, 17 Aug 2011 22:47:12 -0700
changeset 10346 916b87d13b0b
parent 10345 b4aebbfc5b3a (current diff)
parent 10205 de9223c94f9c (diff)
child 10347 1c9efe1ec7d3
child 10370 5db0cf452a50
Merge
--- a/.hgtags	Wed Aug 17 15:18:16 2011 -0700
+++ b/.hgtags	Wed Aug 17 22:47:12 2011 -0700
@@ -121,3 +121,4 @@
 07a8728ad49ef6dfa469c3a8bf5ab1e9c80bed5c jdk7-b144
 8294c99e685a1f6d1d37c45cd97854cf74be771e jdk7-b145
 dca1e8a87e8f756f95b99bac8fe795750d42e1b0 jdk7-b146
+a2a589fc29543ed32919c78a1810ad93a6fcf5bc jdk7-b147
--- a/.hgtags-top-repo	Wed Aug 17 15:18:16 2011 -0700
+++ b/.hgtags-top-repo	Wed Aug 17 22:47:12 2011 -0700
@@ -121,3 +121,4 @@
 7203965666a4fe63bf82f5e4204f41ce6285e716 jdk7-b144
 55e9ebf032186c333e5964ed044419830ac02693 jdk7-b145
 2d38c2a79c144c30cd04d143d83ee7ec6af40771 jdk7-b146
+d91364304d7c4ecd34caffdba2b840aeb0d10b51 jdk7-b147
--- a/.jcheck/conf	Wed Aug 17 15:18:16 2011 -0700
+++ b/.jcheck/conf	Wed Aug 17 22:47:12 2011 -0700
@@ -1,1 +1,1 @@
-project=jdk7
+project=jdk8
--- a/corba/.hgtags	Wed Aug 17 15:18:16 2011 -0700
+++ b/corba/.hgtags	Wed Aug 17 22:47:12 2011 -0700
@@ -121,3 +121,4 @@
 7033a5756ad552d88114594d8e2d2e4dc2c05963 jdk7-b144
 77ec0541aa2aa4da27e9e385a118a2e51e7fca24 jdk7-b145
 770227a4087e4e401fe87ccd19738440111c3948 jdk7-b146
+73323cb3396260d93e0ab731fd2d431096ceed0f jdk7-b147
--- a/corba/.jcheck/conf	Wed Aug 17 15:18:16 2011 -0700
+++ b/corba/.jcheck/conf	Wed Aug 17 22:47:12 2011 -0700
@@ -1,1 +1,1 @@
-project=jdk7
+project=jdk8
--- a/corba/make/jprt.properties	Wed Aug 17 15:18:16 2011 -0700
+++ b/corba/make/jprt.properties	Wed Aug 17 22:47:12 2011 -0700
@@ -25,12 +25,23 @@
 
 # Properties for jprt
 
-# Use whatever release that the submitted job requests
-jprt.tools.default.release=${jprt.submit.release}
+# The release to build
+jprt.tools.default.release=jdk8
 
 # The different build flavors we want, we override here so we just get these 2
 jprt.build.flavors=product,fastdebug
 
+# Standard list of jprt build targets for this source tree
+jprt.build.targets= 						\
+    solaris_sparc_5.10-{product|fastdebug}, 			\
+    solaris_sparcv9_5.10-{product|fastdebug}, 			\
+    solaris_i586_5.10-{product|fastdebug}, 			\
+    solaris_x64_5.10-{product|fastdebug}, 			\
+    linux_i586_2.6-{product|fastdebug}, 			\
+    linux_x64_2.6-{product|fastdebug}, 				\
+    windows_i586_5.1-{product|fastdebug}, 			\
+    windows_x64_5.2-{product|fastdebug}
+
 # Directories to be excluded from the source bundles
 jprt.bundle.exclude.src.dirs=build dist webrev
 
--- a/hotspot/.hgtags	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/.hgtags	Wed Aug 17 22:47:12 2011 -0700
@@ -172,3 +172,5 @@
 3aea9e9feb073f5500e031be6186666bcae89aa2 hs21-b11
 9ad1548c6b63d596c411afc35147ffd5254426d9 jdk7-b142
 9ad1548c6b63d596c411afc35147ffd5254426d9 hs21-b12
+c149193c768b8b7233da4c3a3fdc0756b975848e hs21-b13
+c149193c768b8b7233da4c3a3fdc0756b975848e jdk7-b143
--- a/hotspot/.jcheck/conf	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/.jcheck/conf	Wed Aug 17 22:47:12 2011 -0700
@@ -1,1 +1,1 @@
-project=jdk7
+project=jdk8
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1028,7 +1028,12 @@
                                     if (AddressOps.equal(val, value)) {
                                         if (!printed) {
                                             printed = true;
-                                            blob.printOn(out);
+                                            try {
+                                                blob.printOn(out);
+                                            } catch (Exception e) {
+                                                out.println("Exception printing blob at " + base);
+                                                e.printStackTrace();
+                                            }
                                         }
                                         out.println("found at " + base + "\n");
                                     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/AdapterBlob.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.code;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+public class AdapterBlob extends CodeBlob {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static void initialize(TypeDataBase db) {
+    // Type type = db.lookupType("AdapterBlob");
+
+    // // FIXME: add any needed fields
+  }
+
+  public AdapterBlob(Address addr) {
+    super(addr);
+  }
+
+  public boolean isAdapterBlob() {
+    return true;
+  }
+
+  public String getName() {
+    return "AdapterBlob: " + super.getName();
+  }
+}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,6 +93,8 @@
   public boolean isUncommonTrapStub()   { return false; }
   public boolean isExceptionStub()      { return false; }
   public boolean isSafepointStub()      { return false; }
+  public boolean isRicochetBlob()       { return false; }
+  public boolean isAdapterBlob()        { return false; }
 
   // Fine grain nmethod support: isNmethod() == isJavaMethod() || isNativeMethod() || isOSRMethod()
   public boolean isJavaMethod()         { return false; }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,6 +57,8 @@
     virtualConstructor.addMapping("BufferBlob", BufferBlob.class);
     virtualConstructor.addMapping("nmethod", NMethod.class);
     virtualConstructor.addMapping("RuntimeStub", RuntimeStub.class);
+    virtualConstructor.addMapping("RicochetBlob", RicochetBlob.class);
+    virtualConstructor.addMapping("AdapterBlob", AdapterBlob.class);
     virtualConstructor.addMapping("SafepointBlob", SafepointBlob.class);
     virtualConstructor.addMapping("DeoptimizationBlob", DeoptimizationBlob.class);
     if (VM.getVM().isServerCompiler()) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/RicochetBlob.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.code;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+/** RicochetBlob (currently only used by Compiler 2) */
+
+public class RicochetBlob extends SingletonBlob {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static void initialize(TypeDataBase db) {
+    // Type type = db.lookupType("RicochetBlob");
+
+    // FIXME: add any needed fields
+  }
+
+  public RicochetBlob(Address addr) {
+    super(addr);
+  }
+
+  public boolean isRicochetBlob() {
+    return true;
+  }
+}
--- a/hotspot/make/hotspot_version	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/make/hotspot_version	Wed Aug 17 22:47:12 2011 -0700
@@ -33,13 +33,13 @@
 # Don't put quotes (fail windows build).
 HOTSPOT_VM_COPYRIGHT=Copyright 2011
 
-HS_MAJOR_VER=21
+HS_MAJOR_VER=22
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=13
+HS_BUILD_NUMBER=01
 
 JDK_MAJOR_VER=1
-JDK_MINOR_VER=7
+JDK_MINOR_VER=8
 JDK_MICRO_VER=0
 
 # Previous (bootdir) JDK version
-JDK_PREVIOUS_VERSION=1.6.0
+JDK_PREVIOUS_VERSION=1.7.0
--- a/hotspot/make/jprt.gmk	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/make/jprt.gmk	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,24 @@
   ZIPFLAGS=-q -y
 endif
 
+jprt_build_productEmb:
+	$(MAKE) JAVASE_EMBEDDED=true jprt_build_product
+
+jprt_build_debugEmb:
+	$(MAKE) JAVASE_EMBEDDED=true jprt_build_debug
+
+jprt_build_fastdebugEmb:
+	$(MAKE) JAVASE_EMBEDDED=true jprt_build_fastdebug
+
+jprt_build_productOpen:
+	$(MAKE) OPENJDK=true jprt_build_product
+
+jprt_build_debugOpen:
+	$(MAKE) OPENJDK=true jprt_build_debug
+
+jprt_build_fastdebugOpen:
+	$(MAKE) OPENJDK=true jprt_build_fastdebug
+
 jprt_build_product: all_product copy_product_jdk export_product_jdk
 	( $(CD) $(JDK_IMAGE_DIR) && \
 	  $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
--- a/hotspot/make/jprt.properties	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/make/jprt.properties	Wed Aug 17 22:47:12 2011 -0700
@@ -50,7 +50,7 @@
 #       sparc etc.
 
 # Define the Solaris platforms we want for the various releases
-
+jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk7b107=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk7temp=solaris_sparc_5.10
@@ -64,6 +64,7 @@
 jprt.my.solaris.sparc.ejdk6=${jprt.my.solaris.sparc.jdk6}
 jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
 
+jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7b107=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7temp=solaris_sparcv9_5.10
@@ -77,6 +78,7 @@
 jprt.my.solaris.sparcv9.ejdk6=${jprt.my.solaris.sparcv9.jdk6}
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
 
+jprt.my.solaris.i586.jdk8=solaris_i586_5.10
 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
 jprt.my.solaris.i586.jdk7b107=solaris_i586_5.10
 jprt.my.solaris.i586.jdk7temp=solaris_i586_5.10
@@ -90,6 +92,7 @@
 jprt.my.solaris.i586.ejdk6=${jprt.my.solaris.i586.jdk6}
 jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
 
+jprt.my.solaris.x64.jdk8=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7b107=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7temp=solaris_x64_5.10
@@ -103,6 +106,7 @@
 jprt.my.solaris.x64.ejdk6=${jprt.my.solaris.x64.jdk6}
 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
 
+jprt.my.linux.i586.jdk8=linux_i586_2.6
 jprt.my.linux.i586.jdk7=linux_i586_2.6
 jprt.my.linux.i586.jdk7b107=linux_i586_2.6
 jprt.my.linux.i586.jdk7temp=linux_i586_2.6
@@ -116,6 +120,7 @@
 jprt.my.linux.i586.ejdk6=linux_i586_2.6
 jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
 
+jprt.my.linux.x64.jdk8=linux_x64_2.6
 jprt.my.linux.x64.jdk7=linux_x64_2.6
 jprt.my.linux.x64.jdk7b107=linux_x64_2.6
 jprt.my.linux.x64.jdk7temp=linux_x64_2.6
@@ -129,6 +134,7 @@
 jprt.my.linux.x64.ejdk6=${jprt.my.linux.x64.jdk6}
 jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
 
+jprt.my.linux.ppc.jdk8=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7b107=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7temp=linux_ppc_2.6
@@ -136,6 +142,7 @@
 jprt.my.linux.ppc.ejdk7=linux_ppc_2.6
 jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
 
+jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7b107=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7temp=linux_ppcv2_2.6
@@ -143,6 +150,7 @@
 jprt.my.linux.ppcv2.ejdk7=linux_ppcv2_2.6
 jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
 
+jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt.jdk7b107=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt.jdk7temp=linux_ppcsflt_2.6
@@ -150,6 +158,7 @@
 jprt.my.linux.ppcsflt.ejdk7=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}
 
+jprt.my.linux.armvfp.jdk8=linux_armvfp_2.6
 jprt.my.linux.armvfp.jdk7=linux_armvfp_2.6
 jprt.my.linux.armvfp.jdk7b107=linux_armvfp_2.6
 jprt.my.linux.armvfp.jdk7temp=linux_armvfp_2.6
@@ -157,6 +166,7 @@
 jprt.my.linux.armvfp.ejdk7=linux_armvfp_2.6
 jprt.my.linux.armvfp=${jprt.my.linux.armvfp.${jprt.tools.default.release}}
 
+jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7b107=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7temp=linux_armsflt_2.6
@@ -164,6 +174,7 @@
 jprt.my.linux.armsflt.ejdk7=linux_armsflt_2.6
 jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
 
+jprt.my.windows.i586.jdk8=windows_i586_5.1
 jprt.my.windows.i586.jdk7=windows_i586_5.1
 jprt.my.windows.i586.jdk7b107=windows_i586_5.0
 jprt.my.windows.i586.jdk7temp=windows_i586_5.0
@@ -177,6 +188,7 @@
 jprt.my.windows.i586.ejdk6=${jprt.my.windows.i586.jdk6}
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 
+jprt.my.windows.x64.jdk8=windows_x64_5.2
 jprt.my.windows.x64.jdk7=windows_x64_5.2
 jprt.my.windows.x64.jdk7b107=windows_x64_5.2
 jprt.my.windows.x64.jdk7temp=windows_x64_5.2
@@ -202,17 +214,23 @@
     ${jprt.my.windows.i586}-{product|fastdebug|debug}, \
     ${jprt.my.windows.x64}-{product|fastdebug|debug}
 
+jprt.build.targets.open= \
+    ${jprt.my.solaris.i586}-{productOpen}, \
+    ${jprt.my.solaris.x64}-{debugOpen}, \
+    ${jprt.my.linux.x64}-{productOpen}
+
 jprt.build.targets.embedded= \
-    ${jprt.my.linux.i586}-{product|fastdebug|debug}, \
-    ${jprt.my.linux.ppc}-{product|fastdebug}, \
-    ${jprt.my.linux.ppcv2}-{product|fastdebug}, \
-    ${jprt.my.linux.ppcsflt}-{product|fastdebug}, \
-    ${jprt.my.linux.armvfp}-{product|fastdebug}, \
-    ${jprt.my.linux.armsflt}-{product|fastdebug}
+    ${jprt.my.linux.i586}-{productEmb|fastdebugEmb|debugEmb}, \
+    ${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.ppcsflt}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.armvfp}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.armsflt}-{productEmb|fastdebugEmb}
 
 jprt.build.targets.all=${jprt.build.targets.standard}, \
-    ${jprt.build.targets.embedded}
+    ${jprt.build.targets.embedded}, ${jprt.build.targets.open}
 
+jprt.build.targets.jdk8=${jprt.build.targets.all}
 jprt.build.targets.jdk7=${jprt.build.targets.all}
 jprt.build.targets.jdk7temp=${jprt.build.targets.all}
 jprt.build.targets.jdk7b107=${jprt.build.targets.all}
@@ -453,6 +471,12 @@
     ${jprt.my.windows.x64}-product-c2-jbb_G1, \
     ${jprt.my.windows.x64}-product-c2-jbb_ParOldGC
 
+# Some basic "smoke" tests for OpenJDK builds
+jprt.test.targets.open = \
+    ${jprt.my.solaris.x64}-{productOpen|debugOpen|fastdebugOpen}-c2-jvm98_tiered, \
+    ${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98_tiered, \
+    ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98_tiered
+
 # Testing for actual embedded builds is different to standard
 jprt.my.linux.i586.test.targets.embedded = \
     linux_i586_2.6-product-c1-scimark
@@ -461,6 +485,7 @@
 # Note: no PPC or ARM tests at this stage
 
 jprt.test.targets.standard = \
+  ${jprt.my.linux.i586.test.targets.embedded}, \
   ${jprt.my.solaris.sparc.test.targets}, \
   ${jprt.my.solaris.sparcv9.test.targets}, \
   ${jprt.my.solaris.i586.test.targets}, \
@@ -468,7 +493,8 @@
   ${jprt.my.linux.i586.test.targets}, \
   ${jprt.my.linux.x64.test.targets}, \
   ${jprt.my.windows.i586.test.targets}, \
-  ${jprt.my.windows.x64.test.targets}
+  ${jprt.my.windows.x64.test.targets}, \
+  ${jprt.test.targets.open}
 
 jprt.test.targets.embedded= 		\
   ${jprt.my.linux.i586.test.targets.embedded}, \
@@ -481,6 +507,7 @@
   ${jprt.my.windows.x64.test.targets}
 
 
+jprt.test.targets.jdk8=${jprt.test.targets.standard}
 jprt.test.targets.jdk7=${jprt.test.targets.standard}
 jprt.test.targets.jdk7temp=${jprt.test.targets.standard}
 jprt.test.targets.jdk7b105=${jprt.test.targets.standard}
@@ -521,6 +548,7 @@
 jprt.make.rule.test.targets.embedded = \
   ${jprt.make.rule.test.targets.standard.client}
 
+jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7temp=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7b107=${jprt.make.rule.test.targets.standard}
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -42,6 +42,12 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #endif
 
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) block_comment(str)
+#endif
+
 // Convert the raw encoding form into the form expected by the
 // constructor for Address.
 Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
@@ -1072,6 +1078,12 @@
     check_and_forward_exception(Gtemp);
   }
 
+#ifdef ASSERT
+  set(badHeapWordVal, G3);
+  set(badHeapWordVal, G4);
+  set(badHeapWordVal, G5);
+#endif
+
   // get oop result if there is one and reset the value in the thread
   if (oop_result->is_valid()) {
     get_vm_result(oop_result);
@@ -1177,6 +1189,11 @@
   call(entry_point, relocInfo::runtime_call_type);
   delayed()->nop();
   restore_thread(thread_cache);
+#ifdef ASSERT
+  set(badHeapWordVal, G3);
+  set(badHeapWordVal, G4);
+  set(badHeapWordVal, G5);
+#endif
 }
 
 
@@ -1518,7 +1535,7 @@
 // save_frame: given number of "extra" words in frame,
 // issue approp. save instruction (p 200, v8 manual)
 
-void MacroAssembler::save_frame(int extraWords = 0) {
+void MacroAssembler::save_frame(int extraWords) {
   int delta = -total_frame_size_in_bytes(extraWords);
   if (is_simm13(delta)) {
     save(SP, delta, SP);
@@ -1730,6 +1747,7 @@
 
   if (reg == G0)  return;       // always NULL, which is always an oop
 
+  BLOCK_COMMENT("verify_oop {");
   char buffer[64];
 #ifdef COMPILER1
   if (CommentedAssembly) {
@@ -1768,6 +1786,7 @@
   delayed()->nop();
   // recover frame size
   add(SP, 8*8,SP);
+  BLOCK_COMMENT("} verify_oop");
 }
 
 void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
@@ -2040,7 +2059,7 @@
   }
   else
      ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
-  assert(false, "error");
+  assert(false, err_msg("DEBUG MESSAGE: %s", msg));
 }
 
 
@@ -3230,6 +3249,7 @@
 
 
 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
+                                                   Register temp_reg,
                                                    int extra_slot_offset) {
   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
   int stackElementSize = Interpreter::stackElementSize;
@@ -3238,18 +3258,19 @@
     offset += arg_slot.as_constant() * stackElementSize;
     return offset;
   } else {
-    Register temp = arg_slot.as_register();
-    sll_ptr(temp, exact_log2(stackElementSize), temp);
+    assert(temp_reg != noreg, "must specify");
+    sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg);
     if (offset != 0)
-      add(temp, offset, temp);
-    return temp;
+      add(temp_reg, offset, temp_reg);
+    return temp_reg;
   }
 }
 
 
 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
+                                         Register temp_reg,
                                          int extra_slot_offset) {
-  return Address(Gargs, argument_offset(arg_slot, extra_slot_offset));
+  return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset));
 }
 
 
@@ -4906,4 +4927,3 @@
   // Caller should set it:
   // add(G0, 1, result); // equals
 }
-
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -309,12 +309,14 @@
 #endif
 
   // accessors
-  Register base()      const { return _base; }
-  Register index()     const { return _index_or_disp.as_register(); }
-  int      disp()      const { return _index_or_disp.as_constant(); }
-
-  bool     has_index() const { return _index_or_disp.is_register(); }
-  bool     has_disp()  const { return _index_or_disp.is_constant(); }
+  Register base()             const { return _base; }
+  Register index()            const { return _index_or_disp.as_register(); }
+  int      disp()             const { return _index_or_disp.as_constant(); }
+
+  bool     has_index()        const { return _index_or_disp.is_register(); }
+  bool     has_disp()         const { return _index_or_disp.is_constant(); }
+
+  bool     uses(Register reg) const { return base() == reg || (has_index() && index() == reg); }
 
   const relocInfo::relocType rtype() { return _rspec.type(); }
   const RelocationHolder&    rspec() { return _rspec; }
@@ -330,6 +332,10 @@
     Address a(base(), disp() + plusdisp);
     return a;
   }
+  bool is_same_address(Address a) const {
+    // disregard _rspec
+    return base() == a.base() && (has_index() ? index() == a.index() : disp() == a.disp());
+  }
 
   Address after_save() const {
     Address a = (*this);
@@ -436,6 +442,10 @@
     : _address((address) addr),
       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 
+  AddressLiteral(oop* addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
   AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
     : _address((address) addr),
       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
@@ -455,6 +465,21 @@
   }
 };
 
+// Convenience classes
+class ExternalAddress: public AddressLiteral {
+ private:
+  static relocInfo::relocType reloc_for_target(address target) {
+    // Sometimes ExternalAddress is used for values which aren't
+    // exactly addresses, like the card table base.
+    // external_word_type can't be used for values in the first page
+    // so just skip the reloc in that case.
+    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
+  }
+
+ public:
+  ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(          target)) {}
+  ExternalAddress(oop*    target) : AddressLiteral(target, reloc_for_target((address) target)) {}
+};
 
 inline Address RegisterImpl::address_in_saved_window() const {
    return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
@@ -691,6 +716,8 @@
     casa_op3     = 0x3c,
     casxa_op3    = 0x3e,
 
+    mftoi_op3    = 0x36,
+
     alt_bit_op3  = 0x10,
      cc_bit_op3  = 0x10
   };
@@ -725,7 +752,13 @@
     fitod_opf   = 0xc8,
     fstod_opf   = 0xc9,
     fstoi_opf   = 0xd1,
-    fdtoi_opf   = 0xd2
+    fdtoi_opf   = 0xd2,
+
+    mdtox_opf   = 0x110,
+    mstouw_opf  = 0x111,
+    mstosw_opf  = 0x113,
+    mxtod_opf   = 0x118,
+    mwtos_opf   = 0x119
   };
 
   enum RCondition {  rc_z = 1,  rc_lez = 2,  rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7  };
@@ -855,9 +888,8 @@
   // and be sign-extended. Check the range.
 
   static void assert_signed_range(intptr_t x, int nbits) {
-    assert( nbits == 32
-        ||  -(1 << nbits-1) <= x  &&  x < ( 1 << nbits-1),
-      "value out of range");
+    assert(nbits == 32 || (-(1 << nbits-1) <= x  &&  x < ( 1 << nbits-1)),
+           err_msg("value out of range: x=" INTPTR_FORMAT ", nbits=%d", x, nbits));
   }
 
   static void assert_signed_word_disp_range(intptr_t x, int nbits) {
@@ -1037,6 +1069,9 @@
     return x & ((1 << 10) - 1);
   }
 
+  // instruction only in VIS3
+  static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
+
   // instruction only in v9
   static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); }
 
@@ -1223,8 +1258,8 @@
 
   // pp 159
 
-  void ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only();  emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w) | fs2(s, w)); }
-  void ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) {             emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xd0 + w) | fs2(s, w)); }
+  void ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only();  emit_long( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(fpop1_op3) | opf(0x80 + w) | fs2(s, w)); }
+  void ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) {             emit_long( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(fpop1_op3) | opf(0xd0 + w) | fs2(s, w)); }
 
   // pp 160
 
@@ -1232,8 +1267,8 @@
 
   // pp 161
 
-  void fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only();  emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w*4) | fs2(s, w)); }
-  void fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) {             emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xc0 + w*4) | fs2(s, w)); }
+  void fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only();  emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w*4) | fs2(s, FloatRegisterImpl::D)); }
+  void fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) {             emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xc0 + w*4) | fs2(s, FloatRegisterImpl::S)); }
 
   // pp 162
 
@@ -1685,6 +1720,19 @@
   inline void wrasi(  Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
   inline void wrfprs( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }
 
+
+  // VIS3 instructions
+
+  void movstosw( FloatRegister s, Register d ) { vis3_only();  emit_long( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstosw_opf) | fs2(s, FloatRegisterImpl::S)); }
+  void movstouw( FloatRegister s, Register d ) { vis3_only();  emit_long( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstouw_opf) | fs2(s, FloatRegisterImpl::S)); }
+  void movdtox(  FloatRegister s, Register d ) { vis3_only();  emit_long( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mdtox_opf) | fs2(s, FloatRegisterImpl::D)); }
+
+  void movwtos( Register s, FloatRegister d ) { vis3_only();  emit_long( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(mftoi_op3) | opf(mwtos_opf) | rs2(s)); }
+  void movxtod( Register s, FloatRegister d ) { vis3_only();  emit_long( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(mftoi_op3) | opf(mxtod_opf) | rs2(s)); }
+
+
+
+
   // For a given register condition, return the appropriate condition code
   // Condition (the one you would use to get the same effect after "tst" on
   // the target register.)
@@ -2287,7 +2335,7 @@
   int total_frame_size_in_bytes(int extraWords);
 
   // used when extraWords known statically
-  void save_frame(int extraWords);
+  void save_frame(int extraWords = 0);
   void save_frame_c1(int size_in_bytes);
   // make a frame, and simultaneously pass up one or two register value
   // into the new register window
@@ -2456,9 +2504,11 @@
   // offset relative to Gargs of argument at tos[arg_slot].
   // (arg_slot == 0 means the last argument, not the first).
   RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
+                                     Register temp_reg,
                                      int extra_slot_offset = 0);
   // Address of Gargs and argument_offset.
   Address            argument_address(RegisterOrConstant arg_slot,
+                                      Register temp_reg,
                                       int extra_slot_offset = 0);
 
   // Stack overflow checking
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -255,7 +255,11 @@
 inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) { relocate(a.rspec(offset)); stf(w, d, a.base(), a.disp() + offset); }
+inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
+  relocate(a.rspec(offset));
+  if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index()        ); }
+  else               {                          stf(w, d, a.base(), a.disp() + offset); }
+}
 
 inline void Assembler::stfsr(  Register s1, Register s2) { v9_dep();   emit_long( op(ldst_op) |             op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stfsr(  Register s1, int simm13a) { v9_dep();   emit_data( op(ldst_op) |             op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -513,6 +513,8 @@
   // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
   // explicitly recognized.
 
+  if (is_ricochet_frame())    return sender_for_ricochet_frame(map);
+
   bool frame_is_interpreted = is_interpreted_frame();
   if (frame_is_interpreted) {
     map->make_integer_regs_unsaved();
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -69,6 +69,484 @@
   return me;
 }
 
+// stack walking support
+
+frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) {
+  //RicochetFrame* f = RicochetFrame::from_frame(fr);
+  // Cf. is_interpreted_frame path of frame::sender
+  intptr_t* younger_sp = fr.sp();
+  intptr_t* sp         = fr.sender_sp();
+  map->make_integer_regs_unsaved();
+  map->shift_window(sp, younger_sp);
+  bool this_frame_adjusted_stack = true;  // I5_savedSP is live in this RF
+  return frame(sp, younger_sp, this_frame_adjusted_stack);
+}
+
+void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) {
+  ResourceMark rm;
+  RicochetFrame* f = RicochetFrame::from_frame(fr);
+
+  // pick up the argument type descriptor:
+  Thread* thread = Thread::current();
+  Handle cookie(thread, f->compute_saved_args_layout(true, true));
+
+  // process fixed part
+  blk->do_oop((oop*)f->saved_target_addr());
+  blk->do_oop((oop*)f->saved_args_layout_addr());
+
+  // process variable arguments:
+  if (cookie.is_null())  return;  // no arguments to describe
+
+  // the cookie is actually the invokeExact method for my target
+  // his argument signature is what I'm interested in
+  assert(cookie->is_method(), "");
+  methodHandle invoker(thread, methodOop(cookie()));
+  assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method");
+  assert(!invoker->is_static(), "must have MH argument");
+  int slot_count = invoker->size_of_parameters();
+  assert(slot_count >= 1, "must include 'this'");
+  intptr_t* base = f->saved_args_base();
+  intptr_t* retval = NULL;
+  if (f->has_return_value_slot())
+    retval = f->return_value_slot_addr();
+  int slot_num = slot_count - 1;
+  intptr_t* loc = &base[slot_num];
+  //blk->do_oop((oop*) loc);   // original target, which is irrelevant
+  int arg_num = 0;
+  for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) {
+    if (ss.at_return_type())  continue;
+    BasicType ptype = ss.type();
+    if (ptype == T_ARRAY)  ptype = T_OBJECT; // fold all refs to T_OBJECT
+    assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void");
+    slot_num -= type2size[ptype];
+    loc = &base[slot_num];
+    bool is_oop = (ptype == T_OBJECT && loc != retval);
+    if (is_oop)  blk->do_oop((oop*)loc);
+    arg_num += 1;
+  }
+  assert(slot_num == 0, "must have processed all the arguments");
+}
+
+// Ricochet Frames
+const Register MethodHandles::RicochetFrame::L1_continuation      = L1;
+const Register MethodHandles::RicochetFrame::L2_saved_target      = L2;
+const Register MethodHandles::RicochetFrame::L3_saved_args_layout = L3;
+const Register MethodHandles::RicochetFrame::L4_saved_args_base   = L4; // cf. Gargs = G4
+const Register MethodHandles::RicochetFrame::L5_conversion        = L5;
+#ifdef ASSERT
+const Register MethodHandles::RicochetFrame::L0_magic_number_1    = L0;
+#endif //ASSERT
+
+oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, bool write_cache) {
+  if (read_cache) {
+    oop cookie = saved_args_layout();
+    if (cookie != NULL)  return cookie;
+  }
+  oop target = saved_target();
+  oop mtype  = java_lang_invoke_MethodHandle::type(target);
+  oop mtform = java_lang_invoke_MethodType::form(mtype);
+  oop cookie = java_lang_invoke_MethodTypeForm::vmlayout(mtform);
+  if (write_cache)  {
+    (*saved_args_layout_addr()) = cookie;
+  }
+  return cookie;
+}
+
+void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
+                                                          // output params:
+                                                          int* bounce_offset,
+                                                          int* exception_offset,
+                                                          int* frame_size_in_words) {
+  (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize;
+
+  address start = __ pc();
+
+#ifdef ASSERT
+  __ illtrap(0); __ illtrap(0); __ illtrap(0);
+  // here's a hint of something special:
+  __ set(MAGIC_NUMBER_1, G0);
+  __ set(MAGIC_NUMBER_2, G0);
+#endif //ASSERT
+  __ illtrap(0);  // not reached
+
+  // Return values are in registers.
+  // L1_continuation contains a cleanup continuation we must return
+  // to.
+
+  (*bounce_offset) = __ pc() - start;
+  BLOCK_COMMENT("ricochet_blob.bounce");
+
+  if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
+  trace_method_handle(_masm, "ricochet_blob.bounce");
+
+  __ JMP(L1_continuation, 0);
+  __ delayed()->nop();
+  __ illtrap(0);
+
+  DEBUG_ONLY(__ set(MAGIC_NUMBER_2, G0));
+
+  (*exception_offset) = __ pc() - start;
+  BLOCK_COMMENT("ricochet_blob.exception");
+
+  // compare this to Interpreter::rethrow_exception_entry, which is parallel code
+  // for example, see TemplateInterpreterGenerator::generate_throw_exception
+  // Live registers in:
+  //   Oexception  (O0): exception
+  //   Oissuing_pc (O1): return address/pc that threw exception (ignored, always equal to bounce addr)
+  __ verify_oop(Oexception);
+
+  // Take down the frame.
+
+  // Cf. InterpreterMacroAssembler::remove_activation.
+  leave_ricochet_frame(_masm, /*recv_reg=*/ noreg, I5_savedSP, I7);
+
+  // We are done with this activation frame; find out where to go next.
+  // The continuation point will be an exception handler, which expects
+  // the following registers set up:
+  //
+  // Oexception: exception
+  // Oissuing_pc: the local call that threw exception
+  // Other On: garbage
+  // In/Ln:  the contents of the caller's register window
+  //
+  // We do the required restore at the last possible moment, because we
+  // need to preserve some state across a runtime call.
+  // (Remember that the caller activation is unknown--it might not be
+  // interpreted, so things like Lscratch are useless in the caller.)
+  __ mov(Oexception,  Oexception ->after_save());  // get exception in I0 so it will be on O0 after restore
+  __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
+  __ call_VM_leaf(L7_thread_cache,
+                  CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
+                  G2_thread, Oissuing_pc->after_save());
+
+  // The caller's SP was adjusted upon method entry to accommodate
+  // the callee's non-argument locals. Undo that adjustment.
+  __ JMP(O0, 0);                         // return exception handler in caller
+  __ delayed()->restore(I5_savedSP, G0, SP);
+
+  // (same old exception object is already in Oexception; see above)
+  // Note that an "issuing PC" is actually the next PC after the call
+}
+
+void MethodHandles::RicochetFrame::enter_ricochet_frame(MacroAssembler* _masm,
+                                                        Register recv_reg,
+                                                        Register argv_reg,
+                                                        address return_handler) {
+  // does not include the __ save()
+  assert(argv_reg == Gargs, "");
+  Address G3_mh_vmtarget(   recv_reg, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes());
+  Address G3_amh_conversion(recv_reg, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
+
+  // Create the RicochetFrame.
+  // Unlike on x86 we can store all required information in local
+  // registers.
+  BLOCK_COMMENT("push RicochetFrame {");
+  __ set(ExternalAddress(return_handler),          L1_continuation);
+  __ load_heap_oop(G3_mh_vmtarget,                 L2_saved_target);
+  __ mov(G0,                                       L3_saved_args_layout);
+  __ mov(Gargs,                                    L4_saved_args_base);
+  __ lduw(G3_amh_conversion,                       L5_conversion);  // 32-bit field
+  // I5, I6, I7 are already set up
+  DEBUG_ONLY(__ set((int32_t) MAGIC_NUMBER_1,      L0_magic_number_1));
+  BLOCK_COMMENT("} RicochetFrame");
+}
+
+void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
+                                                        Register recv_reg,
+                                                        Register new_sp_reg,
+                                                        Register sender_pc_reg) {
+  assert(new_sp_reg == I5_savedSP, "exact_sender_sp already in place");
+  assert(sender_pc_reg == I7, "in a fixed place");
+  // does not include the __ ret() & __ restore()
+  assert_different_registers(recv_reg, new_sp_reg, sender_pc_reg);
+  // Take down the frame.
+  // Cf. InterpreterMacroAssembler::remove_activation.
+  BLOCK_COMMENT("end_ricochet_frame {");
+  if (recv_reg->is_valid())
+    __ mov(L2_saved_target, recv_reg);
+  BLOCK_COMMENT("} end_ricochet_frame");
+}
+
+// Emit code to verify that FP is pointing at a valid ricochet frame.
+#ifdef ASSERT
+enum {
+  ARG_LIMIT = 255, SLOP = 45,
+  // use this parameter for checking for garbage stack movements:
+  UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
+  // the slop defends against false alarms due to fencepost errors
+};
+
+void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
+  // The stack should look like this:
+  //    ... keep1 | dest=42 | keep2 | magic | handler | magic | recursive args | [RF]
+  // Check various invariants.
+
+  Register O7_temp = O7, O5_temp = O5;
+
+  Label L_ok_1, L_ok_2, L_ok_3, L_ok_4;
+  BLOCK_COMMENT("verify_clean {");
+  // Magic numbers must check out:
+  __ set((int32_t) MAGIC_NUMBER_1, O7_temp);
+  __ cmp(O7_temp, L0_magic_number_1);
+  __ br(Assembler::equal, false, Assembler::pt, L_ok_1);
+  __ delayed()->nop();
+  __ stop("damaged ricochet frame: MAGIC_NUMBER_1 not found");
+
+  __ BIND(L_ok_1);
+
+  // Arguments pointer must look reasonable:
+#ifdef _LP64
+  Register FP_temp = O5_temp;
+  __ add(FP, STACK_BIAS, FP_temp);
+#else
+  Register FP_temp = FP;
+#endif
+  __ cmp(L4_saved_args_base, FP_temp);
+  __ br(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok_2);
+  __ delayed()->nop();
+  __ stop("damaged ricochet frame: L4 < FP");
+
+  __ BIND(L_ok_2);
+  // Disable until we decide on its fate
+  // __ sub(L4_saved_args_base, UNREASONABLE_STACK_MOVE * Interpreter::stackElementSize, O7_temp);
+  // __ cmp(O7_temp, FP_temp);
+  // __ br(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok_3);
+  // __ delayed()->nop();
+  // __ stop("damaged ricochet frame: (L4 - UNREASONABLE_STACK_MOVE) > FP");
+
+  __ BIND(L_ok_3);
+  extract_conversion_dest_type(_masm, L5_conversion, O7_temp);
+  __ cmp(O7_temp, T_VOID);
+  __ br(Assembler::equal, false, Assembler::pt, L_ok_4);
+  __ delayed()->nop();
+  extract_conversion_vminfo(_masm, L5_conversion, O5_temp);
+  __ ld_ptr(L4_saved_args_base, __ argument_offset(O5_temp, O5_temp), O7_temp);
+  assert(__ is_simm13(RETURN_VALUE_PLACEHOLDER), "must be simm13");
+  __ cmp(O7_temp, (int32_t) RETURN_VALUE_PLACEHOLDER);
+  __ brx(Assembler::equal, false, Assembler::pt, L_ok_4);
+  __ delayed()->nop();
+  __ stop("damaged ricochet frame: RETURN_VALUE_PLACEHOLDER not found");
+  __ BIND(L_ok_4);
+  BLOCK_COMMENT("} verify_clean");
+}
+#endif //ASSERT
+
+void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) {
+  if (VerifyMethodHandles)
+    verify_klass(_masm, klass_reg, SystemDictionaryHandles::Class_klass(), temp_reg, temp2_reg,
+                 "AMH argument is a Class");
+  __ load_heap_oop(Address(klass_reg, java_lang_Class::klass_offset_in_bytes()), klass_reg);
+}
+
+void MethodHandles::load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg) {
+  assert(CONV_VMINFO_SHIFT == 0, "preshifted");
+  assert(CONV_VMINFO_MASK == right_n_bits(BitsPerByte), "else change type of following load");
+  __ ldub(conversion_field_addr.plus_disp(BytesPerInt - 1), reg);
+}
+
+void MethodHandles::extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg) {
+  assert(CONV_VMINFO_SHIFT == 0, "preshifted");
+  __ and3(conversion_field_reg, CONV_VMINFO_MASK, reg);
+}
+
+void MethodHandles::extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg) {
+  __ srl(conversion_field_reg, CONV_DEST_TYPE_SHIFT, reg);
+  __ and3(reg, 0x0F, reg);
+}
+
+void MethodHandles::load_stack_move(MacroAssembler* _masm,
+                                    Address G3_amh_conversion,
+                                    Register stack_move_reg) {
+  BLOCK_COMMENT("load_stack_move {");
+  __ ldsw(G3_amh_conversion, stack_move_reg);
+  __ sra(stack_move_reg, CONV_STACK_MOVE_SHIFT, stack_move_reg);
+  if (VerifyMethodHandles) {
+    Label L_ok, L_bad;
+    int32_t stack_move_limit = 0x0800;  // extra-large
+    __ cmp(stack_move_reg, stack_move_limit);
+    __ br(Assembler::greaterEqual, false, Assembler::pn, L_bad);
+    __ delayed()->nop();
+    __ cmp(stack_move_reg, -stack_move_limit);
+    __ br(Assembler::greater, false, Assembler::pt, L_ok);
+    __ delayed()->nop();
+    __ BIND(L_bad);
+    __ stop("load_stack_move of garbage value");
+    __ BIND(L_ok);
+  }
+  BLOCK_COMMENT("} load_stack_move");
+}
+
+#ifdef ASSERT
+void MethodHandles::RicochetFrame::verify() const {
+  assert(magic_number_1() == MAGIC_NUMBER_1, "");
+  if (!Universe::heap()->is_gc_active()) {
+    if (saved_args_layout() != NULL) {
+      assert(saved_args_layout()->is_method(), "must be valid oop");
+    }
+    if (saved_target() != NULL) {
+      assert(java_lang_invoke_MethodHandle::is_instance(saved_target()), "checking frame value");
+    }
+  }
+  int conv_op = adapter_conversion_op(conversion());
+  assert(conv_op == java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS ||
+         conv_op == java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS ||
+         conv_op == java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF,
+         "must be a sane conversion");
+  if (has_return_value_slot()) {
+    assert(*return_value_slot_addr() == RETURN_VALUE_PLACEHOLDER, "");
+  }
+}
+
+void MethodHandles::verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
+  // Verify that argslot lies within (Gargs, FP].
+  Label L_ok, L_bad;
+  BLOCK_COMMENT("verify_argslot {");
+  __ add(FP, STACK_BIAS, temp_reg);  // STACK_BIAS is zero on !_LP64
+  __ cmp(argslot_reg, temp_reg);
+  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
+  __ delayed()->nop();
+  __ cmp(Gargs, argslot_reg);
+  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
+  __ delayed()->nop();
+  __ BIND(L_bad);
+  __ stop(error_message);
+  __ BIND(L_ok);
+  BLOCK_COMMENT("} verify_argslot");
+}
+
+void MethodHandles::verify_argslots(MacroAssembler* _masm,
+                                    RegisterOrConstant arg_slots,
+                                    Register arg_slot_base_reg,
+                                    Register temp_reg,
+                                    Register temp2_reg,
+                                    bool negate_argslots,
+                                    const char* error_message) {
+  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
+  Label L_ok, L_bad;
+  BLOCK_COMMENT("verify_argslots {");
+  if (negate_argslots) {
+    if (arg_slots.is_constant()) {
+      arg_slots = -1 * arg_slots.as_constant();
+    } else {
+      __ neg(arg_slots.as_register(), temp_reg);
+      arg_slots = temp_reg;
+    }
+  }
+  __ add(arg_slot_base_reg, __ argument_offset(arg_slots, temp_reg), temp_reg);
+  __ add(FP, STACK_BIAS, temp2_reg);  // STACK_BIAS is zero on !_LP64
+  __ cmp(temp_reg, temp2_reg);
+  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
+  __ delayed()->nop();
+  // Gargs points to the first word so adjust by BytesPerWord
+  __ add(arg_slot_base_reg, BytesPerWord, temp_reg);
+  __ cmp(Gargs, temp_reg);
+  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
+  __ delayed()->nop();
+  __ BIND(L_bad);
+  __ stop(error_message);
+  __ BIND(L_ok);
+  BLOCK_COMMENT("} verify_argslots");
+}
+
+// Make sure that arg_slots has the same sign as the given direction.
+// If (and only if) arg_slots is an assembly-time constant, also allow it to be zero.
+void MethodHandles::verify_stack_move(MacroAssembler* _masm,
+                                      RegisterOrConstant arg_slots, int direction) {
+  enum { UNREASONABLE_STACK_MOVE = 256 * 4 };  // limit of 255 arguments
+  bool allow_zero = arg_slots.is_constant();
+  if (direction == 0) { direction = +1; allow_zero = true; }
+  assert(stack_move_unit() == -1, "else add extra checks here");
+  if (arg_slots.is_register()) {
+    Label L_ok, L_bad;
+    BLOCK_COMMENT("verify_stack_move {");
+    // __ btst(-stack_move_unit() - 1, arg_slots.as_register());  // no need
+    // __ br(Assembler::notZero, false, Assembler::pn, L_bad);
+    // __ delayed()->nop();
+    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
+    if (direction > 0) {
+      __ br(allow_zero ? Assembler::less : Assembler::lessEqual, false, Assembler::pn, L_bad);
+      __ delayed()->nop();
+      __ cmp(arg_slots.as_register(), (int32_t) UNREASONABLE_STACK_MOVE);
+      __ br(Assembler::less, false, Assembler::pn, L_ok);
+      __ delayed()->nop();
+    } else {
+      __ br(allow_zero ? Assembler::greater : Assembler::greaterEqual, false, Assembler::pn, L_bad);
+      __ delayed()->nop();
+      __ cmp(arg_slots.as_register(), (int32_t) -UNREASONABLE_STACK_MOVE);
+      __ br(Assembler::greater, false, Assembler::pn, L_ok);
+      __ delayed()->nop();
+    }
+    __ BIND(L_bad);
+    if (direction > 0)
+      __ stop("assert arg_slots > 0");
+    else
+      __ stop("assert arg_slots < 0");
+    __ BIND(L_ok);
+    BLOCK_COMMENT("} verify_stack_move");
+  } else {
+    intptr_t size = arg_slots.as_constant();
+    if (direction < 0)  size = -size;
+    assert(size >= 0, "correct direction of constant move");
+    assert(size < UNREASONABLE_STACK_MOVE, "reasonable size of constant move");
+  }
+}
+
+void MethodHandles::verify_klass(MacroAssembler* _masm,
+                                 Register obj_reg, KlassHandle klass,
+                                 Register temp_reg, Register temp2_reg,
+                                 const char* error_message) {
+  oop* klass_addr = klass.raw_value();
+  assert(klass_addr >= SystemDictionaryHandles::Object_klass().raw_value() &&
+         klass_addr <= SystemDictionaryHandles::Long_klass().raw_value(),
+         "must be one of the SystemDictionaryHandles");
+  Label L_ok, L_bad;
+  BLOCK_COMMENT("verify_klass {");
+  __ verify_oop(obj_reg);
+  __ br_null(obj_reg, false, Assembler::pn, L_bad);
+  __ delayed()->nop();
+  __ load_klass(obj_reg, temp_reg);
+  __ set(ExternalAddress(klass_addr), temp2_reg);
+  __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
+  __ cmp(temp_reg, temp2_reg);
+  __ brx(Assembler::equal, false, Assembler::pt, L_ok);
+  __ delayed()->nop();
+  intptr_t super_check_offset = klass->super_check_offset();
+  __ ld_ptr(Address(temp_reg, super_check_offset), temp_reg);
+  __ set(ExternalAddress(klass_addr), temp2_reg);
+  __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
+  __ cmp(temp_reg, temp2_reg);
+  __ brx(Assembler::equal, false, Assembler::pt, L_ok);
+  __ delayed()->nop();
+  __ BIND(L_bad);
+  __ stop(error_message);
+  __ BIND(L_ok);
+  BLOCK_COMMENT("} verify_klass");
+}
+#endif // ASSERT
+
+
+void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp) {
+  assert(method == G5_method, "interpreter calling convention");
+  __ verify_oop(method);
+  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);
+  if (JvmtiExport::can_post_interpreter_events()) {
+    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
+    // compiled code in threads for which the event is enabled.  Check here for
+    // interp_only_mode if these events CAN be enabled.
+    __ verify_thread();
+    Label skip_compiled_code;
+
+    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
+    __ ld(interp_only, temp);
+    __ tst(temp);
+    __ br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
+    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
+    __ bind(skip_compiled_code);
+  }
+  __ jmp(target, 0);
+  __ delayed()->nop();
+}
+
 
 // Code generation
 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
@@ -94,8 +572,9 @@
   __ brx(Assembler::notEqual, false, Assembler::pt, invoke_generic_slow_path);
   __ delayed()->nop();
   __ mov(O0_mtype, G5_method_type);  // required by throw_WrongMethodType
-  // mov(G3_method_handle, G3_method_handle);  // already in this register
-  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
+  __ mov(G3_method_handle, G3_method_handle);  // already in this register
+  // O0 will be filled in with JavaThread in stub
+  __ jump_to(AddressLiteral(StubRoutines::throw_WrongMethodTypeException_entry()), O3_scratch);
   __ delayed()->nop();
 
   // here's where control starts out:
@@ -103,6 +582,9 @@
   address entry_point = __ pc();
 
   // fetch the MethodType from the method handle
+  // FIXME: Interpreter should transmit pre-popped stack pointer, to locate base of arg list.
+  // This would simplify several touchy bits of code.
+  // See 6984712: JSR 292 method handle calls need a clean argument base pointer
   {
     Register tem = G5_method;
     for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
@@ -114,19 +596,25 @@
   // given the MethodType, find out where the MH argument is buried
   __ load_heap_oop(Address(O0_mtype,   __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes,        O1_scratch)), O4_argslot);
   __ ldsw(         Address(O4_argslot, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot);
-  __ add(Gargs, __ argument_offset(O4_argslot, 1), O4_argbase);
+  __ add(__ argument_address(O4_argslot, O4_argslot, 1), O4_argbase);
   // Note: argument_address uses its input as a scratch register!
-  __ ld_ptr(Address(O4_argbase, -Interpreter::stackElementSize), G3_method_handle);
+  Address mh_receiver_slot_addr(O4_argbase, -Interpreter::stackElementSize);
+  __ ld_ptr(mh_receiver_slot_addr, G3_method_handle);
 
   trace_method_handle(_masm, "invokeExact");
 
   __ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type);
+
+  // Nobody uses the MH receiver slot after this.  Make sure.
+  DEBUG_ONLY(__ set((int32_t) 0x999999, O1_scratch); __ st_ptr(O1_scratch, mh_receiver_slot_addr));
+
   __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
 
   // for invokeGeneric (only), apply argument and result conversions on the fly
   __ bind(invoke_generic_slow_path);
 #ifdef ASSERT
-  { Label L;
+  if (VerifyMethodHandles) {
+    Label L;
     __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
     __ cmp(O1_scratch, (int) vmIntrinsics::_invokeGeneric);
     __ brx(Assembler::equal, false, Assembler::pt, L);
@@ -137,7 +625,7 @@
 #endif //ASSERT
 
   // make room on the stack for another pointer:
-  insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK, O4_argbase, O1_scratch, O2_scratch, O3_scratch);
+  insert_arg_slots(_masm, 2 * stack_move_unit(), O4_argbase, O1_scratch, O2_scratch, O3_scratch);
   // load up an adapter from the calling type (Java weaves this)
   Register O2_form    = O2_scratch;
   Register O3_adapter = O3_scratch;
@@ -157,74 +645,88 @@
   return entry_point;
 }
 
+// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
+static RegisterOrConstant constant(int value) {
+  return RegisterOrConstant(value);
+}
 
+static void load_vmargslot(MacroAssembler* _masm, Address vmargslot_addr, Register result) {
+  __ ldsw(vmargslot_addr, result);
+}
+
+static RegisterOrConstant adjust_SP_and_Gargs_down_by_slots(MacroAssembler* _masm,
+                                                            RegisterOrConstant arg_slots,
+                                                            Register temp_reg, Register temp2_reg) {
+  // Keep the stack pointer 2*wordSize aligned.
+  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
+  if (arg_slots.is_constant()) {
+    const int        offset = arg_slots.as_constant() << LogBytesPerWord;
+    const int masked_offset = round_to(offset, 2 * BytesPerWord);
+    const int masked_offset2 = (offset + 1*BytesPerWord) & ~TwoWordAlignmentMask;
+    assert(masked_offset == masked_offset2, "must agree");
+    __ sub(Gargs,        offset, Gargs);
+    __ sub(SP,    masked_offset, SP   );
+    return offset;
+  } else {
 #ifdef ASSERT
-static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
-  // Verify that argslot lies within (Gargs, FP].
-  Label L_ok, L_bad;
-  BLOCK_COMMENT("{ verify_argslot");
-#ifdef _LP64
-  __ add(FP, STACK_BIAS, temp_reg);
-  __ cmp(argslot_reg, temp_reg);
-#else
-  __ cmp(argslot_reg, FP);
+    {
+      Label L_ok;
+      __ cmp(arg_slots.as_register(), 0);
+      __ br(Assembler::greaterEqual, false, Assembler::pt, L_ok);
+      __ delayed()->nop();
+      __ stop("negative arg_slots");
+      __ bind(L_ok);
+    }
 #endif
-  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
-  __ delayed()->nop();
-  __ cmp(Gargs, argslot_reg);
-  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
-  __ delayed()->nop();
-  __ bind(L_bad);
-  __ stop(error_message);
-  __ bind(L_ok);
-  BLOCK_COMMENT("} verify_argslot");
+    __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg);
+    __ add( temp_reg,  1*BytesPerWord,       temp2_reg);
+    __ andn(temp2_reg, TwoWordAlignmentMask, temp2_reg);
+    __ sub(Gargs, temp_reg,  Gargs);
+    __ sub(SP,    temp2_reg, SP   );
+    return temp_reg;
+  }
 }
-#endif
 
+static RegisterOrConstant adjust_SP_and_Gargs_up_by_slots(MacroAssembler* _masm,
+                                                          RegisterOrConstant arg_slots,
+                                                          Register temp_reg, Register temp2_reg) {
+  // Keep the stack pointer 2*wordSize aligned.
+  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
+  if (arg_slots.is_constant()) {
+    const int        offset = arg_slots.as_constant() << LogBytesPerWord;
+    const int masked_offset = offset & ~TwoWordAlignmentMask;
+    __ add(Gargs,        offset, Gargs);
+    __ add(SP,    masked_offset, SP   );
+    return offset;
+  } else {
+    __ sll_ptr(arg_slots.as_register(), LogBytesPerWord, temp_reg);
+    __ andn(temp_reg, TwoWordAlignmentMask, temp2_reg);
+    __ add(Gargs, temp_reg,  Gargs);
+    __ add(SP,    temp2_reg, SP   );
+    return temp_reg;
+  }
+}
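The rounding in the two helpers above is easy to check on the host side. Here is a minimal standalone sketch (not part of the patch) of the same arithmetic, assuming 64-bit words (LogBytesPerWord == 3): Gargs always moves by the exact byte count, while SP moves by a multiple of 2*wordSize, rounded up when the stack grows and rounded down when it shrinks.

#include <cassert>

// Standalone model of the SP/Gargs adjustment rounding used above.
// Assumes 64-bit words: LogBytesPerWord == 3, so a slot is 8 bytes and
// SP is kept 16-byte (2*wordSize) aligned.
int main() {
  const int LogBytesPerWord      = 3;
  const int BytesPerWord         = 1 << LogBytesPerWord;
  const int TwoWordAlignmentMask = (1 << (LogBytesPerWord + 1)) - 1;  // right_n_bits(LogBytesPerWord + 1)
  for (int arg_slots = 0; arg_slots <= 6; arg_slots++) {
    int offset = arg_slots << LogBytesPerWord;                         // exact Gargs movement
    int grow   = (offset + 1 * BytesPerWord) & ~TwoWordAlignmentMask;  // SP movement when growing (rounds up)
    int shrink = offset & ~TwoWordAlignmentMask;                       // SP movement when shrinking (rounds down)
    assert(grow >= offset && shrink <= offset);
    assert(grow % (2 * BytesPerWord) == 0 && shrink % (2 * BytesPerWord) == 0);
  }
  return 0;
}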
 
 // Helper to insert argument slots into the stack.
-// arg_slots must be a multiple of stack_move_unit() and <= 0
+// arg_slots must be a multiple of stack_move_unit() and < 0
+// argslot_reg is decremented to point to the new (shifted) location of the argslot
+// But, temp_reg ends up holding the original value of argslot_reg.
 void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                      RegisterOrConstant arg_slots,
-                                     int arg_mask,
                                      Register argslot_reg,
                                      Register temp_reg, Register temp2_reg, Register temp3_reg) {
-  assert(temp3_reg != noreg, "temp3 required");
+  // allow constant zero
+  if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
+    return;
+
   assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                              (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
 
-#ifdef ASSERT
-  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
-  if (arg_slots.is_register()) {
-    Label L_ok, L_bad;
-    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
-    __ br(Assembler::greater, false, Assembler::pn, L_bad);
-    __ delayed()->nop();
-    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
-    __ br(Assembler::zero, false, Assembler::pt, L_ok);
-    __ delayed()->nop();
-    __ bind(L_bad);
-    __ stop("assert arg_slots <= 0 and clear low bits");
-    __ bind(L_ok);
-  } else {
-    assert(arg_slots.as_constant() <= 0, "");
-    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
-  }
-#endif // ASSERT
-
-#ifdef _LP64
-  if (arg_slots.is_register()) {
-    // Was arg_slots register loaded as signed int?
-    Label L_ok;
-    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
-    __ sra(temp_reg, BitsPerInt, temp_reg);
-    __ cmp(arg_slots.as_register(), temp_reg);
-    __ br(Assembler::equal, false, Assembler::pt, L_ok);
-    __ delayed()->nop();
-    __ stop("arg_slots register not loaded as signed int");
-    __ bind(L_ok);
-  }
-#endif
+  BLOCK_COMMENT("insert_arg_slots {");
+  if (VerifyMethodHandles)
+    verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
+  if (VerifyMethodHandles)
+    verify_stack_move(_masm, arg_slots, -1);
 
   // Make space on the stack for the inserted argument(s).
   // Then pull down everything shallower than argslot_reg.
@@ -234,26 +736,20 @@
   //   for (temp = sp + size; temp < argslot; temp++)
   //     temp[-size] = temp[0]
   //   argslot -= size;
-  BLOCK_COMMENT("insert_arg_slots {");
-  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
 
-  // Keep the stack pointer 2*wordSize aligned.
-  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
-  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
-  __ add(SP, masked_offset, SP);
-
-  __ mov(Gargs, temp_reg);  // source pointer for copy
-  __ add(Gargs, offset, Gargs);
+  // offset ends up in temp3_reg when arg_slots is a register.
+  RegisterOrConstant offset = adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg);
+  __ sub(Gargs, offset, temp_reg);  // source pointer for copy
 
   {
     Label loop;
     __ BIND(loop);
     // pull one word down each time through the loop
-    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
-    __ st_ptr(temp2_reg, Address(temp_reg, offset));
+    __ ld_ptr(           Address(temp_reg, 0     ), temp2_reg);
+    __ st_ptr(temp2_reg, Address(temp_reg, offset)           );
     __ add(temp_reg, wordSize, temp_reg);
     __ cmp(temp_reg, argslot_reg);
-    __ brx(Assembler::less, false, Assembler::pt, loop);
+    __ brx(Assembler::lessUnsigned, false, Assembler::pt, loop);
     __ delayed()->nop();  // FILLME
   }
 
@@ -264,39 +760,24 @@
 
 
 // Helper to remove argument slots from the stack.
-// arg_slots must be a multiple of stack_move_unit() and >= 0
+// arg_slots must be a multiple of stack_move_unit() and > 0
 void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                      RegisterOrConstant arg_slots,
                                      Register argslot_reg,
                                      Register temp_reg, Register temp2_reg, Register temp3_reg) {
-  assert(temp3_reg != noreg, "temp3 required");
+  // allow constant zero
+  if (arg_slots.is_constant() && arg_slots.as_constant() == 0)
+    return;
   assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                              (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
 
-  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
+  BLOCK_COMMENT("remove_arg_slots {");
+  if (VerifyMethodHandles)
+    verify_argslots(_masm, arg_slots, argslot_reg, temp_reg, temp2_reg, false,
+                    "deleted argument(s) must fall within current frame");
+  if (VerifyMethodHandles)
+    verify_stack_move(_masm, arg_slots, +1);
 
-#ifdef ASSERT
-  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
-  __ add(argslot_reg, offset, temp2_reg);
-  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
-  if (arg_slots.is_register()) {
-    Label L_ok, L_bad;
-    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
-    __ br(Assembler::less, false, Assembler::pn, L_bad);
-    __ delayed()->nop();
-    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
-    __ br(Assembler::zero, false, Assembler::pt, L_ok);
-    __ delayed()->nop();
-    __ bind(L_bad);
-    __ stop("assert arg_slots >= 0 and clear low bits");
-    __ bind(L_ok);
-  } else {
-    assert(arg_slots.as_constant() >= 0, "");
-    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
-  }
-#endif // ASSERT
-
-  BLOCK_COMMENT("remove_arg_slots {");
   // Pull up everything shallower than argslot.
   // Then remove the excess space on the stack.
   // The stacked return address gets pulled up with everything else.
@@ -305,39 +786,271 @@
   //     temp[size] = temp[0]
   //   argslot += size;
   //   sp += size;
+
+  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
   __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
+
   {
-    Label loop;
-    __ BIND(loop);
+    Label L_loop;
+    __ BIND(L_loop);
     // pull one word up each time through the loop
-    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
-    __ st_ptr(temp2_reg, Address(temp_reg, offset));
+    __ ld_ptr(           Address(temp_reg, 0     ), temp2_reg);
+    __ st_ptr(temp2_reg, Address(temp_reg, offset)           );
     __ sub(temp_reg, wordSize, temp_reg);
     __ cmp(temp_reg, Gargs);
-    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
+    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_loop);
     __ delayed()->nop();  // FILLME
   }
 
-  // Now move the argslot up, to point to the just-copied block.
-  __ add(Gargs, offset, Gargs);
   // And adjust the argslot address to point at the deletion point.
   __ add(argslot_reg, offset, argslot_reg);
 
-  // Keep the stack pointer 2*wordSize aligned.
-  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
-  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
-  __ add(SP, masked_offset, SP);
+  // We don't need the offset at this point anymore, just adjust SP and Gargs.
+  (void) adjust_SP_and_Gargs_up_by_slots(_masm, arg_slots, temp3_reg, temp_reg);
+
   BLOCK_COMMENT("} remove_arg_slots");
 }
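For reference, here is a small host-side model (not part of the patch; plain C++ over an ordinary array) of what the two copy loops above do: slots shallower than the insertion/deletion point are moved one word at a time, opening or closing a gap of 'count' slots, with index 0 standing in for the shallow end of the stack.

#include <cassert>

// insert: open 'count' free slots just below 'argslot'; everything shallower moves down.
static void insert_slots(long* stack, int top, int argslot, int count) {
  for (int i = top; i < argslot; i++)  stack[i - count] = stack[i];
}
// remove: close 'count' slots starting at 'argslot'; everything shallower moves up.
static void remove_slots(long* stack, int top, int argslot, int count) {
  for (int i = argslot - 1; i >= top; i--)  stack[i + count] = stack[i];
}

int main() {
  long s[8] = { -1, -1, 10, 11, 12, 20, 21, 22 };  // slots 2..4 are shallower than slot 5
  insert_slots(s, 2, 5, 2);                        // open two slots just below slot 5
  assert(s[0] == 10 && s[1] == 11 && s[2] == 12);  // shallow args moved down two slots
  remove_slots(s, 0, 3, 2);                        // close those two slots again
  assert(s[2] == 10 && s[3] == 11 && s[4] == 12);  // args are back where they started
  return 0;
}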
 
+// Helper to copy argument slots to the top of the stack.
+// The sequence starts with argslot_reg and is counted by slot_count
+// slot_count must be a multiple of stack_move_unit() and >= 0
+// This function blows the temps but does not change argslot_reg.
+void MethodHandles::push_arg_slots(MacroAssembler* _masm,
+                                   Register argslot_reg,
+                                   RegisterOrConstant slot_count,
+                                   Register temp_reg, Register temp2_reg) {
+  // allow constant zero
+  if (slot_count.is_constant() && slot_count.as_constant() == 0)
+    return;
+  assert_different_registers(argslot_reg, temp_reg, temp2_reg,
+                             (!slot_count.is_register() ? Gargs : slot_count.as_register()),
+                             SP);
+  assert(Interpreter::stackElementSize == wordSize, "else change this code");
+
+  BLOCK_COMMENT("push_arg_slots {");
+  if (VerifyMethodHandles)
+    verify_stack_move(_masm, slot_count, 0);
+
+  RegisterOrConstant offset = adjust_SP_and_Gargs_down_by_slots(_masm, slot_count, temp2_reg, temp_reg);
+
+  if (slot_count.is_constant()) {
+    for (int i = slot_count.as_constant() - 1; i >= 0; i--) {
+      __ ld_ptr(          Address(argslot_reg, i * wordSize), temp_reg);
+      __ st_ptr(temp_reg, Address(Gargs,       i * wordSize));
+    }
+  } else {
+    Label L_plural, L_loop, L_break;
+    // Emit code to dynamically check for the common cases, zero and one slot.
+    __ cmp(slot_count.as_register(), (int32_t) 1);
+    __ br(Assembler::greater, false, Assembler::pn, L_plural);
+    __ delayed()->nop();
+    __ br(Assembler::less, false, Assembler::pn, L_break);
+    __ delayed()->nop();
+    __ ld_ptr(          Address(argslot_reg, 0), temp_reg);
+    __ st_ptr(temp_reg, Address(Gargs,       0));
+    __ ba(false, L_break);
+    __ delayed()->nop();  // FILLME
+    __ BIND(L_plural);
+
+    // Loop for 2 or more:
+    //   top = &argslot[slot_count]
+    //   while (top > argslot)  *(--Gargs) = *(--top)
+    Register top_reg = temp_reg;
+    __ add(argslot_reg, offset, top_reg);
+    __ add(Gargs,       offset, Gargs  );  // move back up again so we can go down
+    __ BIND(L_loop);
+    __ sub(top_reg, wordSize, top_reg);
+    __ sub(Gargs,   wordSize, Gargs  );
+    __ ld_ptr(           Address(top_reg, 0), temp2_reg);
+    __ st_ptr(temp2_reg, Address(Gargs,   0));
+    __ cmp(top_reg, argslot_reg);
+    __ brx(Assembler::greaterUnsigned, false, Assembler::pt, L_loop);
+    __ delayed()->nop();  // FILLME
+    __ BIND(L_break);
+  }
+  BLOCK_COMMENT("} push_arg_slots");
+}
+
+// in-place movement; no change to Gargs
+// blows temp_reg, temp2_reg
+void MethodHandles::move_arg_slots_up(MacroAssembler* _masm,
+                                      Register bottom_reg,  // invariant
+                                      Address  top_addr,    // can use temp_reg
+                                      RegisterOrConstant positive_distance_in_slots,  // destroyed if register
+                                      Register temp_reg, Register temp2_reg) {
+  assert_different_registers(bottom_reg,
+                             temp_reg, temp2_reg,
+                             positive_distance_in_slots.register_or_noreg());
+  BLOCK_COMMENT("move_arg_slots_up {");
+  Label L_loop, L_break;
+  Register top_reg = temp_reg;
+  if (!top_addr.is_same_address(Address(top_reg, 0))) {
+    __ add(top_addr, top_reg);
+  }
+  // Detect empty (or broken) loop:
+#ifdef ASSERT
+  if (VerifyMethodHandles) {
+    // Verify that &bottom < &top (non-empty interval)
+    Label L_ok, L_bad;
+    if (positive_distance_in_slots.is_register()) {
+      __ cmp(positive_distance_in_slots.as_register(), (int32_t) 0);
+      __ br(Assembler::lessEqual, false, Assembler::pn, L_bad);
+      __ delayed()->nop();
+    }
+    __ cmp(bottom_reg, top_reg);
+    __ brx(Assembler::lessUnsigned, false, Assembler::pt, L_ok);
+    __ delayed()->nop();
+    __ BIND(L_bad);
+    __ stop("valid bounds (copy up)");
+    __ BIND(L_ok);
+  }
+#endif
+  __ cmp(bottom_reg, top_reg);
+  __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pn, L_break);
+  __ delayed()->nop();
+  // work top down to bottom, copying contiguous data upwards
+  // In pseudo-code:
+  //   while (--top >= bottom) *(top + distance) = *(top + 0);
+  RegisterOrConstant offset = __ argument_offset(positive_distance_in_slots, positive_distance_in_slots.register_or_noreg());
+  __ BIND(L_loop);
+  __ sub(top_reg, wordSize, top_reg);
+  __ ld_ptr(           Address(top_reg, 0     ), temp2_reg);
+  __ st_ptr(temp2_reg, Address(top_reg, offset)           );
+  __ cmp(top_reg, bottom_reg);
+  __ brx(Assembler::greaterUnsigned, false, Assembler::pt, L_loop);
+  __ delayed()->nop();  // FILLME
+  assert(Interpreter::stackElementSize == wordSize, "else change loop");
+  __ BIND(L_break);
+  BLOCK_COMMENT("} move_arg_slots_up");
+}
+
+// in-place movement; no change to Gargs
+// blows temp_reg, temp2_reg
+void MethodHandles::move_arg_slots_down(MacroAssembler* _masm,
+                                        Address  bottom_addr,  // can use temp_reg
+                                        Register top_reg,      // invariant
+                                        RegisterOrConstant negative_distance_in_slots,  // destroyed if register
+                                        Register temp_reg, Register temp2_reg) {
+  assert_different_registers(top_reg,
+                             negative_distance_in_slots.register_or_noreg(),
+                             temp_reg, temp2_reg);
+  BLOCK_COMMENT("move_arg_slots_down {");
+  Label L_loop, L_break;
+  Register bottom_reg = temp_reg;
+  if (!bottom_addr.is_same_address(Address(bottom_reg, 0))) {
+    __ add(bottom_addr, bottom_reg);
+  }
+  // Detect empty (or broken) loop:
+#ifdef ASSERT
+  assert(!negative_distance_in_slots.is_constant() || negative_distance_in_slots.as_constant() < 0, "");
+  if (VerifyMethodHandles) {
+    // Verify that &bottom < &top (non-empty interval)
+    Label L_ok, L_bad;
+    if (negative_distance_in_slots.is_register()) {
+      __ cmp(negative_distance_in_slots.as_register(), (int32_t) 0);
+      __ br(Assembler::greaterEqual, false, Assembler::pn, L_bad);
+      __ delayed()->nop();
+    }
+    __ cmp(bottom_reg, top_reg);
+    __ brx(Assembler::lessUnsigned, false, Assembler::pt, L_ok);
+    __ delayed()->nop();
+    __ BIND(L_bad);
+    __ stop("valid bounds (copy down)");
+    __ BIND(L_ok);
+  }
+#endif
+  __ cmp(bottom_reg, top_reg);
+  __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pn, L_break);
+  __ delayed()->nop();
+  // work bottom up to top, copying contiguous data downwards
+  // In pseudo-code:
+  //   while (bottom < top) *(bottom - distance) = *(bottom + 0), bottom++;
+  RegisterOrConstant offset = __ argument_offset(negative_distance_in_slots, negative_distance_in_slots.register_or_noreg());
+  __ BIND(L_loop);
+  __ ld_ptr(           Address(bottom_reg, 0     ), temp2_reg);
+  __ st_ptr(temp2_reg, Address(bottom_reg, offset)           );
+  __ add(bottom_reg, wordSize, bottom_reg);
+  __ cmp(bottom_reg, top_reg);
+  __ brx(Assembler::lessUnsigned, false, Assembler::pt, L_loop);
+  __ delayed()->nop();  // FILLME
+  assert(Interpreter::stackElementSize == wordSize, "else change loop");
+  __ BIND(L_break);
+  BLOCK_COMMENT("} move_arg_slots_down");
+}
+
+// Copy from a field or array element to a stacked argument slot.
+// is_element (ignored) says whether caller is loading an array element instead of an instance field.
+void MethodHandles::move_typed_arg(MacroAssembler* _masm,
+                                   BasicType type, bool is_element,
+                                   Address value_src, Address slot_dest,
+                                   Register temp_reg) {
+  assert(!slot_dest.uses(temp_reg), "must be different register");
+  BLOCK_COMMENT(!is_element ? "move_typed_arg {" : "move_typed_arg { (array element)");
+  if (type == T_OBJECT || type == T_ARRAY) {
+    __ load_heap_oop(value_src, temp_reg);
+    __ verify_oop(temp_reg);
+    __ st_ptr(temp_reg, slot_dest);
+  } else if (type != T_VOID) {
+    int  arg_size      = type2aelembytes(type);
+    bool arg_is_signed = is_signed_subword_type(type);
+    int  slot_size     = is_subword_type(type) ? type2aelembytes(T_INT) : arg_size;  // store int sub-words as int
+    __ load_sized_value( value_src, temp_reg, arg_size, arg_is_signed);
+    __ store_sized_value(temp_reg, slot_dest, slot_size              );
+  }
+  BLOCK_COMMENT("} move_typed_arg");
+}
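The sub-word handling above amounts to: load the field at its natural size with the right extension, then store it into the argument slot as a full int. A tiny host-side sketch (not part of the patch) of that widening, using made-up field values:

#include <cstdint>
#include <cassert>

int main() {
  int8_t   field_byte = -5;        // T_BYTE: signed sub-word field
  uint16_t field_char = 0xFFEE;    // T_CHAR: unsigned sub-word field
  // load_sized_value(..., is_signed) widens to a full register value ...
  int32_t slot_b = (int32_t) field_byte;   // sign-extended
  int32_t slot_c = (int32_t) field_char;   // zero-extended
  // ... and store_sized_value then writes a full int-sized slot.
  assert(slot_b == -5);
  assert(slot_c == 0xFFEE);
  return 0;
}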
+
+// Cf. TemplateInterpreterGenerator::generate_return_entry_for and
+// InterpreterMacroAssembler::save_return_value
+void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
+                                      Address return_slot) {
+  BLOCK_COMMENT("move_return_value {");
+  // Look at the type and pull the value out of the corresponding register.
+  if (type == T_VOID) {
+    // nothing to do
+  } else if (type == T_OBJECT) {
+    __ verify_oop(O0);
+    __ st_ptr(O0, return_slot);
+  } else if (type == T_INT || is_subword_type(type)) {
+    int type_size = type2aelembytes(T_INT);
+    __ store_sized_value(O0, return_slot, type_size);
+  } else if (type == T_LONG) {
+    // store the value by parts
+    // Note: We assume longs are contiguous (if misaligned) on the interpreter stack.
+#if !defined(_LP64) && defined(COMPILER2)
+    __ stx(G1, return_slot);
+#else
+  #ifdef _LP64
+    __ stx(O0, return_slot);
+  #else
+    if (return_slot.has_disp()) {
+      // The displacement is a constant
+      __ st(O0, return_slot);
+      __ st(O1, return_slot.plus_disp(Interpreter::stackElementSize));
+    } else {
+      __ std(O0, return_slot);
+    }
+  #endif
+#endif
+  } else if (type == T_FLOAT) {
+    __ stf(FloatRegisterImpl::S, Ftos_f, return_slot);
+  } else if (type == T_DOUBLE) {
+    __ stf(FloatRegisterImpl::D, Ftos_f, return_slot);
+  } else {
+    ShouldNotReachHere();
+  }
+  BLOCK_COMMENT("} move_return_value");
+}
 
 #ifndef PRODUCT
 extern "C" void print_method_handle(oop mh);
 void trace_method_handle_stub(const char* adaptername,
                               oopDesc* mh,
                               intptr_t* saved_sp) {
+  bool has_mh = (strstr(adaptername, "return/") == NULL);  // return adapters don't have mh
   tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp);
-  print_method_handle(mh);
+  if (has_mh)
+    print_method_handle(mh);
 }
 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
   if (!TraceMethodHandles)  return;
@@ -367,13 +1080,21 @@
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
+          // OP_PRIM_TO_REF is below...
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
-         //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
+          // OP_COLLECT_ARGS is below...
+         |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
+         |(!UseRicochetFrames ? 0 :
+           java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
+           ((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
+           |(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
+           |(1<<java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS)
+           )
+          )
          );
-  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
 }
 
 //------------------------------------------------------------------------------
@@ -382,19 +1103,25 @@
 // Generate an "entry" field for a method handle.
 // This determines how the method handle will respond to calls.
 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
+  MethodHandles::EntryKind ek_orig = ek_original_kind(ek);
+
   // Here is the register state during an interpreted call,
   // as set up by generate_method_handle_interpreter_entry():
   // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
   // - G3: receiver method handle
   // - O5_savedSP: sender SP (must preserve)
 
-  const Register O0_argslot = O0;
+  const Register O0_scratch = O0;
   const Register O1_scratch = O1;
   const Register O2_scratch = O2;
   const Register O3_scratch = O3;
-  const Register G5_index   = G5;
+  const Register O4_scratch = O4;
+  const Register G5_scratch = G5;
 
-  // Argument registers for _raise_exception.
+  // Often used names:
+  const Register O0_argslot = O0;
+
+  // Argument registers for _raise_exception:
   const Register O0_code     = O0;
   const Register O1_actual   = O1;
   const Register O2_required = O2;
@@ -402,9 +1129,6 @@
   guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
 
   // Some handy addresses:
-  Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));
-  Address G5_method_fce(    G5_method,        in_bytes(methodOopDesc::from_compiled_offset()));
-
   Address G3_mh_vmtarget(   G3_method_handle, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes());
 
   Address G3_dmh_vmindex(   G3_method_handle, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes());
@@ -427,38 +1151,29 @@
 
   trace_method_handle(_masm, entry_name(ek));
 
+  BLOCK_COMMENT(err_msg("Entry %s {", entry_name(ek)));
+
   switch ((int) ek) {
   case _raise_exception:
     {
       // Not a real MH entry, but rather shared code for raising an
-      // exception.  Since we use the compiled entry, arguments are
-      // expected in compiler argument registers.
+      // exception.  For sharing purposes the arguments are passed in registers
+      // and then placed in the interpreter calling convention here.
       assert(raise_exception_method(), "must be set");
       assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
 
-      __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.
-
-      Label L_no_method;
-      // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
       __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
       __ ld_ptr(Address(G5_method, 0), G5_method);
-      __ tst(G5_method);
-      __ brx(Assembler::zero, false, Assembler::pn, L_no_method);
-      __ delayed()->nop();
 
       const int jobject_oop_offset = 0;
       __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
-      __ tst(G5_method);
-      __ brx(Assembler::zero, false, Assembler::pn, L_no_method);
-      __ delayed()->nop();
+
+      adjust_SP_and_Gargs_down_by_slots(_masm, 3, noreg, noreg);
 
-      __ verify_oop(G5_method);
-      __ jump_indirect_to(G5_method_fce, O3_scratch);  // jump to compiled entry
-      __ delayed()->nop();
-
-      // Do something that is at least causes a valid throw from the interpreter.
-      __ bind(L_no_method);
-      __ unimplemented("call throw_WrongMethodType_entry");
+      __ st_ptr(O0_code,     __ argument_address(constant(2), noreg, 0));
+      __ st_ptr(O1_actual,   __ argument_address(constant(1), noreg, 0));
+      __ st_ptr(O2_required, __ argument_address(constant(0), noreg, 0));
+      jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
     }
     break;
 
@@ -466,18 +1181,16 @@
   case _invokespecial_mh:
     {
       __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
-      __ verify_oop(G5_method);
       // Same as TemplateTable::invokestatic or invokespecial,
       // minus the CP setup and profiling:
       if (ek == _invokespecial_mh) {
         // Must load & check the first argument before entering the target method.
         __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
-        __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
+        __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
         __ null_check(G3_method_handle);
         __ verify_oop(G3_method_handle);
       }
-      __ jump_indirect_to(G5_method_fie, O1_scratch);
-      __ delayed()->nop();
+      jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
     }
     break;
 
@@ -488,10 +1201,11 @@
 
       // Pick out the vtable index and receiver offset from the MH,
       // and then we can discard it:
+      Register O2_index = O2_scratch;
       __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
-      __ ldsw(G3_dmh_vmindex, G5_index);
+      __ ldsw(G3_dmh_vmindex, O2_index);
       // Note:  The verifier allows us to ignore G3_mh_vmtarget.
-      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
+      __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
       __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
 
       // Get receiver klass:
@@ -503,14 +1217,12 @@
       const int base = instanceKlass::vtable_start_offset() * wordSize;
       assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
 
-      __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
-      __ add(O0_klass, G5_index, O0_klass);
+      __ sll_ptr(O2_index, LogBytesPerWord, O2_index);
+      __ add(O0_klass, O2_index, O0_klass);
       Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
       __ ld_ptr(vtable_entry_addr, G5_method);
 
-      __ verify_oop(G5_method);
-      __ jump_indirect_to(G5_method_fie, O1_scratch);
-      __ delayed()->nop();
+      jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
     }
     break;
 
@@ -520,9 +1232,10 @@
       // minus the CP setup and profiling:
       __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
       Register O1_intf  = O1_scratch;
+      Register G5_index = G5_scratch;
       __ load_heap_oop(G3_mh_vmtarget, O1_intf);
       __ ldsw(G3_dmh_vmindex, G5_index);
-      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
+      __ ld_ptr(__ argument_address(O0_argslot, O0_argslot, -1), G3_method_handle);
       __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
 
       // Get receiver klass:
@@ -540,9 +1253,7 @@
                                  O3_scratch,
                                  no_such_interface);
 
-      __ verify_oop(G5_method);
-      __ jump_indirect_to(G5_method_fie, O1_scratch);
-      __ delayed()->nop();
+      jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
 
       __ bind(no_such_interface);
       // Throw an exception.
@@ -563,16 +1274,14 @@
   case _bound_long_direct_mh:
     {
       const bool direct_to_method = (ek >= _bound_ref_direct_mh);
-      BasicType arg_type  = T_ILLEGAL;
-      int       arg_mask  = _INSERT_NO_MASK;
-      int       arg_slots = -1;
-      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
+      BasicType arg_type  = ek_bound_mh_arg_type(ek);
+      int       arg_slots = type2size[arg_type];
 
       // Make room for the new argument:
-      __ ldsw(G3_bmh_vmargslot, O0_argslot);
-      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+      load_vmargslot(_masm, G3_bmh_vmargslot, O0_argslot);
+      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
 
-      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);
+      insert_arg_slots(_masm, arg_slots * stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
 
       // Store bound argument into the new stack slot:
       __ load_heap_oop(G3_bmh_argument, O1_scratch);
@@ -580,16 +1289,15 @@
         __ st_ptr(O1_scratch, Address(O0_argslot, 0));
       } else {
         Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
-        const int arg_size = type2aelembytes(arg_type);
-        __ load_sized_value(prim_value_addr, O2_scratch, arg_size, is_signed_subword_type(arg_type));
-        __ store_sized_value(O2_scratch, Address(O0_argslot, 0), arg_size);  // long store uses O2/O3 on !_LP64
+        move_typed_arg(_masm, arg_type, false,
+                       prim_value_addr,
+                       Address(O0_argslot, 0),
+                       O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
       }
 
       if (direct_to_method) {
         __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
-        __ verify_oop(G5_method);
-        __ jump_indirect_to(G5_method_fie, O1_scratch);
-        __ delayed()->nop();
+        jump_from_method_handle(_masm, G5_method, O1_scratch, O2_scratch);
       } else {
         __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);  // target is a methodOop
         __ verify_oop(G3_method_handle);
@@ -602,6 +1310,7 @@
   case _adapter_retype_raw:
     // Immediately jump to the next MH layer:
     __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
+    __ verify_oop(G3_method_handle);
     __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
     // This is OK when all parameter types widen.
     // It is also OK when a return type narrows.
@@ -609,30 +1318,28 @@
 
   case _adapter_check_cast:
     {
-      // Temps:
-      Register G5_klass = G5_index;  // Interesting AMH data.
-
       // Check a reference argument before jumping to the next layer of MH:
-      __ ldsw(G3_amh_vmargslot, O0_argslot);
-      Address vmarg = __ argument_address(O0_argslot);
+      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
+      Address vmarg = __ argument_address(O0_argslot, O0_argslot);
 
       // What class are we casting to?
-      __ load_heap_oop(G3_amh_argument, G5_klass);  // This is a Class object!
-      __ load_heap_oop(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);
+      Register O1_klass = O1_scratch;  // Interesting AMH data.
+      __ load_heap_oop(G3_amh_argument, O1_klass);  // This is a Class object!
+      load_klass_from_Class(_masm, O1_klass, O2_scratch, O3_scratch);
 
-      Label done;
-      __ ld_ptr(vmarg, O1_scratch);
-      __ tst(O1_scratch);
-      __ brx(Assembler::zero, false, Assembler::pn, done);  // No cast if null.
+      Label L_done;
+      __ ld_ptr(vmarg, O2_scratch);
+      __ tst(O2_scratch);
+      __ brx(Assembler::zero, false, Assembler::pn, L_done);  // No cast if null.
       __ delayed()->nop();
-      __ load_klass(O1_scratch, O1_scratch);
+      __ load_klass(O2_scratch, O2_scratch);
 
       // Live at this point:
-      // - G5_klass        :  klass required by the target method
       // - O0_argslot      :  argslot index in vmarg; may be required in the failing path
-      // - O1_scratch      :  argument klass to test
+      // - O1_klass        :  klass required by the target method
+      // - O2_scratch      :  argument klass to test
       // - G3_method_handle:  adapter method handle
-      __ check_klass_subtype(O1_scratch, G5_klass, O2_scratch, O3_scratch, done);
+      __ check_klass_subtype(O2_scratch, O1_klass, O3_scratch, O4_scratch, L_done);
 
       // If we get here, the type check failed!
       __ load_heap_oop(G3_amh_argument,        O2_required);  // required class
@@ -640,7 +1347,7 @@
       __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
       __ delayed()->mov(Bytecodes::_checkcast, O0_code);      // who is complaining?
 
-      __ bind(done);
+      __ BIND(L_done);
       // Get the new MH:
       __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
@@ -659,14 +1366,14 @@
   case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
     {
       // Perform an in-place conversion to int or an int subword.
-      __ ldsw(G3_amh_vmargslot, O0_argslot);
+      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
       Address value;
-      Address vmarg = __ argument_address(O0_argslot);
+      Address vmarg;
       bool value_left_justified = false;
 
       switch (ek) {
       case _adapter_opt_i2i:
-        value = vmarg;
+        value = vmarg = __ argument_address(O0_argslot, O0_argslot);
         break;
       case _adapter_opt_l2i:
         {
@@ -675,13 +1382,13 @@
           // In V9, longs are given 2 64-bit slots in the interpreter, but the
           // data is passed in only 1 slot.
           // Keep the second slot.
-          __ add(Gargs, __ argument_offset(O0_argslot, -1), O0_argslot);
+          __ add(__ argument_address(O0_argslot, O0_argslot, -1), O0_argslot);
           remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
           value = Address(O0_argslot, 4);  // Get least-significant 32-bit of 64-bit value.
           vmarg = Address(O0_argslot, Interpreter::stackElementSize);
 #else
           // Keep the first slot.
-          __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+          __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
           remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
           value = Address(O0_argslot, 0);
           vmarg = value;
@@ -690,6 +1397,7 @@
         break;
       case _adapter_opt_unboxi:
         {
+          vmarg = __ argument_address(O0_argslot, O0_argslot);
           // Load the value up from the heap.
           __ ld_ptr(vmarg, O1_scratch);
           int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
@@ -712,7 +1420,7 @@
       }
 
       // This check is required on _BIG_ENDIAN
-      Register G5_vminfo = G5_index;
+      Register G5_vminfo = G5_scratch;
       __ ldsw(G3_amh_conversion, G5_vminfo);
       assert(CONV_VMINFO_SHIFT == 0, "preshifted");
 
@@ -748,13 +1456,13 @@
   case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
     {
       // Perform an in-place int-to-long or ref-to-long conversion.
-      __ ldsw(G3_amh_vmargslot, O0_argslot);
+      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
 
       // On big-endian machine we duplicate the slot and store the MSW
       // in the first slot.
-      __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);
+      __ add(__ argument_address(O0_argslot, O0_argslot, 1), O0_argslot);
 
-      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);
+      insert_arg_slots(_masm, stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
 
       Address arg_lsw(O0_argslot, 0);
       Address arg_msw(O0_argslot, -Interpreter::stackElementSize);
@@ -816,103 +1524,84 @@
   case _adapter_opt_rot_2_up:
   case _adapter_opt_rot_2_down:
     {
-      int swap_bytes = 0, rotate = 0;
-      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
+      int swap_slots = ek_adapter_opt_swap_slots(ek);
+      int rotate     = ek_adapter_opt_swap_mode(ek);
 
       // 'argslot' is the position of the first argument to swap.
-      __ ldsw(G3_amh_vmargslot, O0_argslot);
-      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
+      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
+      if (VerifyMethodHandles)
+        verify_argslot(_masm, O0_argslot, O2_scratch, "swap point must fall within current frame");
 
       // 'vminfo' is the second.
       Register O1_destslot = O1_scratch;
-      __ ldsw(G3_amh_conversion, O1_destslot);
-      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
-      __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
-      __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);
+      load_conversion_vminfo(_masm, G3_amh_conversion, O1_destslot);
+      __ add(__ argument_address(O1_destslot, O1_destslot), O1_destslot);
+      if (VerifyMethodHandles)
+        verify_argslot(_masm, O1_destslot, O2_scratch, "swap point must fall within current frame");
 
+      assert(Interpreter::stackElementSize == wordSize, "else rethink use of wordSize here");
       if (!rotate) {
-        for (int i = 0; i < swap_bytes; i += wordSize) {
-          __ ld_ptr(Address(O0_argslot,  i), O2_scratch);
-          __ ld_ptr(Address(O1_destslot, i), O3_scratch);
-          __ st_ptr(O3_scratch, Address(O0_argslot,  i));
-          __ st_ptr(O2_scratch, Address(O1_destslot, i));
+        // simple swap
+        for (int i = 0; i < swap_slots; i++) {
+          __ ld_ptr(            Address(O0_argslot,  i * wordSize), O2_scratch);
+          __ ld_ptr(            Address(O1_destslot, i * wordSize), O3_scratch);
+          __ st_ptr(O3_scratch, Address(O0_argslot,  i * wordSize));
+          __ st_ptr(O2_scratch, Address(O1_destslot, i * wordSize));
         }
       } else {
-        // Save the first chunk, which is going to get overwritten.
-        switch (swap_bytes) {
-        case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
-        case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru
-        case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
-        default: ShouldNotReachHere();
+        // A rotate is actually a pair of moves, with an "odd slot" (or pair)
+        // changing place with a series of other slots.
+        // First, push the "odd slot", which is going to get overwritten.
+        switch (swap_slots) {
+        case 2 :  __ ld_ptr(Address(O0_argslot, 1 * wordSize), O4_scratch); // fall-thru
+        case 1 :  __ ld_ptr(Address(O0_argslot, 0 * wordSize), O3_scratch); break;
+        default:  ShouldNotReachHere();
         }
-
         if (rotate > 0) {
-          // Rorate upward.
-          __ sub(O0_argslot, swap_bytes, O0_argslot);
-#if ASSERT
-          {
-            // Verify that argslot > destslot, by at least swap_bytes.
-            Label L_ok;
-            __ cmp(O0_argslot, O1_destslot);
-            __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
-            __ delayed()->nop();
-            __ stop("source must be above destination (upward rotation)");
-            __ bind(L_ok);
-          }
-#endif
-          // Work argslot down to destslot, copying contiguous data upwards.
-          // Pseudo-code:
+          // Here is rotate > 0:
+          // (low mem)                                          (high mem)
+          //     | dest:     more_slots...     | arg: odd_slot :arg+1 |
+          // =>
+          //     | dest: odd_slot | dest+1: more_slots...      :arg+1 |
+          // work argslot down to destslot, copying contiguous data upwards
+          // pseudo-code:
           //   argslot  = src_addr - swap_bytes
           //   destslot = dest_addr
-          //   while (argslot >= destslot) {
-          //     *(argslot + swap_bytes) = *(argslot + 0);
-          //     argslot--;
-          //   }
-          Label loop;
-          __ bind(loop);
-          __ ld_ptr(Address(O0_argslot, 0), G5_index);
-          __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
-          __ sub(O0_argslot, wordSize, O0_argslot);
-          __ cmp(O0_argslot, O1_destslot);
-          __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
-          __ delayed()->nop();  // FILLME
+          //   while (argslot >= destslot) *(argslot + swap_bytes) = *(argslot + 0), argslot--;
+          move_arg_slots_up(_masm,
+                            O1_destslot,
+                            Address(O0_argslot, 0),
+                            swap_slots,
+                            O0_argslot, O2_scratch);
         } else {
-          __ add(O0_argslot, swap_bytes, O0_argslot);
-#if ASSERT
-          {
-            // Verify that argslot < destslot, by at least swap_bytes.
-            Label L_ok;
-            __ cmp(O0_argslot, O1_destslot);
-            __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
-            __ delayed()->nop();
-            __ stop("source must be above destination (upward rotation)");
-            __ bind(L_ok);
-          }
-#endif
-          // Work argslot up to destslot, copying contiguous data downwards.
-          // Pseudo-code:
+          // Here is the other direction, rotate < 0:
+          // (low mem)                                          (high mem)
+          //     | arg: odd_slot | arg+1: more_slots...       :dest+1 |
+          // =>
+          //     | arg:    more_slots...     | dest: odd_slot :dest+1 |
+          // work argslot up to destslot, copying contiguous data downwards
+          // pseudo-code:
           //   argslot  = src_addr + swap_bytes
           //   destslot = dest_addr
-          //   while (argslot >= destslot) {
-          //     *(argslot - swap_bytes) = *(argslot + 0);
-          //     argslot++;
-          //   }
-          Label loop;
-          __ bind(loop);
-          __ ld_ptr(Address(O0_argslot, 0), G5_index);
-          __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
-          __ add(O0_argslot, wordSize, O0_argslot);
-          __ cmp(O0_argslot, O1_destslot);
-          __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
-          __ delayed()->nop();  // FILLME
+          //   while (argslot <= destslot) *(argslot - swap_bytes) = *(argslot + 0), argslot++;
+          // dest_slot denotes an exclusive upper limit
+          int limit_bias = OP_ROT_ARGS_DOWN_LIMIT_BIAS;
+          if (limit_bias != 0)
+            __ add(O1_destslot, - limit_bias * wordSize, O1_destslot);
+          move_arg_slots_down(_masm,
+                              Address(O0_argslot, swap_slots * wordSize),
+                              O1_destslot,
+                              -swap_slots,
+                              O0_argslot, O2_scratch);
+
+          __ sub(O1_destslot, swap_slots * wordSize, O1_destslot);
         }
-
-        // Store the original first chunk into the destination slot, now free.
-        switch (swap_bytes) {
-        case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
-        case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
-        case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
-        default: ShouldNotReachHere();
+        // pop the original first chunk into the destination slot, now free
+        switch (swap_slots) {
+        case 2 :  __ st_ptr(O4_scratch, Address(O1_destslot, 1 * wordSize)); // fall-thru
+        case 1 :  __ st_ptr(O3_scratch, Address(O1_destslot, 0 * wordSize)); break;
+        default:  ShouldNotReachHere();
         }
       }
 
@@ -924,41 +1613,21 @@
   case _adapter_dup_args:
     {
       // 'argslot' is the position of the first argument to duplicate.
-      __ ldsw(G3_amh_vmargslot, O0_argslot);
-      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
+      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
 
       // 'stack_move' is negative number of words to duplicate.
-      Register G5_stack_move = G5_index;
-      __ ldsw(G3_amh_conversion, G5_stack_move);
-      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
-
-      // Remember the old Gargs (argslot[0]).
-      Register O1_oldarg = O1_scratch;
-      __ mov(Gargs, O1_oldarg);
-
-      // Move Gargs down to make room for dups.
-      __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
-      __ add(Gargs, G5_stack_move, Gargs);
-
-      // Compute the new Gargs (argslot[0]).
-      Register O2_newarg = O2_scratch;
-      __ mov(Gargs, O2_newarg);
+      Register O1_stack_move = O1_scratch;
+      load_stack_move(_masm, G3_amh_conversion, O1_stack_move);
 
-      // Copy from oldarg[0...] down to newarg[0...]
-      // Pseude-code:
-      //   O1_oldarg  = old-Gargs
-      //   O2_newarg  = new-Gargs
-      //   O0_argslot = argslot
-      //   while (O2_newarg < O1_oldarg) *O2_newarg = *O0_argslot++
-      Label loop;
-      __ bind(loop);
-      __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
-      __ st_ptr(O3_scratch, Address(O2_newarg, 0));
-      __ add(O0_argslot, wordSize, O0_argslot);
-      __ add(O2_newarg,  wordSize, O2_newarg);
-      __ cmp(O2_newarg, O1_oldarg);
-      __ brx(Assembler::less, false, Assembler::pt, loop);
-      __ delayed()->nop();  // FILLME
+      if (VerifyMethodHandles) {
+        verify_argslots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, true,
+                        "copied argument(s) must fall within current frame");
+      }
+
+      // insert location is always the bottom of the argument list:
+      __ neg(O1_stack_move);
+      push_arg_slots(_masm, O0_argslot, O1_stack_move, O2_scratch, O3_scratch);
 
       __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
@@ -968,15 +1637,14 @@
   case _adapter_drop_args:
     {
       // 'argslot' is the position of the first argument to nuke.
-      __ ldsw(G3_amh_vmargslot, O0_argslot);
-      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
+      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
 
       // 'stack_move' is number of words to drop.
-      Register G5_stack_move = G5_index;
-      __ ldsw(G3_amh_conversion, G5_stack_move);
-      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
+      Register O1_stack_move = O1_scratch;
+      load_stack_move(_masm, G3_amh_conversion, O1_stack_move);
 
-      remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);
+      remove_arg_slots(_masm, O1_stack_move, O0_argslot, O2_scratch, O3_scratch, O4_scratch);
 
       __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
       __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
@@ -984,31 +1652,686 @@
     break;
 
   case _adapter_collect_args:
-    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
-    break;
-
+  case _adapter_fold_args:
   case _adapter_spread_args:
     // Handled completely by optimized cases.
     __ stop("init_AdapterMethodHandle should not issue this");
     break;
 
-  case _adapter_opt_spread_0:
-  case _adapter_opt_spread_1:
-  case _adapter_opt_spread_more:
+  case _adapter_opt_collect_ref:
+  case _adapter_opt_collect_int:
+  case _adapter_opt_collect_long:
+  case _adapter_opt_collect_float:
+  case _adapter_opt_collect_double:
+  case _adapter_opt_collect_void:
+  case _adapter_opt_collect_0_ref:
+  case _adapter_opt_collect_1_ref:
+  case _adapter_opt_collect_2_ref:
+  case _adapter_opt_collect_3_ref:
+  case _adapter_opt_collect_4_ref:
+  case _adapter_opt_collect_5_ref:
+  case _adapter_opt_filter_S0_ref:
+  case _adapter_opt_filter_S1_ref:
+  case _adapter_opt_filter_S2_ref:
+  case _adapter_opt_filter_S3_ref:
+  case _adapter_opt_filter_S4_ref:
+  case _adapter_opt_filter_S5_ref:
+  case _adapter_opt_collect_2_S0_ref:
+  case _adapter_opt_collect_2_S1_ref:
+  case _adapter_opt_collect_2_S2_ref:
+  case _adapter_opt_collect_2_S3_ref:
+  case _adapter_opt_collect_2_S4_ref:
+  case _adapter_opt_collect_2_S5_ref:
+  case _adapter_opt_fold_ref:
+  case _adapter_opt_fold_int:
+  case _adapter_opt_fold_long:
+  case _adapter_opt_fold_float:
+  case _adapter_opt_fold_double:
+  case _adapter_opt_fold_void:
+  case _adapter_opt_fold_1_ref:
+  case _adapter_opt_fold_2_ref:
+  case _adapter_opt_fold_3_ref:
+  case _adapter_opt_fold_4_ref:
+  case _adapter_opt_fold_5_ref:
     {
-      // spread an array out into a group of arguments
-      __ unimplemented(entry_name(ek));
+      // Given a fresh incoming stack frame, build a new ricochet frame.
+      // On entry, FP is the caller's frame ptr, and O5_savedSP has the caller's
+      // exact stack pointer, which we must also preserve.
+      // G3_method_handle contains an AdapterMethodHandle of the indicated kind.
+
+      // Relevant AMH fields:
+      // amh.vmargslot:
+      //   points to the trailing edge of the arguments
+      //   to filter, collect, or fold.  For a boxing operation,
+      //   it points just after the single primitive value.
+      // amh.argument:
+      //   recursively called MH, on |collect| arguments
+      // amh.vmtarget:
+      //   final destination MH, on return value, etc.
+      // amh.conversion.dest:
+      //   tells what is the type of the return value
+      //   (not needed here, since dest is also derived from ek)
+      // amh.conversion.vminfo:
+      //   points to the trailing edge of the return value
+      //   when the vmtarget is to be called; this is
+      //   equal to vmargslot + (retained ? |collect| : 0)
+
+      // Pass 0 or more argument slots to the recursive target.
+      int collect_count_constant = ek_adapter_opt_collect_count(ek);
+
+      // The collected arguments are copied from the saved argument list:
+      int collect_slot_constant = ek_adapter_opt_collect_slot(ek);
+
+      assert(ek_orig == _adapter_collect_args ||
+             ek_orig == _adapter_fold_args, "");
+      bool retain_original_args = (ek_orig == _adapter_fold_args);
+
+      // The return value is replaced (or inserted) at the 'vminfo' argslot.
+      // Sometimes we can compute this statically.
+      int dest_slot_constant = -1;
+      if (!retain_original_args)
+        dest_slot_constant = collect_slot_constant;
+      else if (collect_slot_constant >= 0 && collect_count_constant >= 0)
+        // We are preserving all the arguments, and the return value is prepended,
+        // so the return slot is to the left (above) the |collect| sequence.
+        dest_slot_constant = collect_slot_constant + collect_count_constant;
+
+      // Replace all those slots by the result of the recursive call.
+      // The result type can be one of ref, int, long, float, double, void.
+      // In the case of void, nothing is pushed on the stack after return.
+      BasicType dest = ek_adapter_opt_collect_type(ek);
+      assert(dest == type2wfield[dest], "dest is a stack slot type");
+      int dest_count = type2size[dest];
+      assert(dest_count == 1 || dest_count == 2 || (dest_count == 0 && dest == T_VOID), "dest has a size");
+
+      // Choose a return continuation.
+      EntryKind ek_ret = _adapter_opt_return_any;
+      if (dest != T_CONFLICT && OptimizeMethodHandles) {
+        switch (dest) {
+        case T_INT    : ek_ret = _adapter_opt_return_int;     break;
+        case T_LONG   : ek_ret = _adapter_opt_return_long;    break;
+        case T_FLOAT  : ek_ret = _adapter_opt_return_float;   break;
+        case T_DOUBLE : ek_ret = _adapter_opt_return_double;  break;
+        case T_OBJECT : ek_ret = _adapter_opt_return_ref;     break;
+        case T_VOID   : ek_ret = _adapter_opt_return_void;    break;
+        default       : ShouldNotReachHere();
+        }
+        if (dest == T_OBJECT && dest_slot_constant >= 0) {
+          EntryKind ek_try = EntryKind(_adapter_opt_return_S0_ref + dest_slot_constant);
+          if (ek_try <= _adapter_opt_return_LAST &&
+              ek_adapter_opt_return_slot(ek_try) == dest_slot_constant) {
+            ek_ret = ek_try;
+          }
+        }
+        assert(ek_adapter_opt_return_type(ek_ret) == dest, "");
+      }
+
+      // Already pushed:  ... keep1 | collect | keep2 |
+
+      // Push a few extra argument words, if we need them to store the return value.
+      {
+        int extra_slots = 0;
+        if (retain_original_args) {
+          extra_slots = dest_count;
+        } else if (collect_count_constant == -1) {
+          extra_slots = dest_count;  // collect_count might be zero; be generous
+        } else if (dest_count > collect_count_constant) {
+          extra_slots = (dest_count - collect_count_constant);
+        } else {
+          // else we know we have enough dead space in |collect| to repurpose for return values
+        }
+        if (extra_slots != 0) {
+          __ sub(SP, round_to(extra_slots, 2) * Interpreter::stackElementSize, SP);
+        }
+      }
+
+      // Set up Ricochet Frame.
+      __ mov(SP, O5_savedSP);  // record SP for the callee
+
+      // One extra (empty) slot for outgoing target MH (see Gargs computation below).
+      __ save_frame(2);  // Note: we need to add 2 slots since frame::memory_parameter_word_sp_offset is 23.
+
+      // Note: Gargs is live throughout the following, until we make our recursive call.
+      // And the RF saves a copy in L4_saved_args_base.
+
+      RicochetFrame::enter_ricochet_frame(_masm, G3_method_handle, Gargs,
+                                          entry(ek_ret)->from_interpreted_entry());
+
+      // Compute argument base:
+      // Set up Gargs for current frame, extra (empty) slot is for outgoing target MH (space reserved by save_frame above).
+      __ add(FP, STACK_BIAS - (1 * Interpreter::stackElementSize), Gargs);
+
+      // Now pushed:  ... keep1 | collect | keep2 | extra | [RF]
+
+#ifdef ASSERT
+      if (VerifyMethodHandles && dest != T_CONFLICT) {
+        BLOCK_COMMENT("verify AMH.conv.dest {");
+        extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O1_scratch);
+        Label L_dest_ok;
+        __ cmp(O1_scratch, (int) dest);
+        __ br(Assembler::equal, false, Assembler::pt, L_dest_ok);
+        __ delayed()->nop();
+        if (dest == T_INT) {
+          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
+            if (is_subword_type(BasicType(bt))) {
+              __ cmp(O1_scratch, (int) bt);
+              __ br(Assembler::equal, false, Assembler::pt, L_dest_ok);
+              __ delayed()->nop();
+            }
+          }
+        }
+        __ stop("bad dest in AMH.conv");
+        __ BIND(L_dest_ok);
+        BLOCK_COMMENT("} verify AMH.conv.dest");
+      }
+#endif //ASSERT
+
+      // Find out where the original copy of the recursive argument sequence begins.
+      Register O0_coll = O0_scratch;
+      {
+        RegisterOrConstant collect_slot = collect_slot_constant;
+        if (collect_slot_constant == -1) {
+          load_vmargslot(_masm, G3_amh_vmargslot, O1_scratch);
+          collect_slot = O1_scratch;
+        }
+        // collect_slot might be 0, but we need the move anyway.
+        __ add(RicochetFrame::L4_saved_args_base, __ argument_offset(collect_slot, collect_slot.register_or_noreg()), O0_coll);
+        // O0_coll now points at the trailing edge of |collect| and leading edge of |keep2|
+      }
+
+      // Replace the old AMH with the recursive MH.  (No going back now.)
+      // In the case of a boxing call, the recursive call is to a 'boxer' method,
+      // such as Integer.valueOf or Long.valueOf.  In the case of a filter
+      // or collect call, it will take one or more arguments, transform them,
+      // and return some result, to store back into argument_base[vminfo].
+      __ load_heap_oop(G3_amh_argument, G3_method_handle);
+      if (VerifyMethodHandles)  verify_method_handle(_masm, G3_method_handle, O1_scratch, O2_scratch);
+
+      // Calculate |collect|, the number of arguments we are collecting.
+      Register O1_collect_count = O1_scratch;
+      RegisterOrConstant collect_count;
+      if (collect_count_constant < 0) {
+        __ load_method_handle_vmslots(O1_collect_count, G3_method_handle, O2_scratch);
+        collect_count = O1_collect_count;
+      } else {
+        collect_count = collect_count_constant;
+#ifdef ASSERT
+        if (VerifyMethodHandles) {
+          BLOCK_COMMENT("verify collect_count_constant {");
+          __ load_method_handle_vmslots(O3_scratch, G3_method_handle, O2_scratch);
+          Label L_count_ok;
+          __ cmp(O3_scratch, collect_count_constant);
+          __ br(Assembler::equal, false, Assembler::pt, L_count_ok);
+          __ delayed()->nop();
+          __ stop("bad vminfo in AMH.conv");
+          __ BIND(L_count_ok);
+          BLOCK_COMMENT("} verify collect_count_constant");
+        }
+#endif //ASSERT
+      }
+
+      // copy |collect| slots directly to TOS:
+      push_arg_slots(_masm, O0_coll, collect_count, O2_scratch, O3_scratch);
+      // Now pushed:  ... keep1 | collect | keep2 | RF... | collect |
+      // O0_coll still points at the trailing edge of |collect| and leading edge of |keep2|
+
+      // If necessary, adjust the saved arguments to make room for the eventual return value.
+      // Normal adjustment:  ... keep1 | +dest+ | -collect- | keep2 | RF... | collect |
+      // If retaining args:  ... keep1 | +dest+ |  collect  | keep2 | RF... | collect |
+      // In the non-retaining case, this might move keep2 either up or down.
+      // We don't have to copy the whole | RF... collect | complex,
+      // but we must adjust RF.saved_args_base.
+      // Also, from now on, we will forget about the original copy of |collect|.
+      // If we are retaining it, we will treat it as part of |keep2|.
+      // For clarity we will define |keep3| = |collect|keep2| or |keep2|.
+
+      BLOCK_COMMENT("adjust trailing arguments {");
+      // Compare the sizes of |+dest+| and |-collect-|, which are opposed opening and closing movements.
+      int                open_count  = dest_count;
+      RegisterOrConstant close_count = collect_count_constant;
+      Register O1_close_count = O1_collect_count;
+      if (retain_original_args) {
+        close_count = constant(0);
+      } else if (collect_count_constant == -1) {
+        close_count = O1_collect_count;
+      }
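+      // Example: an int result (open_count == 1) replacing a two-slot collected
+      // argument (close_count == 2) means |keep3| will later slide up by one slot;
+      // retaining the originals forces close_count to 0, so |keep3| slides down
+      // by dest_count instead, opening space for the return value.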
+
+      // How many slots need moving?  This is simply dest_slot (0 => no |keep3|).
+      RegisterOrConstant keep3_count;
+      Register O2_keep3_count = O2_scratch;
+      if (dest_slot_constant < 0) {
+        extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O2_keep3_count);
+        keep3_count = O2_keep3_count;
+      } else  {
+        keep3_count = dest_slot_constant;
+#ifdef ASSERT
+        if (VerifyMethodHandles) {
+          BLOCK_COMMENT("verify dest_slot_constant {");
+          extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O3_scratch);
+          Label L_vminfo_ok;
+          __ cmp(O3_scratch, dest_slot_constant);
+          __ br(Assembler::equal, false, Assembler::pt, L_vminfo_ok);
+          __ delayed()->nop();
+          __ stop("bad vminfo in AMH.conv");
+          __ BIND(L_vminfo_ok);
+          BLOCK_COMMENT("} verify dest_slot_constant");
+        }
+#endif //ASSERT
+      }
+
+      // tasks remaining:
+      bool move_keep3 = (!keep3_count.is_constant() || keep3_count.as_constant() != 0);
+      bool stomp_dest = (NOT_DEBUG(dest == T_OBJECT) DEBUG_ONLY(dest_count != 0));
+      bool fix_arg_base = (!close_count.is_constant() || open_count != close_count.as_constant());
+
+      // Old and new argument locations (based at slot 0).
+      // Net shift (&new_argv - &old_argv) is (close_count - open_count).
+      bool zero_open_count = (open_count == 0);  // remember this bit of info
+      if (move_keep3 && fix_arg_base) {
+        // It will be easier to have everything in one register:
+        if (close_count.is_register()) {
+          // Deduct open_count from close_count register to get a clean +/- value.
+          __ sub(close_count.as_register(), open_count, close_count.as_register());
+        } else {
+          close_count = close_count.as_constant() - open_count;
+        }
+        open_count = 0;
+      }
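+      // After this normalization close_count holds the signed net shift
+      // (original close_count minus open_count) and open_count is zero, so the
+      // comparisons below reduce to sign checks; e.g. open 1, close 3 becomes
+      // a net upward move of 2 slots.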
+      Register L4_old_argv = RicochetFrame::L4_saved_args_base;
+      Register O3_new_argv = O3_scratch;
+      if (fix_arg_base) {
+        __ add(L4_old_argv, __ argument_offset(close_count, O4_scratch), O3_new_argv,
+               -(open_count * Interpreter::stackElementSize));
+      }
+
+      // First decide if any actual data are to be moved.
+      // We can skip if (a) |keep3| is empty, or (b) the argument list size didn't change.
+      // (As it happens, all movements involve an argument list size change.)
+
+      // If there are variable parameters, use dynamic checks to skip around the whole mess.
+      Label L_done;
+      if (keep3_count.is_register()) {
+        __ tst(keep3_count.as_register());
+        __ br(Assembler::zero, false, Assembler::pn, L_done);
+        __ delayed()->nop();
+      }
+      if (close_count.is_register()) {
+        __ cmp(close_count.as_register(), open_count);
+        __ br(Assembler::equal, false, Assembler::pn, L_done);
+        __ delayed()->nop();
+      }
+
+      if (move_keep3 && fix_arg_base) {
+        bool emit_move_down = false, emit_move_up = false, emit_guard = false;
+        if (!close_count.is_constant()) {
+          emit_move_down = emit_guard = !zero_open_count;
+          emit_move_up   = true;
+        } else if (open_count != close_count.as_constant()) {
+          emit_move_down = (open_count > close_count.as_constant());
+          emit_move_up   = !emit_move_down;
+        }
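+        // When close_count is a runtime value and open_count is nonzero we may
+        // have to move either way, so a guarded pair of movers is emitted
+        // (with open_count == 0 only the up-mover is needed); with a constant
+        // close_count only the single required direction is generated.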
+        Label L_move_up;
+        if (emit_guard) {
+          __ cmp(close_count.as_register(), open_count);
+          __ br(Assembler::greater, false, Assembler::pn, L_move_up);
+          __ delayed()->nop();
+        }
+
+        if (emit_move_down) {
+          // Move arguments down if |+dest+| > |-collect-|
+          // (This is rare, except when arguments are retained.)
+          // This opens space for the return value.
+          if (keep3_count.is_constant()) {
+            for (int i = 0; i < keep3_count.as_constant(); i++) {
+              __ ld_ptr(            Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch);
+              __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize)            );
+            }
+          } else {
+            // Live: O1_close_count, O2_keep3_count, O3_new_argv
+            Register argv_top = O0_scratch;
+            __ add(L4_old_argv, __ argument_offset(keep3_count, O4_scratch), argv_top);
+            move_arg_slots_down(_masm,
+                                Address(L4_old_argv, 0),  // beginning of old argv
+                                argv_top,                 // end of old argv
+                                close_count,              // distance to move down (must be negative)
+                                O4_scratch, G5_scratch);
+          }
+        }
+
+        if (emit_guard) {
+          __ ba(false, L_done);  // assumes emit_move_up is true also
+          __ delayed()->nop();
+          __ BIND(L_move_up);
+        }
+
+        if (emit_move_up) {
+          // Move arguments up if |+dest+| < |-collect-|
+          // (This is usual, except when |keep3| is empty.)
+          // This closes up the space occupied by the now-deleted collect values.
+          if (keep3_count.is_constant()) {
+            for (int i = keep3_count.as_constant() - 1; i >= 0; i--) {
+              __ ld_ptr(            Address(L4_old_argv, i * Interpreter::stackElementSize), O4_scratch);
+              __ st_ptr(O4_scratch, Address(O3_new_argv, i * Interpreter::stackElementSize)            );
+            }
+          } else {
+            Address argv_top(L4_old_argv, __ argument_offset(keep3_count, O4_scratch));
+            // Live: O1_close_count, O2_keep3_count, O3_new_argv
+            move_arg_slots_up(_masm,
+                              L4_old_argv,  // beginning of old argv
+                              argv_top,     // end of old argv
+                              close_count,  // distance to move up (must be positive)
+                              O4_scratch, G5_scratch);
+          }
+        }
+      }
+      __ BIND(L_done);
+
+      if (fix_arg_base) {
+        // adjust RF.saved_args_base
+        __ mov(O3_new_argv, RicochetFrame::L4_saved_args_base);
+      }
+
+      if (stomp_dest) {
+        // Stomp the return slot, so it doesn't hold garbage.
+        // This isn't strictly necessary, but it may help detect bugs.
+        __ set(RicochetFrame::RETURN_VALUE_PLACEHOLDER, O4_scratch);
+        __ st_ptr(O4_scratch, Address(RicochetFrame::L4_saved_args_base,
+                                      __ argument_offset(keep3_count, keep3_count.register_or_noreg())));  // uses O2_keep3_count
+      }
+      BLOCK_COMMENT("} adjust trailing arguments");
+
+      BLOCK_COMMENT("do_recursive_call");
+      __ mov(SP, O5_savedSP);  // record SP for the callee
+      __ set(ExternalAddress(SharedRuntime::ricochet_blob()->bounce_addr() - frame::pc_return_offset), O7);
+      // The globally unique bounce address has two purposes:
+      // 1. It helps the JVM recognize this frame (frame::is_ricochet_frame).
+      // 2. When returned to, it cuts back the stack and redirects control flow
+      //    to the return handler.
+      // The return handler will further cut back the stack when it takes
+      // down the RF.  Perhaps there is a way to streamline this further.
+
+      // State during recursive call:
+      // ... keep1 | dest | dest=42 | keep3 | RF... | collect | bounce_pc |
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
     }
     break;
 
-  case _adapter_flyby:
-  case _adapter_ricochet:
-    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
+  case _adapter_opt_return_ref:
+  case _adapter_opt_return_int:
+  case _adapter_opt_return_long:
+  case _adapter_opt_return_float:
+  case _adapter_opt_return_double:
+  case _adapter_opt_return_void:
+  case _adapter_opt_return_S0_ref:
+  case _adapter_opt_return_S1_ref:
+  case _adapter_opt_return_S2_ref:
+  case _adapter_opt_return_S3_ref:
+  case _adapter_opt_return_S4_ref:
+  case _adapter_opt_return_S5_ref:
+    {
+      BasicType dest_type_constant = ek_adapter_opt_return_type(ek);
+      int       dest_slot_constant = ek_adapter_opt_return_slot(ek);
+
+      if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
+
+      if (dest_slot_constant == -1) {
+        // The current stub is a general handler for this dest_type.
+        // It can be called from _adapter_opt_return_any below.
+        // Stash the address in a little table.
+        assert((dest_type_constant & CONV_TYPE_MASK) == dest_type_constant, "oob");
+        address return_handler = __ pc();
+        _adapter_return_handlers[dest_type_constant] = return_handler;
+        if (dest_type_constant == T_INT) {
+          // do the subword types too
+          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
+            if (is_subword_type(BasicType(bt)) &&
+                _adapter_return_handlers[bt] == NULL) {
+              _adapter_return_handlers[bt] = return_handler;
+            }
+          }
+        }
+      }
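+      // Note that once the T_INT handler is registered it also serves the
+      // subword returns (boolean, byte, char, short), which the interpreter
+      // widens to int.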
+
+      // On entry to this continuation handler, make Gargs live again.
+      __ mov(RicochetFrame::L4_saved_args_base, Gargs);
+
+      Register O7_temp   = O7;
+      Register O5_vminfo = O5;
+
+      RegisterOrConstant dest_slot = dest_slot_constant;
+      if (dest_slot_constant == -1) {
+        extract_conversion_vminfo(_masm, RicochetFrame::L5_conversion, O5_vminfo);
+        dest_slot = O5_vminfo;
+      }
+      // Store the result back into the argslot.
+      // This code uses the interpreter calling sequence, in which the return value
+      // is usually left in the TOS register, as defined by InterpreterMacroAssembler::pop.
+      // There are certain irregularities with floating point values, which can be seen
+      // in TemplateInterpreterGenerator::generate_return_entry_for.
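+      // (On SPARC that usually means an integral value in O0 and a float or
+      // double in F0, but move_return_value is the authority on the details.)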
+      move_return_value(_masm, dest_type_constant, __ argument_address(dest_slot, O7_temp));
+
+      RicochetFrame::leave_ricochet_frame(_masm, G3_method_handle, I5_savedSP, I7);
+
+      // Load the final target and go.
+      if (VerifyMethodHandles)  verify_method_handle(_masm, G3_method_handle, O0_scratch, O1_scratch);
+      __ restore(I5_savedSP, G0, SP);
+      __ jump_to_method_handle_entry(G3_method_handle, O0_scratch);
+      __ illtrap(0);
+    }
+    break;
+
+  case _adapter_opt_return_any:
+    {
+      Register O7_temp      = O7;
+      Register O5_dest_type = O5;
+
+      if (VerifyMethodHandles)  RicochetFrame::verify_clean(_masm);
+      extract_conversion_dest_type(_masm, RicochetFrame::L5_conversion, O5_dest_type);
+      __ set(ExternalAddress((address) &_adapter_return_handlers[0]), O7_temp);
+      __ sll_ptr(O5_dest_type, LogBytesPerWord, O5_dest_type);
+      __ ld_ptr(O7_temp, O5_dest_type, O7_temp);
+
+#ifdef ASSERT
+      { Label L_ok;
+        __ br_notnull(O7_temp, false, Assembler::pt, L_ok);
+        __ delayed()->nop();
+        __ stop("bad method handle return");
+        __ BIND(L_ok);
+      }
+#endif //ASSERT
+      __ JMP(O7_temp, 0);
+      __ delayed()->nop();
+    }
+    break;
+
+  case _adapter_opt_spread_0:
+  case _adapter_opt_spread_1_ref:
+  case _adapter_opt_spread_2_ref:
+  case _adapter_opt_spread_3_ref:
+  case _adapter_opt_spread_4_ref:
+  case _adapter_opt_spread_5_ref:
+  case _adapter_opt_spread_ref:
+  case _adapter_opt_spread_byte:
+  case _adapter_opt_spread_char:
+  case _adapter_opt_spread_short:
+  case _adapter_opt_spread_int:
+  case _adapter_opt_spread_long:
+  case _adapter_opt_spread_float:
+  case _adapter_opt_spread_double:
+    {
+      // spread an array out into a group of arguments
+      int  length_constant    = ek_adapter_opt_spread_count(ek);
+      bool length_can_be_zero = (length_constant == 0);
+      if (length_constant < 0) {
+        // some adapters with variable length must handle the zero case
+        if (!OptimizeMethodHandles ||
+            ek_adapter_opt_spread_type(ek) != T_OBJECT)
+          length_can_be_zero = true;
+      }
+
+      // find the address of the array argument
+      load_vmargslot(_masm, G3_amh_vmargslot, O0_argslot);
+      __ add(__ argument_address(O0_argslot, O0_argslot), O0_argslot);
+
+      // O0_argslot points both to the array and to the first output arg
+      Address vmarg = Address(O0_argslot, 0);
+
+      // Get the array value.
+      Register  O1_array       = O1_scratch;
+      Register  O2_array_klass = O2_scratch;
+      BasicType elem_type      = ek_adapter_opt_spread_type(ek);
+      int       elem_slots     = type2size[elem_type];  // 1 or 2
+      int       array_slots    = 1;  // array is always a T_OBJECT
+      int       length_offset  = arrayOopDesc::length_offset_in_bytes();
+      int       elem0_offset   = arrayOopDesc::base_offset_in_bytes(elem_type);
+      __ ld_ptr(vmarg, O1_array);
+
+      Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done;
+      if (length_can_be_zero) {
+        // handle the null pointer case, if zero is allowed
+        Label L_skip;
+        if (length_constant < 0) {
+          load_conversion_vminfo(_masm, G3_amh_conversion, O3_scratch);
+          __ br_zero(Assembler::notZero, false, Assembler::pn, O3_scratch, L_skip);
+          __ delayed()->nop();
+        }
+        __ br_null(O1_array, false, Assembler::pn, L_array_is_empty);
+        __ delayed()->nop();
+        __ BIND(L_skip);
+      }
+      __ null_check(O1_array, oopDesc::klass_offset_in_bytes());
+      __ load_klass(O1_array, O2_array_klass);
+
+      // Check the array type.
+      Register O3_klass = O3_scratch;
+      __ load_heap_oop(G3_amh_argument, O3_klass);  // this is a Class object!
+      load_klass_from_Class(_masm, O3_klass, O4_scratch, G5_scratch);
+
+      Label L_ok_array_klass, L_bad_array_klass, L_bad_array_length;
+      __ check_klass_subtype(O2_array_klass, O3_klass, O4_scratch, G5_scratch, L_ok_array_klass);
+      // If we get here, the type check failed!
+      __ ba(false, L_bad_array_klass);
+      __ delayed()->nop();
+      __ BIND(L_ok_array_klass);
+
+      // Check length.
+      if (length_constant >= 0) {
+        __ ldsw(Address(O1_array, length_offset), O4_scratch);
+        __ cmp(O4_scratch, length_constant);
+      } else {
+        Register O3_vminfo = O3_scratch;
+        load_conversion_vminfo(_masm, G3_amh_conversion, O3_vminfo);
+        __ ldsw(Address(O1_array, length_offset), O4_scratch);
+        __ cmp(O3_vminfo, O4_scratch);
+      }
+      __ br(Assembler::notEqual, false, Assembler::pn, L_bad_array_length);
+      __ delayed()->nop();
+
+      Register O2_argslot_limit = O2_scratch;
+
+      // Array length checks out.  Now insert any required stack slots.
+      if (length_constant == -1) {
+        // Form a pointer to the end of the affected region.
+        __ add(O0_argslot, Interpreter::stackElementSize, O2_argslot_limit);
+        // 'stack_move' is the (negative) number of words to insert;
+        // this number already accounts for elem_slots.
+        Register O3_stack_move = O3_scratch;
+        load_stack_move(_masm, G3_amh_conversion, O3_stack_move);
+        __ cmp(O3_stack_move, 0);
+        assert(stack_move_unit() < 0, "else change this comparison");
+        __ br(Assembler::less, false, Assembler::pn, L_insert_arg_space);
+        __ delayed()->nop();
+        __ br(Assembler::equal, false, Assembler::pn, L_copy_args);
+        __ delayed()->nop();
+        // single argument case, with no array movement
+        __ BIND(L_array_is_empty);
+        remove_arg_slots(_masm, -stack_move_unit() * array_slots,
+                         O0_argslot, O1_scratch, O2_scratch, O3_scratch);
+        __ ba(false, L_args_done);  // no spreading to do
+        __ delayed()->nop();
+        __ BIND(L_insert_arg_space);
+        // come here in the usual case, stack_move < 0 (2 or more spread arguments)
+        // Live: O1_array, O2_argslot_limit, O3_stack_move
+        insert_arg_slots(_masm, O3_stack_move,
+                         O0_argslot, O4_scratch, G5_scratch, O1_scratch);
+        // reload from O2_argslot_limit since O0_argslot is now decremented
+        __ ld_ptr(Address(O2_argslot_limit, -Interpreter::stackElementSize), O1_array);
+      } else if (length_constant >= 1) {
+        int new_slots = (length_constant * elem_slots) - array_slots;
+        insert_arg_slots(_masm, new_slots * stack_move_unit(),
+                         O0_argslot, O2_scratch, O3_scratch, O4_scratch);
+      } else if (length_constant == 0) {
+        __ BIND(L_array_is_empty);
+        remove_arg_slots(_masm, -stack_move_unit() * array_slots,
+                         O0_argslot, O1_scratch, O2_scratch, O3_scratch);
+      } else {
+        ShouldNotReachHere();
+      }
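+      // For the constant-length case above, e.g. a 3-element Object[] needs
+      // new_slots == 3*1 - 1 == 2 additional stack slots, since the slot that
+      // held the array reference itself is reused.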
+
+      // Copy from the array to the new slots.
+      // Note: Stack change code preserves integrity of O0_argslot pointer.
+      // So even after slot insertions, O0_argslot still points to first argument.
+      // Beware:  Arguments that are shallow on the stack are deep in the array,
+      // and vice versa.  So a downward-growing stack (the usual) has to be copied
+      // elementwise in reverse order from the source array.
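+      // E.g. with length_constant == 3 and one-slot elements, the constant-length
+      // loop below stores array[0] two slots above O0_argslot and array[2] at
+      // O0_argslot itself, walking the destination slots backward.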
+      __ BIND(L_copy_args);
+      if (length_constant == -1) {
+        // [O0_argslot, O2_argslot_limit) is the area we are inserting into.
+        // Array element [0] goes at the top, just below O2_argslot_limit.
+        Register O1_source = O1_array;
+        __ add(Address(O1_array, elem0_offset), O1_source);
+        Register O4_fill_ptr = O4_scratch;
+        __ mov(O2_argslot_limit, O4_fill_ptr);
+        Label L_loop;
+        __ BIND(L_loop);
+        __ add(O4_fill_ptr, -Interpreter::stackElementSize * elem_slots, O4_fill_ptr);
+        move_typed_arg(_masm, elem_type, true,
+                       Address(O1_source, 0), Address(O4_fill_ptr, 0),
+                       O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
+        __ add(O1_source, type2aelembytes(elem_type), O1_source);
+        __ cmp(O4_fill_ptr, O0_argslot);
+        __ brx(Assembler::greaterUnsigned, false, Assembler::pt, L_loop);
+        __ delayed()->nop();  // FILLME
+      } else if (length_constant == 0) {
+        // nothing to copy
+      } else {
+        int elem_offset = elem0_offset;
+        int slot_offset = length_constant * Interpreter::stackElementSize;
+        for (int index = 0; index < length_constant; index++) {
+          slot_offset -= Interpreter::stackElementSize * elem_slots;  // fill backward
+          move_typed_arg(_masm, elem_type, true,
+                         Address(O1_array, elem_offset), Address(O0_argslot, slot_offset),
+                         O2_scratch);  // must be an even register for !_LP64 long moves (uses O2/O3)
+          elem_offset += type2aelembytes(elem_type);
+        }
+      }
+      __ BIND(L_args_done);
+
+      // Arguments are spread.  Move to next method handle.
+      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+
+      __ BIND(L_bad_array_klass);
+      assert(!vmarg.uses(O2_required), "must be different registers");
+      __ load_heap_oop(Address(O2_array_klass, java_mirror_offset), O2_required);  // required class
+      __ ld_ptr(       vmarg,                                       O1_actual);    // bad object
+      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
+      __ delayed()->mov(Bytecodes::_aaload,                         O0_code);      // who is complaining?
+
+      __ bind(L_bad_array_length);
+      assert(!vmarg.uses(O2_required), "must be different registers");
+      __ mov(   G3_method_handle,                O2_required);  // required class
+      __ ld_ptr(vmarg,                           O1_actual);    // bad object
+      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
+      __ delayed()->mov(Bytecodes::_arraylength, O0_code);      // who is complaining?
+    }
     break;
 
   default:
+    DEBUG_ONLY(tty->print_cr("bad ek=%d (%s)", (int)ek, entry_name(ek)));
     ShouldNotReachHere();
   }
+  BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek)));
 
   address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
   __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// Platform-specific definitions for method handles.
+// These definitions are inlined into class MethodHandles.
+
+// Adapters
+enum /* platform_dependent_constants */ {
+  adapter_code_size = NOT_LP64(22000 DEBUG_ONLY(+ 40000)) LP64_ONLY(32000 DEBUG_ONLY(+ 80000))
+};
+
+public:
+
+class RicochetFrame : public ResourceObj {
+  friend class MethodHandles;
+
+ private:
+  /*
+    RF field            x86                 SPARC
+    sender_pc           *(rsp+0)            I7-0x8
+    sender_link         rbp                 I6+BIAS
+    exact_sender_sp     rsi/r13             I5_savedSP
+    conversion          *(rcx+&amh_conv)    L5_conv
+    saved_args_base     rax                 L4_sab (cf. Gargs = G4)
+    saved_args_layout   #NULL               L3_sal
+    saved_target        *(rcx+&mh_vmtgt)    L2_stgt
+    continuation        #STUB_CON           L1_cont
+   */
+  static const Register L1_continuation     ;  // what to do when control gets back here
+  static const Register L2_saved_target     ;  // target method handle to invoke on saved_args
+  static const Register L3_saved_args_layout;  // caching point for MethodTypeForm.vmlayout cookie
+  static const Register L4_saved_args_base  ;  // base of pushed arguments (slot 0, arg N) (-3)
+  static const Register L5_conversion       ;  // misc. information from original AdapterMethodHandle (-2)
+
+  frame _fr;
+
+  RicochetFrame(const frame& fr) : _fr(fr) { }
+
+  intptr_t* register_addr(Register reg) const  {
+    assert((_fr.sp() + reg->sp_offset_in_saved_window()) == _fr.register_addr(reg), "must agree");
+    return _fr.register_addr(reg);
+  }
+  intptr_t  register_value(Register reg) const { return *register_addr(reg); }
+
+ public:
+  intptr_t* continuation() const        { return (intptr_t*) register_value(L1_continuation); }
+  oop       saved_target() const        { return (oop)       register_value(L2_saved_target); }
+  oop       saved_args_layout() const   { return (oop)       register_value(L3_saved_args_layout); }
+  intptr_t* saved_args_base() const     { return (intptr_t*) register_value(L4_saved_args_base); }
+  intptr_t  conversion() const          { return             register_value(L5_conversion); }
+  intptr_t* exact_sender_sp() const     { return (intptr_t*) register_value(I5_savedSP); }
+  intptr_t* sender_link() const         { return _fr.sender_sp(); }  // XXX
+  address   sender_pc() const           { return _fr.sender_pc(); }
+
+  // This value is not used for much, but it apparently must be nonzero.
+  static int frame_size_in_bytes()              { return wordSize * 4; }
+
+  intptr_t* extended_sender_sp() const  { return saved_args_base(); }
+
+  intptr_t  return_value_slot_number() const {
+    return adapter_conversion_vminfo(conversion());
+  }
+  BasicType return_value_type() const {
+    return adapter_conversion_dest_type(conversion());
+  }
+  bool has_return_value_slot() const {
+    return return_value_type() != T_VOID;
+  }
+  intptr_t* return_value_slot_addr() const {
+    assert(has_return_value_slot(), "");
+    return saved_arg_slot_addr(return_value_slot_number());
+  }
+  intptr_t* saved_target_slot_addr() const {
+    return saved_arg_slot_addr(saved_args_length());
+  }
+  intptr_t* saved_arg_slot_addr(int slot) const {
+    assert(slot >= 0, "");
+    return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) );
+  }
+
+  jint      saved_args_length() const;
+  jint      saved_arg_offset(int arg) const;
+
+  // GC interface
+  oop*  saved_target_addr()                     { return (oop*)register_addr(L2_saved_target); }
+  oop*  saved_args_layout_addr()                { return (oop*)register_addr(L3_saved_args_layout); }
+
+  oop  compute_saved_args_layout(bool read_cache, bool write_cache);
+
+#ifdef ASSERT
+  // The magic number is supposed to help find ricochet frames within the bytes of stack dumps.
+  enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E };
+  static const Register L0_magic_number_1   ;  // cookie for debugging, at start of RSA
+  static Address magic_number_2_addr()  { return Address(L4_saved_args_base, -wordSize); }
+  intptr_t magic_number_1() const       { return register_value(L0_magic_number_1); }
+  intptr_t magic_number_2() const       { return saved_args_base()[-1]; }
+#endif //ASSERT
+
+ public:
+  enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) };
+
+  void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc.
+
+  static void generate_ricochet_blob(MacroAssembler* _masm,
+                                     // output params:
+                                     int* bounce_offset,
+                                     int* exception_offset,
+                                     int* frame_size_in_words);
+
+  static void enter_ricochet_frame(MacroAssembler* _masm,
+                                   Register recv_reg,
+                                   Register argv_reg,
+                                   address return_handler);
+
+  static void leave_ricochet_frame(MacroAssembler* _masm,
+                                   Register recv_reg,
+                                   Register new_sp_reg,
+                                   Register sender_pc_reg);
+
+  static RicochetFrame* from_frame(const frame& fr) {
+    RicochetFrame* rf = new RicochetFrame(fr);
+    rf->verify();
+    return rf;
+  }
+
+  static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
+};
+
+// Additional helper methods for MethodHandles code generation:
+public:
+  static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg);
+  static void load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg);
+  static void extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg);
+  static void extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg);
+
+  static void load_stack_move(MacroAssembler* _masm,
+                              Address G3_amh_conversion,
+                              Register G5_stack_move);
+
+  static void insert_arg_slots(MacroAssembler* _masm,
+                               RegisterOrConstant arg_slots,
+                               Register argslot_reg,
+                               Register temp_reg, Register temp2_reg, Register temp3_reg);
+
+  static void remove_arg_slots(MacroAssembler* _masm,
+                               RegisterOrConstant arg_slots,
+                               Register argslot_reg,
+                               Register temp_reg, Register temp2_reg, Register temp3_reg);
+
+  static void push_arg_slots(MacroAssembler* _masm,
+                             Register argslot_reg,
+                             RegisterOrConstant slot_count,
+                             Register temp_reg, Register temp2_reg);
+
+  static void move_arg_slots_up(MacroAssembler* _masm,
+                                Register bottom_reg,  // invariant
+                                Address  top_addr,    // can use temp_reg
+                                RegisterOrConstant positive_distance_in_slots,
+                                Register temp_reg, Register temp2_reg);
+
+  static void move_arg_slots_down(MacroAssembler* _masm,
+                                  Address  bottom_addr,  // can use temp_reg
+                                  Register top_reg,      // invariant
+                                  RegisterOrConstant negative_distance_in_slots,
+                                  Register temp_reg, Register temp2_reg);
+
+  static void move_typed_arg(MacroAssembler* _masm,
+                             BasicType type, bool is_element,
+                             Address value_src, Address slot_dest,
+                             Register temp_reg);
+
+  static void move_return_value(MacroAssembler* _masm, BasicType type,
+                                Address return_slot);
+
+  static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
+                             Register temp_reg,
+                             const char* error_message) NOT_DEBUG_RETURN;
+
+  static void verify_argslots(MacroAssembler* _masm,
+                              RegisterOrConstant argslot_count,
+                              Register argslot_reg,
+                              Register temp_reg,
+                              Register temp2_reg,
+                              bool negate_argslot,
+                              const char* error_message) NOT_DEBUG_RETURN;
+
+  static void verify_stack_move(MacroAssembler* _masm,
+                                RegisterOrConstant arg_slots,
+                                int direction) NOT_DEBUG_RETURN;
+
+  static void verify_klass(MacroAssembler* _masm,
+                           Register obj_reg, KlassHandle klass,
+                           Register temp_reg, Register temp2_reg,
+                           const char* error_message = "wrong klass") NOT_DEBUG_RETURN;
+
+  static void verify_method_handle(MacroAssembler* _masm, Register mh_reg,
+                                   Register temp_reg, Register temp2_reg) {
+    verify_klass(_masm, mh_reg, SystemDictionaryHandles::MethodHandle_klass(),
+                 temp_reg, temp2_reg,
+                 "reference is a MH");
+  }
+
+  // Similar to InterpreterMacroAssembler::jump_from_interpreted.
+  // Takes care of special dispatch from single stepping too.
+  static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, Register temp2);
+
+  static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
--- a/hotspot/src/cpu/sparc/vm/registerMap_sparc.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/registerMap_sparc.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 // machine-dependent implementation for register maps
   friend class frame;
+  friend class MethodHandles;
 
  private:
   intptr_t* _window;         // register window save area (for L and I regs)
--- a/hotspot/src/cpu/sparc/vm/runtime_sparc.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/runtime_sparc.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,8 +42,6 @@
 
 #define __ masm->
 
-ExceptionBlob      *OptoRuntime::_exception_blob;
-
 //------------------------------ generate_exception_blob ---------------------------
 // creates exception blob at the end
 // Using exception blob, this code is jumped from a compiled method.
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -47,18 +47,6 @@
 
 #define __ masm->
 
-#ifdef COMPILER2
-UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
-#endif // COMPILER2
-
-DeoptimizationBlob* SharedRuntime::_deopt_blob;
-SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
-SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
-RuntimeStub*        SharedRuntime::_wrong_method_blob;
-RuntimeStub*        SharedRuntime::_ic_miss_blob;
-RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
-RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
-RuntimeStub*        SharedRuntime::_resolve_static_call_blob;
 
 class RegisterSaver {
 
@@ -3492,7 +3480,7 @@
 // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
 // Tricky, tricky, tricky...
 
-static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
+SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
 
   // allocate space for the code
@@ -3587,7 +3575,7 @@
 // but since this is generic code we don't know what they are and the caller
 // must do any gc of the args.
 //
-static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
+RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
 
   // allocate space for the code
@@ -3677,35 +3665,3 @@
   // frame_size_words or bytes??
   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
 }
-
-void SharedRuntime::generate_stubs() {
-
-  _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
-                                             "wrong_method_stub");
-
-  _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
-                                        "ic_miss_stub");
-
-  _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
-                                        "resolve_opt_virtual_call");
-
-  _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
-                                        "resolve_virtual_call");
-
-  _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
-                                        "resolve_static_call");
-
-  _polling_page_safepoint_handler_blob =
-    generate_handler_blob(CAST_FROM_FN_PTR(address,
-                   SafepointSynchronize::handle_polling_page_exception), false);
-
-  _polling_page_return_handler_blob =
-    generate_handler_blob(CAST_FROM_FN_PTR(address,
-                   SafepointSynchronize::handle_polling_page_exception), true);
-
-  generate_deopt_blob();
-
-#ifdef COMPILER2
-  generate_uncommon_trap_blob();
-#endif // COMPILER2
-}
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Wed Aug 17 22:47:12 2011 -0700
@@ -425,7 +425,7 @@
 // but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
 // This class is usable for mis-aligned loads as happen in I2C adapters.
 reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
-                   R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31 );
+                   R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
 %}
 
 //----------DEFINITION BLOCK---------------------------------------------------
@@ -1326,17 +1326,17 @@
 
   // --------------------------------------
   // Check for float->int copy; requires a trip through memory
-  if( src_first_rc == rc_float && dst_first_rc == rc_int ) {
+  if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) {
     int offset = frame::register_save_words*wordSize;
-    if( cbuf ) {
+    if (cbuf) {
       emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 );
       impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
       impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
       emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16 );
     }
 #ifndef PRODUCT
-    else if( !do_size ) {
-      if( size != 0 ) st->print("\n\t");
+    else if (!do_size) {
+      if (size != 0) st->print("\n\t");
       st->print(  "SUB    R_SP,16,R_SP\n");
       impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
       impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
@@ -1346,6 +1346,21 @@
     size += 16;
   }
 
+  // Check for float->int copy on T4
+  if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) {
+    // Further check for aligned-adjacent pair, so we can use a double move
+    if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
+      return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mdtox_opf,"MOVDTOX",size, st);
+    size  =  impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mstouw_opf,"MOVSTOUW",size, st);
+  }
+  // Check for int->float copy on T4
+  if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) {
+    // Further check for aligned-adjacent pair, so we can use a double move
+    if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
+      return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mxtod_opf,"MOVXTOD",size, st);
+    size  =  impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mwtos_opf,"MOVWTOS",size, st);
+  }
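+  // On VIS3-capable hardware (T4 and later) these raw copies stay in registers
+  // (MOVSTOUW/MOVWTOS, or MOVDTOX/MOVXTOD for an aligned-adjacent pair) instead
+  // of taking a trip through memory.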
+
   // --------------------------------------
   // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations.
   // In such cases, I have to do the big-endian swap.  For aligned targets, the
@@ -8164,215 +8179,58 @@
   ins_pipe( cadd_cmpltmask );
 %}
 
-//----------Arithmetic Conversion Instructions---------------------------------
-// The conversions operations are all Alpha sorted.  Please keep it that way!
-
-instruct convD2F_reg(regF dst, regD src) %{
-  match(Set dst (ConvD2F src));
-  size(4);
-  format %{ "FDTOS  $src,$dst" %}
-  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf);
-  ins_encode(form3_opf_rs2D_rdF(src, dst));
-  ins_pipe(fcvtD2F);
-%}
-
-
-// Convert a double to an int in a float register.
-// If the double is a NAN, stuff a zero in instead.
-instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{
-  effect(DEF dst, USE src, KILL fcc0);
-  format %{ "FCMPd  fcc0,$src,$src\t! check for NAN\n\t"
-            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
-            "FDTOI  $src,$dst\t! convert in delay slot\n\t"
-            "FITOS  $dst,$dst\t! change NaN/max-int to valid float\n\t"
-            "FSUBs  $dst,$dst,$dst\t! cleared only if nan\n"
-      "skip:" %}
-  ins_encode(form_d2i_helper(src,dst));
-  ins_pipe(fcvtD2I);
-%}
-
-instruct convD2I_reg(stackSlotI dst, regD src) %{
-  match(Set dst (ConvD2I src));
-  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
-  expand %{
-    regF tmp;
-    convD2I_helper(tmp, src);
-    regF_to_stkI(dst, tmp);
-  %}
-%}
-
-// Convert a double to a long in a double register.
-// If the double is a NAN, stuff a zero in instead.
-instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{
-  effect(DEF dst, USE src, KILL fcc0);
-  format %{ "FCMPd  fcc0,$src,$src\t! check for NAN\n\t"
-            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
-            "FDTOX  $src,$dst\t! convert in delay slot\n\t"
-            "FXTOD  $dst,$dst\t! change NaN/max-long to valid double\n\t"
-            "FSUBd  $dst,$dst,$dst\t! cleared only if nan\n"
-      "skip:" %}
-  ins_encode(form_d2l_helper(src,dst));
-  ins_pipe(fcvtD2L);
-%}
-
-
-// Double to Long conversion
-instruct convD2L_reg(stackSlotL dst, regD src) %{
-  match(Set dst (ConvD2L src));
-  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
-  expand %{
-    regD tmp;
-    convD2L_helper(tmp, src);
-    regD_to_stkL(dst, tmp);
-  %}
-%}
-
-
-instruct convF2D_reg(regD dst, regF src) %{
-  match(Set dst (ConvF2D src));
-  format %{ "FSTOD  $src,$dst" %}
-  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf);
-  ins_encode(form3_opf_rs2F_rdD(src, dst));
-  ins_pipe(fcvtF2D);
-%}
-
-
-instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{
-  effect(DEF dst, USE src, KILL fcc0);
-  format %{ "FCMPs  fcc0,$src,$src\t! check for NAN\n\t"
-            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
-            "FSTOI  $src,$dst\t! convert in delay slot\n\t"
-            "FITOS  $dst,$dst\t! change NaN/max-int to valid float\n\t"
-            "FSUBs  $dst,$dst,$dst\t! cleared only if nan\n"
-      "skip:" %}
-  ins_encode(form_f2i_helper(src,dst));
-  ins_pipe(fcvtF2I);
-%}
-
-instruct convF2I_reg(stackSlotI dst, regF src) %{
-  match(Set dst (ConvF2I src));
-  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
-  expand %{
-    regF tmp;
-    convF2I_helper(tmp, src);
-    regF_to_stkI(dst, tmp);
-  %}
-%}
-
-
-instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{
-  effect(DEF dst, USE src, KILL fcc0);
-  format %{ "FCMPs  fcc0,$src,$src\t! check for NAN\n\t"
-            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
-            "FSTOX  $src,$dst\t! convert in delay slot\n\t"
-            "FXTOD  $dst,$dst\t! change NaN/max-long to valid double\n\t"
-            "FSUBd  $dst,$dst,$dst\t! cleared only if nan\n"
-      "skip:" %}
-  ins_encode(form_f2l_helper(src,dst));
-  ins_pipe(fcvtF2L);
-%}
-
-// Float to Long conversion
-instruct convF2L_reg(stackSlotL dst, regF src) %{
-  match(Set dst (ConvF2L src));
-  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
-  expand %{
-    regD tmp;
-    convF2L_helper(tmp, src);
-    regD_to_stkL(dst, tmp);
-  %}
-%}
-
-
-instruct convI2D_helper(regD dst, regF tmp) %{
-  effect(USE tmp, DEF dst);
-  format %{ "FITOD  $tmp,$dst" %}
-  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
-  ins_encode(form3_opf_rs2F_rdD(tmp, dst));
-  ins_pipe(fcvtI2D);
-%}
-
-instruct convI2D_reg(stackSlotI src, regD dst) %{
-  match(Set dst (ConvI2D src));
-  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  expand %{
-    regF tmp;
-    stkI_to_regF( tmp, src);
-    convI2D_helper( dst, tmp);
-  %}
-%}
-
-instruct convI2D_mem( regD_low dst, memory mem ) %{
-  match(Set dst (ConvI2D (LoadI mem)));
-  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
-  format %{ "LDF    $mem,$dst\n\t"
-            "FITOD  $dst,$dst" %}
-  opcode(Assembler::ldf_op3, Assembler::fitod_opf);
-  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
-  ins_pipe(floadF_mem);
-%}
-
-
-instruct convI2F_helper(regF dst, regF tmp) %{
-  effect(DEF dst, USE tmp);
-  format %{ "FITOS  $tmp,$dst" %}
-  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitos_opf);
-  ins_encode(form3_opf_rs2F_rdF(tmp, dst));
-  ins_pipe(fcvtI2F);
-%}
-
-instruct convI2F_reg( regF dst, stackSlotI src ) %{
-  match(Set dst (ConvI2F src));
-  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  expand %{
-    regF tmp;
-    stkI_to_regF(tmp,src);
-    convI2F_helper(dst, tmp);
-  %}
-%}
-
-instruct convI2F_mem( regF dst, memory mem ) %{
-  match(Set dst (ConvI2F (LoadI mem)));
-  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
-  format %{ "LDF    $mem,$dst\n\t"
-            "FITOS  $dst,$dst" %}
-  opcode(Assembler::ldf_op3, Assembler::fitos_opf);
-  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
-  ins_pipe(floadF_mem);
-%}
-
-
-instruct convI2L_reg(iRegL dst, iRegI src) %{
-  match(Set dst (ConvI2L src));
-  size(4);
-  format %{ "SRA    $src,0,$dst\t! int->long" %}
-  opcode(Assembler::sra_op3, Assembler::arith_op);
-  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
+
+//-----------------------------------------------------------------
+// Direct raw moves between float and general registers using VIS3.
+
+instruct MoveF2I_reg_reg(iRegI dst, regF src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (MoveF2I src));
+
+  format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %}
+  ins_encode %{
+    __ movstouw($src$$FloatRegister, $dst$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct MoveI2F_reg_reg(regF dst, iRegI src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (MoveI2F src));
+
+  format %{ "MOVWTOS $src,$dst\t! MoveI2F" %}
+  ins_encode %{
+    __ movwtos($src$$Register, $dst$$FloatRegister);
+  %}
   ins_pipe(ialu_reg_reg);
 %}
 
-// Zero-extend convert int to long
-instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{
-  match(Set dst (AndL (ConvI2L src) mask) );
-  size(4);
-  format %{ "SRL    $src,0,$dst\t! zero-extend int to long" %}
-  opcode(Assembler::srl_op3, Assembler::arith_op);
-  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
+instruct MoveD2L_reg_reg(iRegL dst, regD src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (MoveD2L src));
+
+  format %{ "MOVDTOX $src,$dst\t! MoveD2L" %}
+  ins_encode %{
+    __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register);
+  %}
   ins_pipe(ialu_reg_reg);
 %}
 
-// Zero-extend long
-instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{
-  match(Set dst (AndL src mask) );
-  size(4);
-  format %{ "SRL    $src,0,$dst\t! zero-extend long" %}
-  opcode(Assembler::srl_op3, Assembler::arith_op);
-  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
+instruct MoveL2D_reg_reg(regD dst, iRegL src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (MoveL2D src));
+
+  format %{ "MOVXTOD $src,$dst\t! MoveL2D" %}
+  ins_encode %{
+    __ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg));
+  %}
   ins_pipe(ialu_reg_reg);
 %}
 
+
+// Raw moves between float and general registers using stack.
+
 instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{
   match(Set dst (MoveF2I src));
   effect(DEF dst, USE src);
@@ -8427,7 +8285,7 @@
   ins_cost(MEMORY_REF_COST);
 
   size(4);
-  format %{ "STF   $src,$dst\t!MoveF2I" %}
+  format %{ "STF   $src,$dst\t! MoveF2I" %}
   opcode(Assembler::stf_op3);
   ins_encode(simple_form3_mem_reg(dst, src));
   ins_pipe(fstoreF_stk_reg);
@@ -8439,7 +8297,7 @@
   ins_cost(MEMORY_REF_COST);
 
   size(4);
-  format %{ "STW    $src,$dst\t!MoveI2F" %}
+  format %{ "STW    $src,$dst\t! MoveI2F" %}
   opcode(Assembler::stw_op3);
   ins_encode(simple_form3_mem_reg( dst, src ) );
   ins_pipe(istore_mem_reg);
@@ -8451,7 +8309,7 @@
   ins_cost(MEMORY_REF_COST);
 
   size(4);
-  format %{ "STDF   $src,$dst\t!MoveD2L" %}
+  format %{ "STDF   $src,$dst\t! MoveD2L" %}
   opcode(Assembler::stdf_op3);
   ins_encode(simple_form3_mem_reg(dst, src));
   ins_pipe(fstoreD_stk_reg);
@@ -8463,13 +8321,290 @@
   ins_cost(MEMORY_REF_COST);
 
   size(4);
-  format %{ "STX    $src,$dst\t!MoveL2D" %}
+  format %{ "STX    $src,$dst\t! MoveL2D" %}
   opcode(Assembler::stx_op3);
   ins_encode(simple_form3_mem_reg( dst, src ) );
   ins_pipe(istore_mem_reg);
 %}
 
 
+//----------Arithmetic Conversion Instructions---------------------------------
+// The conversion operations are all alphabetically sorted.  Please keep it that way!
+
+instruct convD2F_reg(regF dst, regD src) %{
+  match(Set dst (ConvD2F src));
+  size(4);
+  format %{ "FDTOS  $src,$dst" %}
+  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf);
+  ins_encode(form3_opf_rs2D_rdF(src, dst));
+  ins_pipe(fcvtD2F);
+%}
+
+
+// Convert a double to an int in a float register.
+// If the double is a NAN, stuff a zero in instead.
+instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{
+  effect(DEF dst, USE src, KILL fcc0);
+  format %{ "FCMPd  fcc0,$src,$src\t! check for NAN\n\t"
+            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
+            "FDTOI  $src,$dst\t! convert in delay slot\n\t"
+            "FITOS  $dst,$dst\t! change NaN/max-int to valid float\n\t"
+            "FSUBs  $dst,$dst,$dst\t! cleared only if nan\n"
+      "skip:" %}
+  ins_encode(form_d2i_helper(src,dst));
+  ins_pipe(fcvtD2I);
+%}
+
+instruct convD2I_stk(stackSlotI dst, regD src) %{
+  match(Set dst (ConvD2I src));
+  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
+  expand %{
+    regF tmp;
+    convD2I_helper(tmp, src);
+    regF_to_stkI(dst, tmp);
+  %}
+%}
+
+instruct convD2I_reg(iRegI dst, regD src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvD2I src));
+  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
+  expand %{
+    regF tmp;
+    convD2I_helper(tmp, src);
+    MoveF2I_reg_reg(dst, tmp);
+  %}
+%}
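+
+// Note: with VIS3 the *_reg conversion forms keep the converted value in
+// registers (via the Move*_reg_reg instructions above), while the *_stk forms
+// spill it through a stack slot as on older hardware.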
+
+
+// Convert a double to a long in a double register.
+// If the double is a NAN, stuff a zero in instead.
+instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{
+  effect(DEF dst, USE src, KILL fcc0);
+  format %{ "FCMPd  fcc0,$src,$src\t! check for NAN\n\t"
+            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
+            "FDTOX  $src,$dst\t! convert in delay slot\n\t"
+            "FXTOD  $dst,$dst\t! change NaN/max-long to valid double\n\t"
+            "FSUBd  $dst,$dst,$dst\t! cleared only if nan\n"
+      "skip:" %}
+  ins_encode(form_d2l_helper(src,dst));
+  ins_pipe(fcvtD2L);
+%}
+
+instruct convD2L_stk(stackSlotL dst, regD src) %{
+  match(Set dst (ConvD2L src));
+  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
+  expand %{
+    regD tmp;
+    convD2L_helper(tmp, src);
+    regD_to_stkL(dst, tmp);
+  %}
+%}
+
+instruct convD2L_reg(iRegL dst, regD src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvD2L src));
+  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
+  expand %{
+    regD tmp;
+    convD2L_helper(tmp, src);
+    MoveD2L_reg_reg(dst, tmp);
+  %}
+%}
+
+
+instruct convF2D_reg(regD dst, regF src) %{
+  match(Set dst (ConvF2D src));
+  format %{ "FSTOD  $src,$dst" %}
+  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf);
+  ins_encode(form3_opf_rs2F_rdD(src, dst));
+  ins_pipe(fcvtF2D);
+%}
+
+
+// Convert a float to an int in a float register.
+// If the float is a NAN, stuff a zero in instead.
+instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{
+  effect(DEF dst, USE src, KILL fcc0);
+  format %{ "FCMPs  fcc0,$src,$src\t! check for NAN\n\t"
+            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
+            "FSTOI  $src,$dst\t! convert in delay slot\n\t"
+            "FITOS  $dst,$dst\t! change NaN/max-int to valid float\n\t"
+            "FSUBs  $dst,$dst,$dst\t! cleared only if nan\n"
+      "skip:" %}
+  ins_encode(form_f2i_helper(src,dst));
+  ins_pipe(fcvtF2I);
+%}
+
+instruct convF2I_stk(stackSlotI dst, regF src) %{
+  match(Set dst (ConvF2I src));
+  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
+  expand %{
+    regF tmp;
+    convF2I_helper(tmp, src);
+    regF_to_stkI(dst, tmp);
+  %}
+%}
+
+instruct convF2I_reg(iRegI dst, regF src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvF2I src));
+  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
+  expand %{
+    regF tmp;
+    convF2I_helper(tmp, src);
+    MoveF2I_reg_reg(dst, tmp);
+  %}
+%}
+
+
+// Convert a float to a long in a float register.
+// If the float is a NAN, stuff a zero in instead.
+instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{
+  effect(DEF dst, USE src, KILL fcc0);
+  format %{ "FCMPs  fcc0,$src,$src\t! check for NAN\n\t"
+            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
+            "FSTOX  $src,$dst\t! convert in delay slot\n\t"
+            "FXTOD  $dst,$dst\t! change NaN/max-long to valid double\n\t"
+            "FSUBd  $dst,$dst,$dst\t! cleared only if nan\n"
+      "skip:" %}
+  ins_encode(form_f2l_helper(src,dst));
+  ins_pipe(fcvtF2L);
+%}
+
+instruct convF2L_stk(stackSlotL dst, regF src) %{
+  match(Set dst (ConvF2L src));
+  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
+  expand %{
+    regD tmp;
+    convF2L_helper(tmp, src);
+    regD_to_stkL(dst, tmp);
+  %}
+%}
+
+instruct convF2L_reg(iRegL dst, regF src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvF2L src));
+  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
+  expand %{
+    regD tmp;
+    convF2L_helper(tmp, src);
+    MoveD2L_reg_reg(dst, tmp);
+  %}
+%}
+
+
+instruct convI2D_helper(regD dst, regF tmp) %{
+  effect(USE tmp, DEF dst);
+  format %{ "FITOD  $tmp,$dst" %}
+  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
+  ins_encode(form3_opf_rs2F_rdD(tmp, dst));
+  ins_pipe(fcvtI2D);
+%}
+
+instruct convI2D_stk(stackSlotI src, regD dst) %{
+  match(Set dst (ConvI2D src));
+  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
+  expand %{
+    regF tmp;
+    stkI_to_regF(tmp, src);
+    convI2D_helper(dst, tmp);
+  %}
+%}
+
+instruct convI2D_reg(regD_low dst, iRegI src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvI2D src));
+  expand %{
+    regF tmp;
+    MoveI2F_reg_reg(tmp, src);
+    convI2D_helper(dst, tmp);
+  %}
+%}
+
+instruct convI2D_mem(regD_low dst, memory mem) %{
+  match(Set dst (ConvI2D (LoadI mem)));
+  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
+  size(8);
+  format %{ "LDF    $mem,$dst\n\t"
+            "FITOD  $dst,$dst" %}
+  opcode(Assembler::ldf_op3, Assembler::fitod_opf);
+  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
+  ins_pipe(floadF_mem);
+%}
+
+
+instruct convI2F_helper(regF dst, regF tmp) %{
+  effect(DEF dst, USE tmp);
+  format %{ "FITOS  $tmp,$dst" %}
+  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitos_opf);
+  ins_encode(form3_opf_rs2F_rdF(tmp, dst));
+  ins_pipe(fcvtI2F);
+%}
+
+instruct convI2F_stk(regF dst, stackSlotI src) %{
+  match(Set dst (ConvI2F src));
+  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
+  expand %{
+    regF tmp;
+    stkI_to_regF(tmp,src);
+    convI2F_helper(dst, tmp);
+  %}
+%}
+
+instruct convI2F_reg(regF dst, iRegI src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvI2F src));
+  ins_cost(DEFAULT_COST);
+  expand %{
+    regF tmp;
+    MoveI2F_reg_reg(tmp, src);
+    convI2F_helper(dst, tmp);
+  %}
+%}
+
+instruct convI2F_mem( regF dst, memory mem ) %{
+  match(Set dst (ConvI2F (LoadI mem)));
+  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
+  size(8);
+  format %{ "LDF    $mem,$dst\n\t"
+            "FITOS  $dst,$dst" %}
+  opcode(Assembler::ldf_op3, Assembler::fitos_opf);
+  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
+  ins_pipe(floadF_mem);
+%}
+
+
+instruct convI2L_reg(iRegL dst, iRegI src) %{
+  match(Set dst (ConvI2L src));
+  size(4);
+  format %{ "SRA    $src,0,$dst\t! int->long" %}
+  opcode(Assembler::sra_op3, Assembler::arith_op);
+  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Zero-extend convert int to long
+instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{
+  match(Set dst (AndL (ConvI2L src) mask) );
+  size(4);
+  format %{ "SRL    $src,0,$dst\t! zero-extend int to long" %}
+  opcode(Assembler::srl_op3, Assembler::arith_op);
+  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Zero-extend long
+instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{
+  match(Set dst (AndL src mask) );
+  size(4);
+  format %{ "SRL    $src,0,$dst\t! zero-extend long" %}
+  opcode(Assembler::srl_op3, Assembler::arith_op);
+  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
+  ins_pipe(ialu_reg_reg);
+%}
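
Editorial illustration of the pattern matched by convI2L_reg_zex and zerox_long above, assuming the SPARC v9 behavior that a 32-bit SRL zero-fills the upper half of the 64-bit result, which is why the 0xFFFFFFFF mask collapses to a single shift-by-zero:

#include <cstdint>

// (AndL (ConvI2L src) 0xFFFFFFFF) is just a 32-bit zero-extension.
static uint64_t zero_extend_int(int32_t src) {
  return (uint64_t)(uint32_t)src;   // same value as (((int64_t)src) & 0xFFFFFFFFLL)
}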
+
+
 //-----------
 // Long to Double conversion using V8 opcodes.
 // Still useful because cheetah traps and becomes
@@ -8589,7 +8724,7 @@
   ins_pipe(fcvtL2D);
 %}
 
-instruct convL2D_reg_fast_fxtof(regD dst, stackSlotL src) %{
+instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{
   predicate(VM_Version::has_fast_fxtof());
   match(Set dst (ConvL2D src));
   ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST);
@@ -8600,10 +8735,15 @@
   %}
 %}
 
-//-----------
-// Long to Float conversion using V8 opcodes.
-// Still useful because cheetah traps and becomes
-// amazingly slow for some common numbers.
+instruct convL2D_reg(regD dst, iRegL src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvL2D src));
+  expand %{
+    regD tmp;
+    MoveL2D_reg_reg(tmp, src);
+    convL2D_helper(dst, tmp);
+  %}
+%}
 
 // Long to Float conversion using fast fxtof
 instruct convL2F_helper(regF dst, regD tmp) %{
@@ -8615,7 +8755,7 @@
   ins_pipe(fcvtL2F);
 %}
 
-instruct convL2F_reg_fast_fxtof(regF dst, stackSlotL src) %{
+instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{
   match(Set dst (ConvL2F src));
   ins_cost(DEFAULT_COST + MEMORY_REF_COST);
   expand %{
@@ -8624,6 +8764,18 @@
     convL2F_helper(dst, tmp);
   %}
 %}
+
+instruct convL2F_reg(regF dst, iRegL src) %{
+  predicate(UseVIS >= 3);
+  match(Set dst (ConvL2F src));
+  ins_cost(DEFAULT_COST);
+  expand %{
+    regD tmp;
+    MoveL2D_reg_reg(tmp, src);
+    convL2F_helper(dst, tmp);
+  %}
+%}
+
 //-----------
 
 instruct convL2I_reg(iRegI dst, iRegL src) %{
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -440,7 +440,8 @@
 #undef __
 #define __ masm->
 
-  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc) {
+  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
+                                   Register arg1 = noreg, Register arg2 = noreg) {
 #ifdef ASSERT
     int insts_size = VerifyThread ? 1 * K : 600;
 #else
@@ -476,6 +477,13 @@
     __ set_last_Java_frame(last_java_sp, G0);
     if (VerifyThread)  __ mov(G2_thread, O0); // about to be smashed; pass early
     __ save_thread(noreg);
+    if (arg1 != noreg) {
+      assert(arg2 != O1, "clobbered");
+      __ mov(arg1, O1);
+    }
+    if (arg2 != noreg) {
+      __ mov(arg2, O2);
+    }
     // do the call
     BLOCK_COMMENT("call runtime_entry");
     __ call(runtime_entry, relocInfo::runtime_call_type);
@@ -3240,6 +3248,14 @@
     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
     StubRoutines::_atomic_add_ptr_entry      = StubRoutines::_atomic_add_entry;
 #endif  // COMPILER2 !=> _LP64
+
+    // Build this early so it's available for the interpreter.  The
+    // stub expects the required and actual types to already be in O1
+    // and O2, respectively.
+    StubRoutines::_throw_WrongMethodTypeException_entry =
+      generate_throw_exception("WrongMethodTypeException throw_exception",
+                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
+                               false, G5_method_type, G3_method_handle);
   }
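
Editorial sketch of the calling convention the new arg1/arg2 parameters set up on SPARC: outgoing C arguments travel in O0-O5, and O0 always carries the current thread, so the two optional register arguments land in O1 and O2. The declaration below is a hypothetical mirror of the runtime entry, shown only to make the mapping explicit; the real signature lives in SharedRuntime and may differ.

class JavaThread;
class oopDesc;

// Hypothetical mirror of SharedRuntime::throw_WrongMethodTypeException:
void throw_WrongMethodTypeException(JavaThread* thread,    // O0
                                    oopDesc*    required,   // O1  <- G5_method_type at the call site
                                    oopDesc*    actual);    // O2  <- G3_method_handle at the call site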
 
 
--- a/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,11 +44,6 @@
   code_size2 = 20000            // simply increase if too small (assembler will crash if too small)
 };
 
-// MethodHandles adapters
-enum method_handles_platform_dependent_constants {
-  method_handles_adapters_code_size = 15000
-};
-
 class Sparc {
  friend class StubGenerator;
 
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -128,24 +128,6 @@
 }
 
 
-// Arguments are: required type in G5_method_type, and
-// failing object (or NULL) in G3_method_handle.
-address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
-  address entry = __ pc();
-  // expression stack must be empty before entering the VM if an exception
-  // happened
-  __ empty_expression_stack();
-  // load exception object
-  __ call_VM(Oexception,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_WrongMethodTypeException),
-             G5_method_type,    // required
-             G3_method_handle); // actual
-  __ should_not_reach_here();
-  return entry;
-}
-
-
 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
   address entry = __ pc();
   // expression stack must be empty before entering the VM if an exception happened
@@ -1712,7 +1694,7 @@
       int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
       *interpreter_frame->register_addr(I5_savedSP)    = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
     } else {
-      assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
+      assert(caller->is_compiled_frame() || caller->is_entry_frame() || caller->is_ricochet_frame(), "only possible cases");
       // Don't have Lesp available; lay out locals block in the caller
       // adjacent to the register window save area.
       //
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -266,7 +266,7 @@
 
 void TemplateTable::ldc(bool wide) {
   transition(vtos, vtos);
-  Label call_ldc, notInt, notString, notClass, exit;
+  Label call_ldc, notInt, isString, notString, notClass, exit;
 
   if (wide) {
     __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
@@ -317,8 +317,11 @@
 
   __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
+  __ brx(Assembler::equal, true, Assembler::pt, isString);
+  __ delayed()->cmp(O2, JVM_CONSTANT_Object);
   __ brx(Assembler::notEqual, true, Assembler::pt, notString);
   __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
+  __ bind(isString);
   __ ld_ptr(O0, O1, Otos_i);
   __ verify_oop(Otos_i);
   __ push(atos);
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -144,6 +144,18 @@
   // buf is started with ", " or is empty
   _features_str = strdup(strlen(buf) > 2 ? buf + 2 : buf);
 
+  // UseVIS is set to the smaller of what the hardware supports and what
+  // the command line requests.  I.e., you cannot force UseVIS to 3 on an
+  // older UltraSPARC which does not support it.
+  if (UseVIS > 3) UseVIS=3;
+  if (UseVIS < 0) UseVIS=0;
+  if (!has_vis3()) // Drop to 2 if no VIS3 support
+    UseVIS = MIN2((intx)2,UseVIS);
+  if (!has_vis2()) // Drop to 1 if no VIS2 support
+    UseVIS = MIN2((intx)1,UseVIS);
+  if (!has_vis1()) // Drop to 0 if no VIS1 support
+    UseVIS = 0;
+
 #ifndef PRODUCT
   if (PrintMiscellaneous && Verbose) {
     tty->print("Allocation: ");
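
A plain-C++ editorial restatement of the clamping rule above (has_visN stand in for the VM_Version feature queries, and the function name is mine); the command line can only lower UseVIS, never raise it past what the CPU reports:

#include <algorithm>
#include <cstdint>

static intptr_t clamp_use_vis(intptr_t requested, bool has_vis1, bool has_vis2, bool has_vis3) {
  intptr_t v = requested;
  if (v > 3) v = 3;
  if (v < 0) v = 0;
  if (!has_vis3) v = std::min<intptr_t>(2, v);   // drop to 2 without VIS3
  if (!has_vis2) v = std::min<intptr_t>(1, v);   // drop to 1 without VIS2
  if (!has_vis1) v = 0;                          // drop to 0 without VIS1
  return v;   // e.g. -XX:UseVIS=3 on a VIS2-only CPU yields 2
}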
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -3804,6 +3804,14 @@
   emit_arith(0x03, 0xC0, dst, src);
 }
 
+void Assembler::andq(Address dst, int32_t imm32) {
+  InstructionMark im(this);
+  prefixq(dst);
+  emit_byte(0x81);
+  emit_operand(rsp, dst, 4);
+  emit_long(imm32);
+}
+
 void Assembler::andq(Register dst, int32_t imm32) {
   (void) prefixq_and_encode(dst->encoding());
   emit_arith(0x81, 0xE0, dst, imm32);
@@ -5090,7 +5098,7 @@
   } else {
     ttyLocker ttyl;
     ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
-    assert(false, "DEBUG MESSAGE");
+    assert(false, err_msg("DEBUG MESSAGE: %s", msg));
   }
   ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
 }
@@ -5653,6 +5661,7 @@
     ttyLocker ttyl;
     ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                     msg);
+    assert(false, err_msg("DEBUG MESSAGE: %s", msg));
   }
 }
 
@@ -5890,6 +5899,53 @@
   call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
 }
 
+void MacroAssembler::super_call_VM(Register oop_result,
+                                   Register last_java_sp,
+                                   address entry_point,
+                                   int number_of_arguments,
+                                   bool check_exceptions) {
+  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
+  MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
+}
+
+void MacroAssembler::super_call_VM(Register oop_result,
+                                   Register last_java_sp,
+                                   address entry_point,
+                                   Register arg_1,
+                                   bool check_exceptions) {
+  pass_arg1(this, arg_1);
+  super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
+}
+
+void MacroAssembler::super_call_VM(Register oop_result,
+                                   Register last_java_sp,
+                                   address entry_point,
+                                   Register arg_1,
+                                   Register arg_2,
+                                   bool check_exceptions) {
+
+  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+  pass_arg2(this, arg_2);
+  pass_arg1(this, arg_1);
+  super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
+}
+
+void MacroAssembler::super_call_VM(Register oop_result,
+                                   Register last_java_sp,
+                                   address entry_point,
+                                   Register arg_1,
+                                   Register arg_2,
+                                   Register arg_3,
+                                   bool check_exceptions) {
+  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
+  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
+  pass_arg3(this, arg_3);
+  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+  pass_arg2(this, arg_2);
+  pass_arg1(this, arg_1);
+  super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
+}
+
 void MacroAssembler::call_VM_base(Register oop_result,
                                   Register java_thread,
                                   Register last_java_sp,
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -779,6 +779,7 @@
   void andl(Register dst, Address src);
   void andl(Register dst, Register src);
 
+  void andq(Address  dst, int32_t imm32);
   void andq(Register dst, int32_t imm32);
   void andq(Register dst, Address src);
   void andq(Register dst, Register src);
@@ -1660,6 +1661,14 @@
                Register arg_1, Register arg_2, Register arg_3,
                bool check_exceptions = true);
 
+  // These always tightly bind to MacroAssembler::call_VM_base,
+  // bypassing the virtual implementation.
+  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
+  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
+  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
+  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
+  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
+
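
Editorial sketch of why the super_call_VM definitions (in assembler_x86.cpp above) pass arguments in reverse order with "smashed arg" asserts: each pass_argN moves a value into a fixed outgoing register, so a later move must not overwrite a register that still holds an earlier, not-yet-moved argument. The register names below are illustrative stand-ins, not the real c_rarg* definitions.

#include <cassert>

enum Reg { REG_A, REG_B, REG_C, REG_D };

static void move_into(Reg dst, Reg src) { (void)dst; (void)src; }  // stands in for MacroAssembler::mov

static void pass_three_args(Reg arg1, Reg arg2, Reg arg3,
                            Reg c_rarg1, Reg c_rarg2, Reg c_rarg3) {
  assert(arg1 != c_rarg3 && arg2 != c_rarg3 && "would be smashed by the arg3 move");
  move_into(c_rarg3, arg3);
  assert(arg1 != c_rarg2 && "would be smashed by the arg2 move");
  move_into(c_rarg2, arg2);
  move_into(c_rarg1, arg1);   // arg1 is moved last, so nothing can smash it
}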
   void call_VM_leaf(address entry_point,
                     int number_of_arguments = 0);
   void call_VM_leaf(address entry_point,
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -47,7 +47,7 @@
 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
   // of 128-bits operands for SSE instructions.
-  jlong *operand = (jlong*)(((long)adr)&((long)(~0xF)));
+  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
   // Store the value to a 128-bits operand.
   operand[0] = lo;
   operand[1] = hi;
@@ -3113,7 +3113,6 @@
     // reload the register args properly if we go slow path. Yuck
 
     // These are proper for the calling convention
-
     store_parameter(length, 2);
     store_parameter(dst_pos, 1);
     store_parameter(dst, 0);
@@ -3351,12 +3350,15 @@
           __ jcc(Assembler::notEqual, *stub->entry());
         }
 
+        // Spill because stubs can use any register they like and it's
+        // easier to restore just those that we care about.
+        store_parameter(dst, 0);
+        store_parameter(dst_pos, 1);
+        store_parameter(length, 2);
+        store_parameter(src_pos, 3);
+        store_parameter(src, 4);
+
 #ifndef _LP64
-        // save caller save registers
-        store_parameter(rax, 2);
-        store_parameter(rcx, 1);
-        store_parameter(rdx, 0);
-
         __ movptr(tmp, dst_klass_addr);
         __ movptr(tmp, Address(tmp, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
         __ push(tmp);
@@ -3372,17 +3374,6 @@
 #else
         __ movl2ptr(length, length); //higher 32bits must be null
 
-        // save caller save registers: copy them to callee save registers
-        __ mov(rbx, rdx);
-        __ mov(r13, r8);
-        __ mov(r14, r9);
-#ifndef _WIN64
-        store_parameter(rsi, 1);
-        store_parameter(rcx, 0);
-        // on WIN64 other incoming parameters are in rdi and rsi saved
-        // across the call
-#endif
-
         __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
         assert_different_registers(c_rarg0, dst, dst_pos, length);
         __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
@@ -3432,25 +3423,13 @@
 
         __ xorl(tmp, -1);
 
-#ifndef _LP64
-        // restore caller save registers
-        assert_different_registers(tmp, rdx, rcx, rax); // result of stub will be lost
-        __ movptr(rdx, Address(rsp, 0*BytesPerWord));
-        __ movptr(rcx, Address(rsp, 1*BytesPerWord));
-        __ movptr(rax, Address(rsp, 2*BytesPerWord));
-#else
-        // restore caller save registers
-        __ mov(rdx, rbx);
-        __ mov(r8, r13);
-        __ mov(r9, r14);
-#ifndef _WIN64
-        assert_different_registers(tmp, rdx, r8, r9, rcx, rsi); // result of stub will be lost
-        __ movptr(rcx, Address(rsp, 0*BytesPerWord));
-        __ movptr(rsi, Address(rsp, 1*BytesPerWord));
-#else
-        assert_different_registers(tmp, rdx, r8, r9); // result of stub will be lost
-#endif
-#endif
+        // Restore previously spilled arguments
+        __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
+        __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
+        __ movptr   (length,  Address(rsp, 2*BytesPerWord));
+        __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
+        __ movptr   (src,     Address(rsp, 4*BytesPerWord));
+
 
         __ subl(length, tmp);
         __ addl(src_pos, tmp);
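
Editorial sketch of the spill/restore symmetry introduced above: the five checkcast-arraycopy operands go into fixed word slots before the slow-path stub call and come back from the same slots afterwards, which is what allows the per-platform #ifdef register juggling to be deleted. Slot numbers mirror the store_parameter()/movptr pairs in the patch; the struct and function names are mine.

#include <cstdint>

struct ArraycopyArgs { intptr_t dst, dst_pos, length, src_pos, src; };

static void spill(intptr_t* rsp_words, const ArraycopyArgs& a) {
  rsp_words[0] = a.dst;       // store_parameter(dst, 0)
  rsp_words[1] = a.dst_pos;   // store_parameter(dst_pos, 1)
  rsp_words[2] = a.length;    // store_parameter(length, 2)
  rsp_words[3] = a.src_pos;   // store_parameter(src_pos, 3)
  rsp_words[4] = a.src;       // store_parameter(src, 4)
}

static ArraycopyArgs restore(const intptr_t* rsp_words) {   // the movptr(..., Address(rsp, n*BytesPerWord)) block
  return ArraycopyArgs{ rsp_words[0], rsp_words[1], rsp_words[2], rsp_words[3], rsp_words[4] };
}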
--- a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -45,6 +45,7 @@
   _pc = pc;
   assert(pc != NULL, "no pc?");
   _cb = CodeCache::find_blob(pc);
+  adjust_unextended_sp();
 
   address original_pc = nmethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
@@ -92,6 +93,7 @@
   // assert(_pc != NULL, "no pc?");
 
   _cb = CodeCache::find_blob(_pc);
+  adjust_unextended_sp();
 
   address original_pc = nmethod::get_deopt_original_pc(this);
   if (original_pc != NULL) {
--- a/hotspot/src/cpu/x86/vm/icache_x86.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/icache_x86.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -43,8 +43,8 @@
 #ifdef AMD64
   enum {
     stub_size      = 64, // Size of the icache flush stub in bytes
-    line_size      = 32, // Icache line size in bytes
-    log2_line_size = 5   // log2(line_size)
+    line_size      = 64, // Icache line size in bytes
+    log2_line_size = 6   // log2(line_size)
   };
 
   // Use default implementation
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -403,9 +403,9 @@
     // interp_only_mode if these events CAN be enabled.
     get_thread(temp);
     // interp_only is an int, on little endian it is sufficient to test the byte only
-    // Is a cmpl faster (ce
+    // Is a cmpl faster?
     cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
-    jcc(Assembler::zero, run_compiled_code);
+    jccb(Assembler::zero, run_compiled_code);
     jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
     bind(run_compiled_code);
   }
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -402,7 +402,7 @@
     // interp_only is an int, on little endian it is sufficient to test the byte only
     // Is a cmpl faster?
     cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
-    jcc(Assembler::zero, run_compiled_code);
+    jccb(Assembler::zero, run_compiled_code);
     jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
     bind(run_compiled_code);
   }
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
 #include "memory/allocation.inline.hpp"
 #include "prims/methodHandles.hpp"
 
@@ -37,6 +38,11 @@
 
 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
 
+// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
+static RegisterOrConstant constant(int value) {
+  return RegisterOrConstant(value);
+}
+
 address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                 address interpreted_entry) {
   // Just before the actual machine code entry point, allocate space
@@ -139,9 +145,9 @@
 
 void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
                                                           // output params:
-                                                          int* frame_size_in_words,
                                                           int* bounce_offset,
-                                                          int* exception_offset) {
+                                                          int* exception_offset,
+                                                          int* frame_size_in_words) {
   (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize;
 
   address start = __ pc();
@@ -366,7 +372,7 @@
                                     Register rdi_stack_move,
                                     Register rcx_amh,
                                     bool might_be_negative) {
-  BLOCK_COMMENT("load_stack_move");
+  BLOCK_COMMENT("load_stack_move {");
   Address rcx_amh_conversion(rcx_amh, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
   __ movl(rdi_stack_move, rcx_amh_conversion);
   __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
@@ -387,9 +393,10 @@
     __ stop("load_stack_move of garbage value");
     __ BIND(L_ok);
   }
+  BLOCK_COMMENT("} load_stack_move");
 }
 
-#ifndef PRODUCT
+#ifdef ASSERT
 void MethodHandles::RicochetFrame::verify_offsets() {
   // Check compatibility of this struct with the more generally used offsets of class frame:
   int ebp_off = sender_link_offset_in_bytes();  // offset from struct base to local rbp value
@@ -539,6 +546,28 @@
 }
 #endif //ASSERT
 
+void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp) {
+  if (JvmtiExport::can_post_interpreter_events()) {
+    Label run_compiled_code;
+    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
+    // compiled code in threads for which the event is enabled.  Check here for
+    // interp_only_mode if these events CAN be enabled.
+#ifdef _LP64
+    Register rthread = r15_thread;
+#else
+    Register rthread = temp;
+    __ get_thread(rthread);
+#endif
+    // interp_only is an int, on little endian it is sufficient to test the byte only
+    // Is a cmpl faster?
+    __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0);
+    __ jccb(Assembler::zero, run_compiled_code);
+    __ jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
+    __ bind(run_compiled_code);
+  }
+  __ jmp(Address(method, methodOopDesc::from_interpreted_offset()));
+}
+
 // Code generation
 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
   // rbx: methodOop
@@ -555,13 +584,11 @@
   // emit WrongMethodType path first, to enable jccb back-branch from main path
   Label wrong_method_type;
   __ bind(wrong_method_type);
-  Label invoke_generic_slow_path;
+  Label invoke_generic_slow_path, invoke_exact_error_path;
   assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");;
   __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact);
   __ jcc(Assembler::notEqual, invoke_generic_slow_path);
-  __ push(rax_mtype);       // required mtype
-  __ push(rcx_recv);        // bad mh (1st stacked argument)
-  __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
+  __ jmp(invoke_exact_error_path);
 
   // here's where control starts out:
   __ align(CodeEntryAlignment);
@@ -595,6 +622,11 @@
 
   __ jump_to_method_handle_entry(rcx_recv, rdi_temp);
 
+  // error path for invokeExact (only)
+  __ bind(invoke_exact_error_path);
+  // Stub wants expected type in rax and the actual type in rcx
+  __ jump(ExternalAddress(StubRoutines::throw_WrongMethodTypeException_entry()));
+
   // for invokeGeneric (only), apply argument and result conversions on the fly
   __ bind(invoke_generic_slow_path);
 #ifdef ASSERT
@@ -632,11 +664,6 @@
   return entry_point;
 }
 
-// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
-static RegisterOrConstant constant(int value) {
-  return RegisterOrConstant(value);
-}
-
 // Helper to insert argument slots into the stack.
 // arg_slots must be a multiple of stack_move_unit() and < 0
 // rax_argslot is decremented to point to the new (shifted) location of the argslot
@@ -1115,9 +1142,6 @@
   guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
 
   // some handy addresses
-  Address rbx_method_fie(     rbx,      methodOopDesc::from_interpreted_offset() );
-  Address rbx_method_fce(     rbx,      methodOopDesc::from_compiled_offset() );
-
   Address rcx_mh_vmtarget(    rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() );
   Address rcx_dmh_vmindex(    rcx_recv, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes() );
 
@@ -1147,7 +1171,7 @@
 
   trace_method_handle(_masm, entry_name(ek));
 
-  BLOCK_COMMENT(entry_name(ek));
+  BLOCK_COMMENT(err_msg("Entry %s {", entry_name(ek)));
 
   switch ((int) ek) {
   case _raise_exception:
@@ -1158,32 +1182,24 @@
       assert(raise_exception_method(), "must be set");
       assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
 
-      const Register rdi_pc = rax;
-      __ pop(rdi_pc);  // caller PC
+      const Register rax_pc = rax;
+      __ pop(rax_pc);  // caller PC
       __ mov(rsp, saved_last_sp);  // cut the stack back to where the caller started
 
       Register rbx_method = rbx_temp;
-      Label L_no_method;
-      // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
       __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
-      __ testptr(rbx_method, rbx_method);
-      __ jccb(Assembler::zero, L_no_method);
 
       const int jobject_oop_offset = 0;
       __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset));  // dereference the jobject
-      __ testptr(rbx_method, rbx_method);
-      __ jccb(Assembler::zero, L_no_method);
-      __ verify_oop(rbx_method);
+
+      __ movptr(rsi, rsp);
+      __ subptr(rsp, 3 * wordSize);
+      __ push(rax_pc);         // restore caller PC
 
-      NOT_LP64(__ push(rarg2_required));
-      __ push(rdi_pc);         // restore caller PC
-      __ jmp(rbx_method_fce);  // jump to compiled entry
-
-      // Do something that is at least causes a valid throw from the interpreter.
-      __ bind(L_no_method);
-      __ push(rarg2_required);
-      __ push(rarg1_actual);
-      __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
+      __ movptr(__ argument_address(constant(2)), rarg0_code);
+      __ movptr(__ argument_address(constant(1)), rarg1_actual);
+      __ movptr(__ argument_address(constant(0)), rarg2_required);
+      jump_from_method_handle(_masm, rbx_method, rax);
     }
     break;
 
@@ -1202,7 +1218,7 @@
         __ null_check(rcx_recv);
         __ verify_oop(rcx_recv);
       }
-      __ jmp(rbx_method_fie);
+      jump_from_method_handle(_masm, rbx_method, rax);
     }
     break;
 
@@ -1235,7 +1251,7 @@
       __ movptr(rbx_method, vtable_entry_addr);
 
       __ verify_oop(rbx_method);
-      __ jmp(rbx_method_fie);
+      jump_from_method_handle(_masm, rbx_method, rax);
     }
     break;
 
@@ -1270,7 +1286,7 @@
                                  no_such_interface);
 
       __ verify_oop(rbx_method);
-      __ jmp(rbx_method_fie);
+      jump_from_method_handle(_masm, rbx_method, rax);
       __ hlt();
 
       __ bind(no_such_interface);
@@ -1292,7 +1308,7 @@
   case _bound_int_direct_mh:
   case _bound_long_direct_mh:
     {
-      bool direct_to_method = (ek >= _bound_ref_direct_mh);
+      const bool direct_to_method = (ek >= _bound_ref_direct_mh);
       BasicType arg_type  = ek_bound_mh_arg_type(ek);
       int       arg_slots = type2size[arg_type];
 
@@ -1318,7 +1334,7 @@
         Register rbx_method = rbx_temp;
         __ load_heap_oop(rbx_method, rcx_mh_vmtarget);
         __ verify_oop(rbx_method);
-        __ jmp(rbx_method_fie);
+        jump_from_method_handle(_masm, rbx_method, rax);
       } else {
         __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
         __ verify_oop(rcx_recv);
@@ -1632,14 +1648,16 @@
           //   rax = src_addr + swap_bytes
           //   rbx = dest_addr
           //   while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++;
-          __ addptr(rbx_destslot, wordSize);
+          // dest_slot denotes an exclusive upper limit
+          int limit_bias = OP_ROT_ARGS_DOWN_LIMIT_BIAS;
+          if (limit_bias != 0)
+            __ addptr(rbx_destslot, - limit_bias * wordSize);
           move_arg_slots_down(_masm,
                               Address(rax_argslot, swap_slots * wordSize),
                               rbx_destslot,
                               -swap_slots,
                               rax_argslot, rdx_temp);
-
-          __ subptr(rbx_destslot, wordSize);
+          __ subptr(rbx_destslot, swap_slots * wordSize);
         }
         // pop the original first chunk into the destination slot, now free
         for (int i = 0; i < swap_slots; i++) {
@@ -1929,7 +1947,7 @@
       // In the non-retaining case, this might move keep2 either up or down.
       // We don't have to copy the whole | RF... collect | complex,
       // but we must adjust RF.saved_args_base.
-      // Also, from now on, we will forget about the origial copy of |collect|.
+      // Also, from now on, we will forget about the original copy of |collect|.
       // If we are retaining it, we will treat it as part of |keep2|.
       // For clarity we will define |keep3| = |collect|keep2| or |keep2|.
 
@@ -1986,7 +2004,7 @@
       // Net shift (&new_argv - &old_argv) is (close_count - open_count).
       bool zero_open_count = (open_count == 0);  // remember this bit of info
       if (move_keep3 && fix_arg_base) {
-        // It will be easier t have everything in one register:
+        // It will be easier to have everything in one register:
         if (close_count.is_register()) {
           // Deduct open_count from close_count register to get a clean +/- value.
           __ subptr(close_count.as_register(), open_count);
@@ -2396,6 +2414,7 @@
     __ nop();
     return;
   }
+  BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek)));
   __ hlt();
 
   address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -25,6 +25,11 @@
 // Platform-specific definitions for method handles.
 // These definitions are inlined into class MethodHandles.
 
+// Adapters
+enum /* platform_dependent_constants */ {
+  adapter_code_size = NOT_LP64(30000 DEBUG_ONLY(+ 10000)) LP64_ONLY(80000 DEBUG_ONLY(+ 120000))
+};
+
 public:
 
 // The stack just after the recursive call from a ricochet frame
@@ -188,7 +193,9 @@
 
   static void generate_ricochet_blob(MacroAssembler* _masm,
                                      // output params:
-                                     int* frame_size_in_words, int* bounce_offset, int* exception_offset);
+                                     int* bounce_offset,
+                                     int* exception_offset,
+                                     int* frame_size_in_words);
 
   static void enter_ricochet_frame(MacroAssembler* _masm,
                                    Register rcx_recv,
@@ -284,6 +291,10 @@
                  "reference is a MH");
   }
 
+  // Similar to InterpreterMacroAssembler::jump_from_interpreted.
+  // Takes care of the special dispatch needed for single-stepping, too.
+  static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp);
+
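
Editorial sketch of the dispatch rule jump_from_method_handle implements (mirroring InterpreterMacroAssembler::jump_from_interpreted): when JVMTI may post interpreter events and the target thread is in interp_only_mode, jump to the method's interpreter entry instead of its normal from-interpreted entry, so single-stepping keeps working through method handle adapters. Names below are illustrative.

#include <cstdint>

struct Method { void* interpreter_entry; void* from_interpreted_entry; };

static void* select_entry(const Method& m, bool jvmti_can_post_interpreter_events, int interp_only_mode) {
  if (jvmti_can_post_interpreter_events && interp_only_mode != 0) {
    return m.interpreter_entry;        // forced interpreted dispatch (single-stepping, etc.)
  }
  return m.from_interpreted_entry;     // normal path: may enter compiled code
}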
   static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
 
   static Register saved_last_sp_register() {
--- a/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,8 +42,6 @@
 
 #define __ masm->
 
-ExceptionBlob*     OptoRuntime::_exception_blob;
-
 //------------------------------generate_exception_blob---------------------------
 // creates exception blob at the end
 // Using exception blob, this code is jumped from a compiled method.
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -42,18 +42,6 @@
 #endif
 
 #define __ masm->
-#ifdef COMPILER2
-UncommonTrapBlob   *SharedRuntime::_uncommon_trap_blob;
-#endif // COMPILER2
-
-DeoptimizationBlob *SharedRuntime::_deopt_blob;
-SafepointBlob      *SharedRuntime::_polling_page_safepoint_handler_blob;
-SafepointBlob      *SharedRuntime::_polling_page_return_handler_blob;
-RuntimeStub*       SharedRuntime::_wrong_method_blob;
-RuntimeStub*       SharedRuntime::_ic_miss_blob;
-RuntimeStub*       SharedRuntime::_resolve_opt_virtual_call_blob;
-RuntimeStub*       SharedRuntime::_resolve_virtual_call_blob;
-RuntimeStub*       SharedRuntime::_resolve_static_call_blob;
 
 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
 
@@ -2253,31 +2241,6 @@
   return 0;
 }
 
-//----------------------------generate_ricochet_blob---------------------------
-void SharedRuntime::generate_ricochet_blob() {
-  if (!EnableInvokeDynamic)  return;  // leave it as a null
-
-  // allocate space for the code
-  ResourceMark rm;
-  // setup code generation tools
-  CodeBuffer   buffer("ricochet_blob", 256, 256);
-  MacroAssembler* masm = new MacroAssembler(&buffer);
-
-  int frame_size_in_words = -1, bounce_offset = -1, exception_offset = -1;
-  MethodHandles::RicochetFrame::generate_ricochet_blob(masm, &frame_size_in_words, &bounce_offset, &exception_offset);
-
-  // -------------
-  // make sure all code is generated
-  masm->flush();
-
-  // failed to generate?
-  if (frame_size_in_words < 0 || bounce_offset < 0 || exception_offset < 0) {
-    assert(false, "bad ricochet blob");
-    return;
-  }
-
-  _ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words);
-}
 
 //------------------------------generate_deopt_blob----------------------------
 void SharedRuntime::generate_deopt_blob() {
@@ -2816,7 +2779,7 @@
 // setup oopmap, and calls safepoint code to stop the compiled code for
 // a safepoint.
 //
-static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
+SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
 
   // Account for thread arg in our frame
   const int additional_words = 1;
@@ -2913,7 +2876,7 @@
 // but since this is generic code we don't know what they are and the caller
 // must do any gc of the args.
 //
-static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
+RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
 
   // allocate space for the code
@@ -2995,36 +2958,3 @@
   // frame_size_words or bytes??
   return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
 }
-
-void SharedRuntime::generate_stubs() {
-
-  _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
-                                        "wrong_method_stub");
-
-  _ic_miss_blob      = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
-                                        "ic_miss_stub");
-
-  _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
-                                        "resolve_opt_virtual_call");
-
-  _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
-                                        "resolve_virtual_call");
-
-  _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
-                                        "resolve_static_call");
-
-  _polling_page_safepoint_handler_blob =
-    generate_handler_blob(CAST_FROM_FN_PTR(address,
-                   SafepointSynchronize::handle_polling_page_exception), false);
-
-  _polling_page_return_handler_blob =
-    generate_handler_blob(CAST_FROM_FN_PTR(address,
-                   SafepointSynchronize::handle_polling_page_exception), true);
-
-  generate_ricochet_blob();
-
-  generate_deopt_blob();
-#ifdef COMPILER2
-  generate_uncommon_trap_blob();
-#endif // COMPILER2
-}
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -41,24 +41,10 @@
 #include "opto/runtime.hpp"
 #endif
 
-DeoptimizationBlob *SharedRuntime::_deopt_blob;
-#ifdef COMPILER2
-UncommonTrapBlob   *SharedRuntime::_uncommon_trap_blob;
-ExceptionBlob      *OptoRuntime::_exception_blob;
-#endif // COMPILER2
-
-SafepointBlob      *SharedRuntime::_polling_page_safepoint_handler_blob;
-SafepointBlob      *SharedRuntime::_polling_page_return_handler_blob;
-RuntimeStub*       SharedRuntime::_wrong_method_blob;
-RuntimeStub*       SharedRuntime::_ic_miss_blob;
-RuntimeStub*       SharedRuntime::_resolve_opt_virtual_call_blob;
-RuntimeStub*       SharedRuntime::_resolve_virtual_call_blob;
-RuntimeStub*       SharedRuntime::_resolve_static_call_blob;
+#define __ masm->
 
 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
 
-#define __ masm->
-
 class SimpleRuntimeFrame {
 
   public:
@@ -2530,32 +2516,6 @@
 }
 
 
-//----------------------------generate_ricochet_blob---------------------------
-void SharedRuntime::generate_ricochet_blob() {
-  if (!EnableInvokeDynamic)  return;  // leave it as a null
-
-  // allocate space for the code
-  ResourceMark rm;
-  // setup code generation tools
-  CodeBuffer   buffer("ricochet_blob", 512, 512);
-  MacroAssembler* masm = new MacroAssembler(&buffer);
-
-  int frame_size_in_words = -1, bounce_offset = -1, exception_offset = -1;
-  MethodHandles::RicochetFrame::generate_ricochet_blob(masm, &frame_size_in_words, &bounce_offset, &exception_offset);
-
-  // -------------
-  // make sure all code is generated
-  masm->flush();
-
-  // failed to generate?
-  if (frame_size_in_words < 0 || bounce_offset < 0 || exception_offset < 0) {
-    assert(false, "bad ricochet blob");
-    return;
-  }
-
-  _ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words);
-}
-
 //------------------------------generate_deopt_blob----------------------------
 void SharedRuntime::generate_deopt_blob() {
   // Allocate space for the code
@@ -3046,7 +3006,7 @@
 // Generate a special Compile2Runtime blob that saves all registers,
 // and setup oopmap.
 //
-static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
+SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
   assert(StubRoutines::forward_exception_entry() != NULL,
          "must be generated before");
 
@@ -3132,7 +3092,7 @@
 // but since this is generic code we don't know what they are and the caller
 // must do any gc of the args.
 //
-static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
+RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
   assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
 
   // allocate space for the code
@@ -3209,38 +3169,6 @@
 }
 
 
-void SharedRuntime::generate_stubs() {
-
-  _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
-                                        "wrong_method_stub");
-  _ic_miss_blob =      generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
-                                        "ic_miss_stub");
-  _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
-                                        "resolve_opt_virtual_call");
-
-  _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
-                                        "resolve_virtual_call");
-
-  _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
-                                        "resolve_static_call");
-  _polling_page_safepoint_handler_blob =
-    generate_handler_blob(CAST_FROM_FN_PTR(address,
-                   SafepointSynchronize::handle_polling_page_exception), false);
-
-  _polling_page_return_handler_blob =
-    generate_handler_blob(CAST_FROM_FN_PTR(address,
-                   SafepointSynchronize::handle_polling_page_exception), true);
-
-  generate_ricochet_blob();
-
-  generate_deopt_blob();
-
-#ifdef COMPILER2
-  generate_uncommon_trap_blob();
-#endif // COMPILER2
-}
-
-
 #ifdef COMPILER2
 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
 //
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -2151,6 +2151,8 @@
   // if they expect all registers to be preserved.
   enum layout {
     thread_off,    // last_java_sp
+    arg1_off,
+    arg2_off,
     rbp_off,       // callee saved register
     ret_pc,
     framesize
@@ -2185,7 +2187,7 @@
   // either at call sites or otherwise assume that stack unwinding will be initiated,
   // so caller saved registers were assumed volatile in the compiler.
   address generate_throw_exception(const char* name, address runtime_entry,
-                                   bool restore_saved_exception_pc) {
+                                   bool restore_saved_exception_pc, Register arg1 = noreg, Register arg2 = noreg) {
 
     int insts_size = 256;
     int locs_size  = 32;
@@ -2218,6 +2220,13 @@
 
     // push java thread (becomes first argument of C function)
     __ movptr(Address(rsp, thread_off * wordSize), java_thread);
+    if (arg1 != noreg) {
+      __ movptr(Address(rsp, arg1_off * wordSize), arg1);
+    }
+    if (arg2 != noreg) {
+      assert(arg1 != noreg, "missing reg arg");
+      __ movptr(Address(rsp, arg2_off * wordSize), arg2);
+    }
 
     // Set up last_Java_sp and last_Java_fp
     __ set_last_Java_frame(java_thread, rsp, rbp, NULL);
@@ -2309,6 +2318,12 @@
                                                                                    CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
     StubRoutines::_d2l_wrapper                              = generate_d2i_wrapper(T_LONG,
                                                                                    CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
+
+    // Build this early so it's available for the interpreter
+    StubRoutines::_throw_WrongMethodTypeException_entry =
+      generate_throw_exception("WrongMethodTypeException throw_exception",
+                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
+                               false, rax, rcx);
   }
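
Editorial sketch of the 32-bit frame layout used by generate_throw_exception above: on x86_32 the runtime arguments travel on the stack, so the new arg1_off/arg2_off word slots sit directly above the thread slot that is always passed. The enum below simply spells out the implicit numbering of the layout enum in the patch.

#include <cstdint>

enum ThrowStubLayout {    // word offsets from rsp once the frame is set up
  thread_off = 0,         // first C argument: the JavaThread*
  arg1_off   = 1,         // optional second C argument
  arg2_off   = 2,         // optional third C argument
  rbp_off    = 3,         // callee-saved rbp
  ret_pc     = 4,         // return address
  framesize  = 5
};

static intptr_t* slot(intptr_t* rsp, int word_off) { return rsp + word_off; }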
 
 
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -2934,7 +2934,9 @@
   // caller saved registers were assumed volatile in the compiler.
   address generate_throw_exception(const char* name,
                                    address runtime_entry,
-                                   bool restore_saved_exception_pc) {
+                                   bool restore_saved_exception_pc,
+                                   Register arg1 = noreg,
+                                   Register arg2 = noreg) {
     // Information about frame layout at time of blocking runtime call.
     // Note that we only have to preserve callee-saved registers since
     // the compilers are responsible for supplying a continuation point
@@ -2980,6 +2982,13 @@
     __ set_last_Java_frame(rsp, rbp, NULL);
 
     // Call runtime
+    if (arg1 != noreg) {
+      assert(arg2 != c_rarg1, "clobbered");
+      __ movptr(c_rarg1, arg1);
+    }
+    if (arg2 != noreg) {
+      __ movptr(c_rarg2, arg2);
+    }
     __ movptr(c_rarg0, r15_thread);
     BLOCK_COMMENT("call runtime_entry");
     __ call(RuntimeAddress(runtime_entry));
@@ -3052,6 +3061,14 @@
     StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
 
     StubRoutines::x86::_verify_mxcsr_entry    = generate_verify_mxcsr();
+
+    // Build this early so it's available for the interpreter.  The stub
+    // expects the required and actual types as register arguments in
+    // j_rarg0 and j_rarg1, respectively.
+    StubRoutines::_throw_WrongMethodTypeException_entry =
+      generate_throw_exception("WrongMethodTypeException throw_exception",
+                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
+                               false, rax, rcx);
   }
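
Editorial sketch of the clobber check in the x86_64 variant above: arg1 is moved into c_rarg1 before arg2 is moved into c_rarg2, so the assert guards against arg2 already living in c_rarg1 and being overwritten before its own move. The enum below uses the System V argument registers as illustrative stand-ins (Win64 uses different registers), not the real Register definitions.

#include <cassert>

enum Reg { RDI, RSI, RDX, NOREG };   // c_rarg0, c_rarg1, c_rarg2 on the System V ABI

static void pass_stub_args(Reg arg1, Reg arg2, Reg& c_rarg1_slot, Reg& c_rarg2_slot) {
  if (arg1 != NOREG) {
    assert(arg2 != RSI && "arg2 would be clobbered by the move into c_rarg1");
    c_rarg1_slot = arg1;
  }
  if (arg2 != NOREG) {
    c_rarg2_slot = arg2;
  }
}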
 
   void generate_all() {
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -34,11 +34,6 @@
   code_size2 = 22000            // simply increase if too small (assembler will crash if too small)
 };
 
-// MethodHandles adapters
-enum method_handles_platform_dependent_constants {
-  method_handles_adapters_code_size = 30000 DEBUG_ONLY(+ 10000)
-};
-
 class x86 {
  friend class StubGenerator;
  friend class VMStructs;
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,11 +36,6 @@
   code_size2 = 22000           // simply increase if too small (assembler will crash if too small)
 };
 
-// MethodHandles adapters
-enum method_handles_platform_dependent_constants {
-  method_handles_adapters_code_size = 80000 DEBUG_ONLY(+ 120000)
-};
-
 class x86 {
  friend class StubGenerator;
 
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -112,32 +112,6 @@
   return entry;
 }
 
-// Arguments are: required type at TOS+4, failing object (or NULL) at TOS.
-address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
-  address entry = __ pc();
-
-  __ pop(rbx);                  // actual failing object is at TOS
-  __ pop(rax);                  // required type is at TOS+4
-
-  __ verify_oop(rbx);
-  __ verify_oop(rax);
-
-  // Various method handle types use interpreter registers as temps.
-  __ restore_bcp();
-  __ restore_locals();
-
-  // Expression stack must be empty before entering the VM for an exception.
-  __ empty_expression_stack();
-  __ empty_FPU_stack();
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_WrongMethodTypeException),
-             // pass required type, failing object (or NULL)
-             rax, rbx);
-  return entry;
-}
-
-
 address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
   assert(!pass_oop || message == NULL, "either oop or message but not both");
   address entry = __ pc();
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -120,31 +120,6 @@
   return entry;
 }
 
-// Arguments are: required type at TOS+8, failing object (or NULL) at TOS+4.
-address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
-  address entry = __ pc();
-
-  __ pop(c_rarg2);              // failing object is at TOS
-  __ pop(c_rarg1);              // required type is at TOS+8
-
-  __ verify_oop(c_rarg1);
-  __ verify_oop(c_rarg2);
-
-  // Various method handle types use interpreter registers as temps.
-  __ restore_bcp();
-  __ restore_locals();
-
-  // Expression stack must be empty before entering the VM for an exception.
-  __ empty_expression_stack();
-
-  __ call_VM(noreg,
-             CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::throw_WrongMethodTypeException),
-             // pass required type, failing object (or NULL)
-             c_rarg1, c_rarg2);
-  return entry;
-}
-
 address TemplateInterpreterGenerator::generate_exception_handler_common(
         const char* name, const char* message, bool pass_oop) {
   assert(!pass_oop || message == NULL, "either oop or message but not both");
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -373,15 +373,17 @@
     __ jcc(Assembler::equal, L);
     __ cmpl(rdx, JVM_CONSTANT_String);
     __ jcc(Assembler::equal, L);
+    __ cmpl(rdx, JVM_CONSTANT_Object);
+    __ jcc(Assembler::equal, L);
     __ stop("unexpected tag type in ldc");
     __ bind(L);
   }
 #endif
   Label isOop;
   // atos and itos
-  // String is only oop type we will see here
-  __ cmpl(rdx, JVM_CONSTANT_String);
-  __ jccb(Assembler::equal, isOop);
+  // Integer is the only non-oop type we will see here
+  __ cmpl(rdx, JVM_CONSTANT_Integer);
+  __ jccb(Assembler::notEqual, isOop);
   __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
   __ push(itos);
   __ jmp(Done);
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -385,6 +385,8 @@
     __ jcc(Assembler::equal, L);
     __ cmpl(rdx, JVM_CONSTANT_String);
     __ jcc(Assembler::equal, L);
+    __ cmpl(rdx, JVM_CONSTANT_Object);
+    __ jcc(Assembler::equal, L);
     __ stop("unexpected tag type in ldc");
     __ bind(L);
   }
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -321,6 +321,20 @@
   if (UseSSE < 2) UseSSE = 2;
 #endif
 
+#ifdef AMD64
+  // The flush_icache_stub has to be generated first.
+  // That is why the ICache line size is hard-coded in the ICache class;
+  // see icache_x86.hpp. It is also the reason why we can't use the
+  // clflush instruction in the 32-bit VM, since it could be running
+  // on a CPU which does not support it.
+  //
+  // The only thing we can do is verify that the hard-coded
+  // ICache::line_size matches the actual value.
+  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
+  // clflush_size is reported in quadwords (8 bytes each).
+  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
+#endif
+
   // If the OS doesn't support SSE, we can't use this feature even if the HW does
   if (!os::supports_sse())
     _cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);
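
A small worked check of the arithmetic behind the guarantees above: CPUID reports the CLFLUSH line size in quadwords, so the required value of 8 means 8 * 8 = 64 bytes, matching the ICache::line_size now hard-coded in icache_x86.hpp. The helper name below is mine.

#include <cassert>

static void check_clflush_line_size(unsigned clflush_size_in_quadwords) {
  const unsigned bytes_per_quadword  = 8;
  const unsigned hardcoded_line_size = 64;   // ICache::line_size from icache_x86.hpp
  assert(clflush_size_in_quadwords * bytes_per_quadword == hardcoded_line_size &&
         "flush stub was generated for a different cache line size");
}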
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -91,7 +91,9 @@
                cmpxchg8 : 1,
                         : 6,
                cmov     : 1,
-                        : 7,
+                        : 3,
+               clflush  : 1,
+                        : 3,
                mmx      : 1,
                fxsr     : 1,
                sse      : 1,
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Wed Aug 17 22:47:12 2011 -0700
@@ -830,6 +830,17 @@
   }
 }
 
+// This could be in MacroAssembler but it's fairly C2 specific
+void emit_cmpfp_fixup(MacroAssembler& _masm) {
+  Label exit;
+  __ jccb(Assembler::noParity, exit);
+  __ pushf();
+  __ andq(Address(rsp, 0), 0xffffff2b);
+  __ popf();
+  __ bind(exit);
+  __ nop(); // (target for branch to avoid branch to branch)
+}
+
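
An editorial reading of the 0xffffff2b mask in emit_cmpfp_fixup(), assuming the standard EFLAGS bit layout: ucomiss leaves ZF=PF=CF=1 for unordered (NaN) operands, and the andq on the saved flags clears PF, AF, ZF and SF while keeping CF, so the unordered case afterwards reads as "below" (less than). A quick check of the mask arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t CF = 1u << 0, PF = 1u << 2, AF = 1u << 4, ZF = 1u << 6, SF = 1u << 7;
  const uint32_t mask = 0xffffff2b;                 // the immediate used by the fixup
  uint32_t unordered = ZF | PF | CF;                // flags after ucomiss with a NaN operand
  uint32_t fixed     = unordered & mask;            // the andq (%rsp), 0xffffff2b
  std::printf("CF=%u PF=%u ZF=%u\n", fixed & CF, (fixed & PF) >> 2, (fixed & ZF) >> 6);
  (void)AF; (void)SF;                               // both are also cleared by the mask
  return 0;                                         // prints CF=1 PF=0 ZF=0
}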
 
 //=============================================================================
 const bool Matcher::constant_table_absolute_addressing = true;
@@ -2173,27 +2184,9 @@
     emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
   %}
 
-  enc_class cmpfp_fixup()
-  %{
-    // jnp,s exit
-    emit_opcode(cbuf, 0x7B);
-    emit_d8(cbuf, 0x0A);
-
-    // pushfq
-    emit_opcode(cbuf, 0x9C);
-
-    // andq $0xffffff2b, (%rsp)
-    emit_opcode(cbuf, Assembler::REX_W);
-    emit_opcode(cbuf, 0x81);
-    emit_opcode(cbuf, 0x24);
-    emit_opcode(cbuf, 0x24);
-    emit_d32(cbuf, 0xffffff2b);
-
-    // popfq
-    emit_opcode(cbuf, 0x9D);
-
-    // nop (target for branch to avoid branch to branch)
-    emit_opcode(cbuf, 0x90);
+  enc_class cmpfp_fixup() %{
+      MacroAssembler _masm(&cbuf);
+      emit_cmpfp_fixup(_masm);
   %}
 
   enc_class cmpfp3(rRegI dst)
@@ -3179,50 +3172,6 @@
     emit_rm(cbuf, 0x3, 0x0, dstenc);
   %}
 
-  enc_class enc_cmpLTP(no_rcx_RegI p, no_rcx_RegI q, no_rcx_RegI y,
-                       rcx_RegI tmp)
-  %{
-    // cadd_cmpLT
-
-    int tmpReg = $tmp$$reg;
-
-    int penc = $p$$reg;
-    int qenc = $q$$reg;
-    int yenc = $y$$reg;
-
-    // subl $p,$q
-    if (penc < 8) {
-      if (qenc >= 8) {
-        emit_opcode(cbuf, Assembler::REX_B);
-      }
-    } else {
-      if (qenc < 8) {
-        emit_opcode(cbuf, Assembler::REX_R);
-      } else {
-        emit_opcode(cbuf, Assembler::REX_RB);
-      }
-    }
-    emit_opcode(cbuf, 0x2B);
-    emit_rm(cbuf, 0x3, penc & 7, qenc & 7);
-
-    // sbbl $tmp, $tmp
-    emit_opcode(cbuf, 0x1B);
-    emit_rm(cbuf, 0x3, tmpReg, tmpReg);
-
-    // andl $tmp, $y
-    if (yenc >= 8) {
-      emit_opcode(cbuf, Assembler::REX_B);
-    }
-    emit_opcode(cbuf, 0x23);
-    emit_rm(cbuf, 0x3, tmpReg, yenc & 7);
-
-    // addl $p,$tmp
-    if (penc >= 8) {
-        emit_opcode(cbuf, Assembler::REX_R);
-    }
-    emit_opcode(cbuf, 0x03);
-    emit_rm(cbuf, 0x3, penc & 7, tmpReg);
-  %}
 
   // Compare the longs and set -1, 0, or 1 into dst
   enc_class cmpl3_flag(rRegL src1, rRegL src2, rRegI dst)
@@ -10206,9 +10155,7 @@
 %}
 
 
-instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y,
-                         rRegI tmp,
-                         rFlagsReg cr)
+instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, rRegI tmp, rFlagsReg cr)
 %{
   match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
   effect(TEMP tmp, KILL cr);
@@ -10218,25 +10165,19 @@
             "sbbl    $tmp, $tmp\n\t"
             "andl    $tmp, $y\n\t"
             "addl    $p, $tmp" %}
-  ins_encode(enc_cmpLTP(p, q, y, tmp));
+  ins_encode %{
+    Register Rp = $p$$Register;
+    Register Rq = $q$$Register;
+    Register Ry = $y$$Register;
+    Register Rt = $tmp$$Register;
+    __ subl(Rp, Rq);
+    __ sbbl(Rt, Rt);
+    __ andl(Rt, Ry);
+    __ addl(Rp, Rt);
+  %}
   ins_pipe(pipe_cmplt);
 %}
 
-/* If I enable this, I encourage spilling in the inner loop of compress.
-instruct cadd_cmpLTMask_mem( rRegI p, rRegI q, memory y, rRegI tmp, rFlagsReg cr )
-%{
-  match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q)));
-  effect( TEMP tmp, KILL cr );
-  ins_cost(400);
-
-  format %{ "SUB    $p,$q\n\t"
-            "SBB    RCX,RCX\n\t"
-            "AND    RCX,$y\n\t"
-            "ADD    $p,RCX" %}
-  ins_encode( enc_cmpLTP_mem(p,q,y,tmp) );
-%}
-*/
-
 //---------- FP Instructions------------------------------------------------
 
 instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2)
@@ -10305,14 +10246,8 @@
             "popfq\n"
     "exit:   nop\t# avoid branch to branch" %}
   ins_encode %{
-    Label L_exit;
     __ ucomiss($src$$XMMRegister, $constantaddress($con));
-    __ jcc(Assembler::noParity, L_exit);
-    __ pushf();
-    __ andq(rsp, 0xffffff2b);
-    __ popf();
-    __ bind(L_exit);
-    __ nop();
+    emit_cmpfp_fixup(_masm);
   %}
   ins_pipe(pipe_slow);
 %}
@@ -10393,14 +10328,8 @@
             "popfq\n"
     "exit:   nop\t# avoid branch to branch" %}
   ins_encode %{
-    Label L_exit;
     __ ucomisd($src$$XMMRegister, $constantaddress($con));
-    __ jcc(Assembler::noParity, L_exit);
-    __ pushf();
-    __ andq(rsp, 0xffffff2b);
-    __ popf();
-    __ bind(L_exit);
-    __ nop();
+    emit_cmpfp_fixup(_masm);
   %}
   ins_pipe(pipe_slow);
 %}
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -657,7 +657,7 @@
   if (!is_exact) {
     if (method->intrinsic_id() == vmIntrinsics::_invokeExact) {
       CALL_VM_NOCHECK_NOFIX(
-        InterpreterRuntime::throw_WrongMethodTypeException(
+        SharedRuntime::throw_WrongMethodTypeException(
           thread, method_type, mhtype));
       // NB all oops trashed!
       assert(HAS_PENDING_EXCEPTION, "should do");
@@ -673,7 +673,7 @@
     oop adapter = java_lang_invoke_MethodTypeForm::genericInvoker(form);
     if (adapter == NULL) {
       CALL_VM_NOCHECK_NOFIX(
-        InterpreterRuntime::throw_WrongMethodTypeException(
+        SharedRuntime::throw_WrongMethodTypeException(
           thread, method_type, mhtype));
       // NB all oops trashed!
       assert(HAS_PENDING_EXCEPTION, "should do");
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -169,7 +169,35 @@
 /* Used to protect dlsym() calls */
 static pthread_mutex_t dl_mutex;
 
-////////////////////////////////////////////////////////////////////////////////
+#ifdef JAVASE_EMBEDDED
+class MemNotifyThread: public Thread {
+  friend class VMStructs;
+ public:
+  virtual void run();
+
+ private:
+  static MemNotifyThread* _memnotify_thread;
+  int _fd;
+
+ public:
+
+  // Constructor
+  MemNotifyThread(int fd);
+
+  // Tester
+  bool is_memnotify_thread() const { return true; }
+
+  // Printing
+  char* name() const { return (char*)"Linux MemNotify Thread"; }
+
+  // Returns the single instance of the MemNotifyThread
+  static MemNotifyThread* memnotify_thread() { return _memnotify_thread; }
+
+  // Create and start the single instance of MemNotifyThread
+  static void start();
+};
+#endif // JAVASE_EMBEDDED
+
 // utility functions
 
 static int SR_initialize();
@@ -2085,6 +2113,14 @@
   st->cr();
 }
 
+void os::pd_print_cpu_info(outputStream* st) {
+  st->print("\n/proc/cpuinfo:\n");
+  if (!_print_ascii_file("/proc/cpuinfo", st)) {
+    st->print("  <Not Available>");
+  }
+  st->cr();
+}
+
 void os::print_memory_info(outputStream* st) {
 
   st->print("Memory:");
@@ -4237,7 +4273,16 @@
 }
 
 // this is called at the end of vm_initialization
-void os::init_3(void) { }
+void os::init_3(void)
+{
+#ifdef JAVASE_EMBEDDED
+  // Start the MemNotifyThread
+  if (LowMemoryProtection) {
+    MemNotifyThread::start();
+  }
+  return;
+#endif
+}
 
 // Mark the polling page as unreadable
 void os::make_polling_page_unreadable(void) {
@@ -5360,3 +5405,78 @@
     return true;
 }
 
+
+#ifdef JAVASE_EMBEDDED
+//
+// A thread to watch the '/dev/mem_notify' device, which will tell us when the OS is running low on memory.
+//
+MemNotifyThread* MemNotifyThread::_memnotify_thread = NULL;
+
+// ctor
+//
+MemNotifyThread::MemNotifyThread(int fd): Thread() {
+  assert(memnotify_thread() == NULL, "we can only allocate one MemNotifyThread");
+  _fd = fd;
+
+  if (os::create_thread(this, os::os_thread)) {
+    _memnotify_thread = this;
+    os::set_priority(this, NearMaxPriority);
+    os::start_thread(this);
+  }
+}
+
+// Where all the work gets done
+//
+void MemNotifyThread::run() {
+  assert(this == memnotify_thread(), "expected the singleton MemNotifyThread");
+
+  // Set up the select arguments
+  fd_set rfds;
+  if (_fd != -1) {
+    FD_ZERO(&rfds);
+    FD_SET(_fd, &rfds);
+  }
+
+  // Now wait for the mem_notify device to wake up
+  while (1) {
+    // Wait for the mem_notify device to signal us.
+    int rc = select(_fd+1, _fd != -1 ? &rfds : NULL, NULL, NULL, NULL);
+    if (rc == -1) {
+      perror("select!\n");
+      break;
+    } else if (rc) {
+      //ssize_t free_before = os::available_memory();
+      //tty->print ("Notified: Free: %dK \n",os::available_memory()/1024);
+
+      // The kernel is telling us there is not much memory left...
+      // try to do something about that
+
+      // If we are not already in a GC, try one.
+      if (!Universe::heap()->is_gc_active()) {
+        Universe::heap()->collect(GCCause::_allocation_failure);
+
+        //ssize_t free_after = os::available_memory();
+        //tty->print ("Post-Notify: Free: %dK\n",free_after/1024);
+        //tty->print ("GC freed: %dK\n", (free_after - free_before)/1024);
+      }
+      // We might want to do something like the following if we find the GCs are not helping...
+      // Universe::heap()->size_policy()->set_gc_time_limit_exceeded(true);
+    }
+  }
+}
+
+//
+// See if the /dev/mem_notify device exists, and if so, start a thread to monitor it.
+//
+void MemNotifyThread::start() {
+  int    fd;
+  fd = open ("/dev/mem_notify", O_RDONLY, 0);
+  if (fd < 0) {
+      return;
+  }
+
+  if (memnotify_thread() == NULL) {
+    new MemNotifyThread(fd);
+  }
+}
+#endif // JAVASE_EMBEDDED
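The MemNotifyThread above is driven by the mem_notify kernel interface: open /dev/mem_notify and block in select() until the kernel reports memory pressure. A minimal standalone sketch of that protocol, independent of the VM plumbing, assuming a kernel that actually provides the device:

/* Minimal sketch of the /dev/mem_notify protocol: block in select() until
   the kernel signals memory pressure, then react (the VM triggers a GC). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

int main(void) {
  int fd = open("/dev/mem_notify", O_RDONLY);
  if (fd < 0) {
    perror("open /dev/mem_notify");              /* device not present */
    return 1;
  }
  for (;;) {
    fd_set rfds;
    FD_ZERO(&rfds);
    FD_SET(fd, &rfds);
    if (select(fd + 1, &rfds, NULL, NULL, NULL) < 0) {
      perror("select");
      break;
    }
    printf("low memory notification\n");         /* a client would free caches here */
  }
  close(fd);
  return 0;
}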
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -2317,6 +2317,10 @@
   return status;
 }
 
+void os::pd_print_cpu_info(outputStream* st) {
+  // Nothing to do for now.
+}
+
 void os::print_memory_info(outputStream* st) {
   st->print("Memory:");
   st->print(" %dk page", os::vm_page_size()>>10);
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1720,6 +1720,10 @@
   st->cr();
 }
 
+void os::pd_print_cpu_info(outputStream* st) {
+  // Nothing to do for now.
+}
+
 void os::print_memory_info(outputStream* st) {
   st->print("Memory:");
   st->print(" %dk page", os::vm_page_size()>>10);
--- a/hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -33,6 +33,28 @@
   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 }
 
+#ifdef MINIMIZE_RAM_USAGE
+
+void MacroAssembler::get_thread(Register thread) {
+  // call pthread_getspecific
+  // void * pthread_getspecific(pthread_key_t key);
+  if (thread != rax) push(rax);
+  push(rcx);
+  push(rdx);
+
+  push(ThreadLocalStorage::thread_index());
+  call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific)));
+  increment(rsp, wordSize);
+
+  pop(rdx);
+  pop(rcx);
+  if (thread != rax) {
+    mov(thread, rax);
+    pop(rax);
+  }
+}
+
+#else
 void MacroAssembler::get_thread(Register thread) {
   movl(thread, rsp);
   shrl(thread, PAGE_SHIFT);
@@ -43,6 +65,7 @@
 
   movptr(thread, tls);
 }
+#endif // MINIMIZE_RAM_USAGE
 #else
 void MacroAssembler::int3() {
   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
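Under MINIMIZE_RAM_USAGE the generated get_thread() simply calls into libpthread instead of indexing the stack-pointer map. In plain C the lookup it emits corresponds roughly to the sketch below, where vm_thread_key is a hypothetical stand-in for the pthread key the VM obtains via ThreadLocalStorage::thread_index():

/* Rough C equivalent of the pthread_getspecific()-based thread lookup that
   get_thread() emits under MINIMIZE_RAM_USAGE.  vm_thread_key stands in for
   the pthread key the VM allocates at startup. */
#include <pthread.h>

static pthread_key_t vm_thread_key;   /* created once with pthread_key_create() */

void* current_vm_thread(void) {
  /* Returns the Thread* stored for the calling thread, or NULL if
     pd_set_thread() has not run for it yet. */
  return pthread_getspecific(vm_thread_key);
}

The generated assembly has to preserve rax/rcx/rdx across the C call itself, which is why the push/pop pairs appear in the stub above.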
--- a/hotspot/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -52,25 +52,20 @@
 // MADV_DONTNEED on Linux keeps the virtual memory mapping, but zaps the
 // physical memory page (i.e. similar to MADV_FREE on Solaris).
 
-#ifndef AMD64
+#if !defined(AMD64) && !defined(MINIMIZE_RAM_USAGE)
 Thread* ThreadLocalStorage::_sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
-#endif // !AMD64
 
 void ThreadLocalStorage::generate_code_for_get_thread() {
     // nothing we can do here for user-level thread
 }
 
 void ThreadLocalStorage::pd_init() {
-#ifndef AMD64
   assert(align_size_down(os::vm_page_size(), PAGE_SIZE) == os::vm_page_size(),
          "page size must be multiple of PAGE_SIZE");
-#endif // !AMD64
 }
 
 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-
-#ifndef AMD64
   address stack_top = os::current_stack_base();
   size_t stack_size = os::current_stack_size();
 
@@ -88,5 +83,17 @@
            "thread exited without detaching from VM??");
     _sp_map[(uintptr_t)p >> PAGE_SHIFT] = thread;
   }
-#endif // !AMD64
+}
+#else
+
+void ThreadLocalStorage::generate_code_for_get_thread() {
+    // nothing we can do here for user-level thread
 }
+
+void ThreadLocalStorage::pd_init() {
+}
+
+void ThreadLocalStorage::pd_set_thread(Thread* thread) {
+  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
+}
+#endif // !AMD64 && !MINIMIZE_RAM_USAGE
--- a/hotspot/src/os_cpu/linux_x86/vm/threadLS_linux_x86.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/threadLS_linux_x86.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -27,28 +27,32 @@
 
   // Processor dependent parts of ThreadLocalStorage
 
-#ifndef AMD64
+#if !defined(AMD64) && !defined(MINIMIZE_RAM_USAGE)
+
   // map stack pointer to thread pointer - see notes in threadLS_linux_x86.cpp
   #define SP_BITLENGTH  32
   #define PAGE_SHIFT    12
   #define PAGE_SIZE     (1UL << PAGE_SHIFT)
   static Thread* _sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
-#endif // !AMD64
 
 public:
 
-#ifndef AMD64
   static Thread** sp_map_addr() { return _sp_map; }
-#endif // !AMD64
 
   static Thread* thread() {
-#ifdef AMD64
-    return (Thread*) os::thread_local_storage_at(thread_index());
-#else
     uintptr_t sp;
     __asm__ volatile ("movl %%esp, %0" : "=r" (sp));
     return _sp_map[sp >> PAGE_SHIFT];
-#endif // AMD64
   }
 
+#else
+
+public:
+
+   static Thread* thread() {
+     return (Thread*) os::thread_local_storage_at(thread_index());
+   }
+
+#endif // AMD64 || MINIMIZE_RAM_USAGE
+
 #endif // OS_CPU_LINUX_X86_VM_THREADLS_LINUX_X86_HPP
--- a/hotspot/src/share/tools/hsdis/README	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/tools/hsdis/README	Wed Aug 17 22:47:12 2011 -0700
@@ -75,8 +75,16 @@
 * Installing
 
 Products are named like build/$OS-$LIBARCH/hsdis-$LIBARCH.so.  You can
-install them on your LD_LIBRARY_PATH, or inside of your JRE next to
-$LIBARCH/libjvm.so.
+install them on your LD_LIBRARY_PATH, or inside of your JRE/JDK.  The
+search path in the JVM is:
+
+1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so
+2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
+3. <home>/jre/lib/<arch>/hsdis-<arch>.so
+4. hsdis-<arch>.so  (using LD_LIBRARY_PATH)
+
+Note that there's a bug in hotspot versions prior to hs22 that causes
+steps 2 and 3 to fail when used with JDK7.
 
 Now test:
 
--- a/hotspot/src/share/vm/adlc/adlparse.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/adlc/adlparse.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -2812,6 +2812,13 @@
     params->add_entry(param);
   }
 
+  // Check for duplicate ins_encode sections after parsing the block
+  // so that parsing can continue and find any other errors.
+  if (inst._insencode != NULL) {
+    parse_err(SYNERR, "Multiple ins_encode sections defined\n");
+    return;
+  }
+
   // Set encode class of this instruction.
   inst._insencode = encrule;
 }
@@ -3044,6 +3051,13 @@
   next_char();                     // move past ';'
   skipws();                        // be friendly to oper_parse()
 
+  // Check for duplicate ins_encode sections after parsing the block
+  // so that parsing can continue and find any other errors.
+  if (inst._insencode != NULL) {
+    parse_err(SYNERR, "Multiple ins_encode sections defined\n");
+    return;
+  }
+
   // Debug Stuff
   if (_AD._adl_debug > 1) fprintf(stderr,"Instruction Encode: %s\n", ec_name);
 
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -33,6 +33,7 @@
 #include "compiler/compileBroker.hpp"
 #include "interpreter/bytecode.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "runtime/compilationPolicy.hpp"
 #include "utilities/bitMap.inline.hpp"
 
 class BlockListBuilder VALUE_OBJ_CLASS_SPEC {
@@ -3395,8 +3396,8 @@
 
 bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
   assert(!callee->is_native(), "callee must not be native");
-  if (count_backedges() && callee->has_loops()) {
-    INLINE_BAILOUT("too complex for tiered");
+  if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) {
+    INLINE_BAILOUT("inlining prohibited by policy");
   }
   // first perform tests of things it's not possible to inline
   if (callee->has_exception_handlers() &&
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -2799,7 +2799,7 @@
 
       // Load CallSite object from constant pool cache.
       __ oop2reg(cpcache->constant_encoding(), tmp);
-      __ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
+      __ move_wide(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
 
       // Load target MethodHandle from CallSite object.
       __ load(new LIR_Address(tmp, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
--- a/hotspot/src/share/vm/c1/c1_Optimizer.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/c1/c1_Optimizer.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -642,7 +642,7 @@
 void NullCheckVisitor::do_NewTypeArray   (NewTypeArray*    x) { nce()->handle_NewArray(x); }
 void NullCheckVisitor::do_NewObjectArray (NewObjectArray*  x) { nce()->handle_NewArray(x); }
 void NullCheckVisitor::do_NewMultiArray  (NewMultiArray*   x) { nce()->handle_NewArray(x); }
-void NullCheckVisitor::do_CheckCast      (CheckCast*       x) {}
+void NullCheckVisitor::do_CheckCast      (CheckCast*       x) { nce()->clear_last_explicit_null_check(); }
 void NullCheckVisitor::do_InstanceOf     (InstanceOf*      x) {}
 void NullCheckVisitor::do_MonitorEnter   (MonitorEnter*    x) { nce()->handle_AccessMonitor(x); }
 void NullCheckVisitor::do_MonitorExit    (MonitorExit*     x) { nce()->handle_AccessMonitor(x); }
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -383,8 +383,10 @@
   }
 JRT_END
 
-// This is a helper to allow us to safepoint but allow the outer entry
-// to be safepoint free if we need to do an osr
+// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
+// associated with the top activation record. The method oop of the inlinee (which may be inlined
+// into the enclosing method) is passed as an argument. To make that possible, it is embedded in
+// the code as a constant.
 static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, methodOopDesc* m) {
   nmethod* osr_nm = NULL;
   methodHandle method(THREAD, m);
@@ -420,7 +422,7 @@
     bci = branch_bci + offset;
   }
 
-  osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, THREAD);
+  osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
   return osr_nm;
 }
 
--- a/hotspot/src/share/vm/ci/ciCallProfile.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciCallProfile.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -36,6 +36,7 @@
 private:
   // Fields are initialized directly by ciMethod::call_profile_at_bci.
   friend class ciMethod;
+  friend class ciMethodHandle;
 
   enum { MorphismLimit = 2 }; // Max call site's morphism we care about
   int  _limit;                // number of receivers have been determined
@@ -58,10 +59,10 @@
 
 public:
   // Note:  The following predicates return false for invalid profiles:
-  bool      has_receiver(int i) { return _limit > i; }
-  int       morphism()          { return _morphism; }
+  bool      has_receiver(int i) const { return _limit > i; }
+  int       morphism() const          { return _morphism; }
 
-  int       count()             { return _count; }
+  int       count() const             { return _count; }
   int       receiver_count(int i)  {
     assert(i < _limit, "out of Call Profile MorphismLimit");
     return _receiver_count[i];
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -50,6 +50,7 @@
 #include "oops/oop.inline.hpp"
 #include "oops/oop.inline2.hpp"
 #include "prims/jvmtiExport.hpp"
+#include "prims/methodHandleWalk.hpp"
 #include "runtime/init.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -371,6 +372,7 @@
 // ------------------------------------------------------------------
 // ciEnv::get_klass_by_name_impl
 ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
+                                       constantPoolHandle cpool,
                                        ciSymbol* name,
                                        bool require_local) {
   ASSERT_IN_VM;
@@ -386,7 +388,7 @@
                     sym->utf8_length()-2,
                     KILL_COMPILE_ON_FATAL_(_unloaded_ciinstance_klass));
     ciSymbol* strippedname = get_symbol(strippedsym);
-    return get_klass_by_name_impl(accessing_klass, strippedname, require_local);
+    return get_klass_by_name_impl(accessing_klass, cpool, strippedname, require_local);
   }
 
   // Check for prior unloaded klass.  The SystemDictionary's answers
@@ -443,6 +445,7 @@
     // Get element ciKlass recursively.
     ciKlass* elem_klass =
       get_klass_by_name_impl(accessing_klass,
+                             cpool,
                              get_symbol(elem_sym),
                              require_local);
     if (elem_klass != NULL && elem_klass->is_loaded()) {
@@ -451,6 +454,19 @@
     }
   }
 
+  if (found_klass() == NULL && !cpool.is_null() && cpool->has_preresolution()) {
+    // Look inside the constant pool for pre-resolved class entries.
+    for (int i = cpool->length() - 1; i >= 1; i--) {
+      if (cpool->tag_at(i).is_klass()) {
+        klassOop kls = cpool->resolved_klass_at(i);
+        if (Klass::cast(kls)->name() == sym) {
+          found_klass = KlassHandle(THREAD, kls);
+          break;
+        }
+      }
+    }
+  }
+
   if (found_klass() != NULL) {
     // Found it.  Build a CI handle.
     return get_object(found_klass())->as_klass();
@@ -468,6 +484,7 @@
                                   ciSymbol* klass_name,
                                   bool require_local) {
   GUARDED_VM_ENTRY(return get_klass_by_name_impl(accessing_klass,
+                                                 constantPoolHandle(),
                                                  klass_name,
                                                  require_local);)
 }
@@ -508,13 +525,14 @@
   if (klass.is_null()) {
     // Not found in constant pool.  Use the name to do the lookup.
     ciKlass* k = get_klass_by_name_impl(accessor,
+                                        cpool,
                                         get_symbol(klass_name),
                                         false);
     // Calculate accessibility the hard way.
     if (!k->is_loaded()) {
       is_accessible = false;
     } else if (k->loader() != accessor->loader() &&
-               get_klass_by_name_impl(accessor, k->name(), true) == NULL) {
+               get_klass_by_name_impl(accessor, cpool, k->name(), true) == NULL) {
       // Loaded only remotely.  Not linked yet.
       is_accessible = false;
     } else {
@@ -565,7 +583,7 @@
     index = cpc_entry->constant_pool_index();
     oop obj = cpc_entry->f1();
     if (obj != NULL) {
-      assert(obj->is_instance(), "must be an instance");
+      assert(obj->is_instance() || obj->is_array(), "must be a Java reference");
       ciObject* ciobj = get_object(obj);
       return ciConstant(T_OBJECT, ciobj);
     }
@@ -607,7 +625,7 @@
     return ciConstant(T_OBJECT, klass->java_mirror());
   } else if (tag.is_object()) {
     oop obj = cpool->object_at(index);
-    assert(obj->is_instance(), "must be an instance");
+    assert(obj->is_instance() || obj->is_array(), "must be a Java reference");
     ciObject* ciobj = get_object(obj);
     return ciConstant(T_OBJECT, ciobj);
   } else if (tag.is_method_type()) {
@@ -729,9 +747,35 @@
   Symbol* name_sym = cpool->name_ref_at(index);
   Symbol* sig_sym  = cpool->signature_ref_at(index);
 
+  if (cpool->has_preresolution()
+      || (holder == ciEnv::MethodHandle_klass() &&
+          methodOopDesc::is_method_handle_invoke_name(name_sym))) {
+    // Short-circuit lookups for JSR 292-related call sites.
+    // That is, do not rely only on name-based lookups, because they may fail
+    // if the names are not resolvable in the boot class loader (7056328).
+    switch (bc) {
+    case Bytecodes::_invokevirtual:
+    case Bytecodes::_invokeinterface:
+    case Bytecodes::_invokespecial:
+    case Bytecodes::_invokestatic:
+      {
+        methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index, bc);
+        if (m != NULL) {
+          return get_object(m)->as_method();
+        }
+      }
+    }
+  }
+
   if (holder_is_accessible) { // Our declared holder is loaded.
     instanceKlass* lookup = declared_holder->get_instanceKlass();
     methodOop m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc);
+    if (m != NULL &&
+        (bc == Bytecodes::_invokestatic
+         ?  instanceKlass::cast(m->method_holder())->is_not_initialized()
+         : !instanceKlass::cast(m->method_holder())->is_loaded())) {
+      m = NULL;
+    }
     if (m != NULL) {
       // We found the method.
       return get_object(m)->as_method();
@@ -1046,7 +1090,7 @@
 // ciEnv::find_system_klass
 ciKlass* ciEnv::find_system_klass(ciSymbol* klass_name) {
   VM_ENTRY_MARK;
-  return get_klass_by_name_impl(NULL, klass_name, false);
+  return get_klass_by_name_impl(NULL, constantPoolHandle(), klass_name, false);
 }
 
 // ------------------------------------------------------------------
--- a/hotspot/src/share/vm/ci/ciEnv.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -137,6 +137,7 @@
 
   // Implementation methods for loading and constant pool access.
   ciKlass* get_klass_by_name_impl(ciKlass* accessing_klass,
+                                  constantPoolHandle cpool,
                                   ciSymbol* klass_name,
                                   bool require_local);
   ciKlass*   get_klass_by_index_impl(constantPoolHandle cpool,
--- a/hotspot/src/share/vm/ci/ciField.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciField.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -287,7 +287,7 @@
 }
 
 ciType* ciField::compute_type_impl() {
-  ciKlass* type = CURRENT_ENV->get_klass_by_name_impl(_holder, _signature, false);
+  ciKlass* type = CURRENT_ENV->get_klass_by_name_impl(_holder, constantPoolHandle(), _signature, false);
   if (!type->is_primitive_type() && is_shared()) {
     // We must not cache a pointer to an unshared type, in a shared field.
     bool type_is_also_shared = false;
--- a/hotspot/src/share/vm/ci/ciMethod.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -125,7 +125,8 @@
   _name = env->get_symbol(h_m()->name());
   _holder = env->get_object(h_m()->method_holder())->as_instance_klass();
   ciSymbol* sig_symbol = env->get_symbol(h_m()->signature());
-  _signature = new (env->arena()) ciSignature(_holder, sig_symbol);
+  constantPoolHandle cpool = h_m()->constants();
+  _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol);
   _method_data = NULL;
   // Take a snapshot of these values, so they will be commensurate with the MDO.
   if (ProfileInterpreter || TieredCompilation) {
@@ -152,7 +153,7 @@
   // These fields are always filled in.
   _name = name;
   _holder = holder;
-  _signature = new (CURRENT_ENV->arena()) ciSignature(_holder, signature);
+  _signature = new (CURRENT_ENV->arena()) ciSignature(_holder, constantPoolHandle(), signature);
   _intrinsic_id = vmIntrinsics::_none;
   _liveness = NULL;
   _can_be_statically_bound = false;
@@ -1009,6 +1010,12 @@
   return 0;
 }
 
+int ciMethod::highest_osr_comp_level() {
+  check_is_loaded();
+  VM_ENTRY_MARK;
+  return get_methodOop()->highest_osr_comp_level();
+}
+
 // ------------------------------------------------------------------
 // ciMethod::instructions_size
 //
--- a/hotspot/src/share/vm/ci/ciMethod.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -158,6 +158,7 @@
   int interpreter_throwout_count() const         { check_is_loaded(); return _interpreter_throwout_count; }
 
   int comp_level();
+  int highest_osr_comp_level();
 
   Bytecodes::Code java_code_at_bci(int bci) {
     address bcp = code() + bci;
--- a/hotspot/src/share/vm/ci/ciMethodHandle.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciMethodHandle.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -41,9 +41,19 @@
   VM_ENTRY_MARK;
   Handle h(get_oop());
   methodHandle callee(_callee->get_methodOop());
+  assert(callee->is_method_handle_invoke(), "");
+  oop mt1 = callee->method_handle_type();
+  oop mt2 = java_lang_invoke_MethodHandle::type(h());
+  if (!java_lang_invoke_MethodType::equals(mt1, mt2)) {
+    if (PrintMiscellaneous && (Verbose || WizardMode)) {
+      tty->print_cr("ciMethodHandle::get_adapter: types not equal");
+      mt1->print(); mt2->print();
+    }
+    return NULL;
+  }
   // We catch all exceptions here that could happen in the method
   // handle compiler and stop the VM.
-  MethodHandleCompiler mhc(h, callee, _profile->count(), is_invokedynamic, THREAD);
+  MethodHandleCompiler mhc(h, callee->name(), callee->signature(), _profile.count(), is_invokedynamic, THREAD);
   if (!HAS_PENDING_EXCEPTION) {
     methodHandle m = mhc.compile(THREAD);
     if (!HAS_PENDING_EXCEPTION) {
@@ -53,7 +63,7 @@
   if (PrintMiscellaneous && (Verbose || WizardMode)) {
     tty->print("*** ciMethodHandle::get_adapter => ");
     PENDING_EXCEPTION->print();
-    tty->print("*** get_adapter (%s): ", is_invokedynamic ? "indy" : "mh"); ((ciObject*)this)->print(); //@@
+    tty->print("*** get_adapter (%s): ", is_invokedynamic ? "indy" : "mh"); ((ciObject*)this)->print();
   }
   CLEAR_PENDING_EXCEPTION;
   return NULL;
--- a/hotspot/src/share/vm/ci/ciMethodHandle.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciMethodHandle.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -36,7 +36,7 @@
 private:
   ciMethod*      _callee;
   ciMethod*      _caller;
-  ciCallProfile* _profile;
+  ciCallProfile  _profile;
 
   // Return an adapter for this MethodHandle.
   ciMethod* get_adapter_impl(bool is_invokedynamic) const;
@@ -49,8 +49,7 @@
   ciMethodHandle(instanceHandle h_i) :
     ciInstance(h_i),
     _callee(NULL),
-    _caller(NULL),
-    _profile(NULL)
+    _caller(NULL)
   {}
 
   // What kind of ciObject is this?
@@ -58,7 +57,7 @@
 
   void set_callee(ciMethod* m)                  { _callee  = m;       }
   void set_caller(ciMethod* m)                  { _caller  = m;       }
-  void set_call_profile(ciCallProfile* profile) { _profile = profile; }
+  void set_call_profile(ciCallProfile profile)  { _profile = profile; }
 
   // Return an adapter for a MethodHandle call.
   ciMethod* get_method_handle_adapter() const { return get_adapter(false); }
--- a/hotspot/src/share/vm/ci/ciObjArrayKlass.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciObjArrayKlass.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -93,6 +93,7 @@
       // element klass by name.
       _element_klass = CURRENT_THREAD_ENV->get_klass_by_name_impl(
                           this,
+                          constantPoolHandle(),
                           construct_array_name(base_element_klass()->name(),
                                                dimension() - 1),
                           false);
--- a/hotspot/src/share/vm/ci/ciObject.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciObject.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -187,7 +187,7 @@
 // ciObject::can_be_constant
 bool ciObject::can_be_constant() {
   if (ScavengeRootsInCode >= 1)  return true;  // now everybody can encode as a constant
-  return handle() == NULL || !is_scavengable();
+  return handle() == NULL || is_perm();
 }
 
 // ------------------------------------------------------------------
@@ -204,7 +204,7 @@
       return true;
     }
   }
-  return handle() == NULL || !is_scavengable();
+  return handle() == NULL || is_perm();
 }
 
 
--- a/hotspot/src/share/vm/ci/ciObject.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciObject.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -108,7 +108,7 @@
   int hash();
 
   // Tells if this oop has an encoding as a constant.
-  // True if is_scavengable is false.
+  // True if is_perm is true.
   // Also true if ScavengeRootsInCode is non-zero.
   // If it does not have an encoding, the compiler is responsible for
   // making other arrangements for dealing with the oop.
@@ -116,7 +116,7 @@
   bool can_be_constant();
 
   // Tells if this oop should be made a constant.
-  // True if is_scavengable is false or ScavengeRootsInCode > 1.
+  // True if is_perm is true or ScavengeRootsInCode > 1.
   bool should_be_constant();
 
   // Is this object guaranteed to be in the permanent part of the heap?
--- a/hotspot/src/share/vm/ci/ciSignature.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciSignature.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -35,7 +35,7 @@
 
 // ------------------------------------------------------------------
 // ciSignature::ciSignature
-ciSignature::ciSignature(ciKlass* accessing_klass, ciSymbol* symbol) {
+ciSignature::ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* symbol) {
   ASSERT_IN_VM;
   EXCEPTION_CONTEXT;
   _accessing_klass = accessing_klass;
@@ -64,7 +64,7 @@
         CLEAR_PENDING_EXCEPTION;
       } else {
         ciSymbol* klass_name = env->get_symbol(name);
-        type = env->get_klass_by_name_impl(_accessing_klass, klass_name, false);
+        type = env->get_klass_by_name_impl(_accessing_klass, cpool, klass_name, false);
       }
     }
     _types->append(type);
--- a/hotspot/src/share/vm/ci/ciSignature.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciSignature.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -44,7 +44,7 @@
 
   friend class ciMethod;
 
-  ciSignature(ciKlass* accessing_klass, ciSymbol* signature);
+  ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* signature);
 
   void get_all_klasses();
 
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -3287,9 +3287,9 @@
       // Fields allocation: oops fields in super and sub classes are together.
       if( nonstatic_field_size > 0 && super_klass() != NULL &&
           super_klass->nonstatic_oop_map_size() > 0 ) {
-        int map_size = super_klass->nonstatic_oop_map_size();
+        int map_count = super_klass->nonstatic_oop_map_count();
         OopMapBlock* first_map = super_klass->start_of_nonstatic_oop_maps();
-        OopMapBlock* last_map = first_map + map_size - 1;
+        OopMapBlock* last_map = first_map + map_count - 1;
         int next_offset = last_map->offset() + (last_map->count() * heapOopSize);
         if (next_offset == next_nonstatic_field_offset) {
           allocation_style = 0;   // allocate oops first
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1258,7 +1258,6 @@
   objArrayOop     _methods;
   typeArrayOop    _bcis;
   int             _index;
-  bool            _dirty;
   No_Safepoint_Verifier _nsv;
 
  public:
@@ -1272,37 +1271,13 @@
   };
 
   // constructor for new backtrace
-  BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _dirty(false) {
+  BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL) {
     expand(CHECK);
     _backtrace = _head;
     _index = 0;
   }
 
-  void flush() {
-    // The following appears to have been an optimization to save from
-    // doing a barrier for each individual store into the _methods array,
-    // but rather to do it for the entire array after the series of writes.
-    // That optimization seems to have been lost when compressed oops was
-    // implemented. However, the extra card-marks below was left in place,
-    // but is now redundant because the individual stores into the
-    // _methods array already execute the barrier code. CR 6918185 has
-    // been filed so the original code may be restored by deferring the
-    // barriers until after the entire sequence of stores, thus re-enabling
-    // the intent of the original optimization. In the meantime the redundant
-    // card mark below is now disabled.
-    if (_dirty && _methods != NULL) {
-#if 0
-      BarrierSet* bs = Universe::heap()->barrier_set();
-      assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-      bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
-#endif
-      _dirty = false;
-    }
-  }
-
   void expand(TRAPS) {
-    flush();
-
     objArrayHandle old_head(THREAD, _head);
     Pause_No_Safepoint_Verifier pnsv(&_nsv);
 
@@ -1328,7 +1303,6 @@
   }
 
   oop backtrace() {
-    flush();
     return _backtrace();
   }
 
@@ -1342,7 +1316,6 @@
     _methods->obj_at_put(_index, method);
     _bcis->ushort_at_put(_index, bci);
     _index++;
-    _dirty = true;
   }
 
   methodOop current_method() {
@@ -2574,6 +2547,18 @@
   return name;
 }
 
+bool java_lang_invoke_MethodType::equals(oop mt1, oop mt2) {
+  if (rtype(mt1) != rtype(mt2))
+    return false;
+  if (ptype_count(mt1) != ptype_count(mt2))
+    return false;
+  for (int i = ptype_count(mt1) - 1; i >= 0; i--) {
+    if (ptype(mt1, i) != ptype(mt2, i))
+      return false;
+  }
+  return true;
+}
+
 oop java_lang_invoke_MethodType::rtype(oop mt) {
   assert(is_instance(mt), "must be a MethodType");
   return mt->obj_field(_rtype_offset);
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1079,6 +1079,8 @@
     return obj != NULL && obj->klass() == SystemDictionary::MethodType_klass();
   }
 
+  static bool equals(oop mt1, oop mt2);
+
   // Accessors for code generation:
   static int rtype_offset_in_bytes()            { return _rtype_offset; }
   static int ptypes_offset_in_bytes()           { return _ptypes_offset; }
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -2367,6 +2367,8 @@
         // Link m to its method type, if it is suitably generic.
         oop mtform = java_lang_invoke_MethodType::form(mt());
         if (mtform != NULL && mt() == java_lang_invoke_MethodTypeForm::erasedType(mtform)
+            // vmlayout must be an invokeExact:
+            && name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name)
             && java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) {
           java_lang_invoke_MethodTypeForm::init_vmlayout(mtform, m());
         }
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -152,6 +152,7 @@
   template(DirectMethodHandle_klass,     java_lang_invoke_DirectMethodHandle, Pre_JSR292) \
   template(MethodType_klass,             java_lang_invoke_MethodType,       Pre_JSR292) \
   template(MethodTypeForm_klass,         java_lang_invoke_MethodTypeForm,   Pre_JSR292) \
+  template(BootstrapMethodError_klass,   java_lang_BootstrapMethodError, Pre_JSR292) \
   template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \
   template(CallSite_klass,               java_lang_invoke_CallSite,         Pre_JSR292) \
   /* Note: MethodHandle must be first, and CallSite last in group */          \
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -148,6 +148,7 @@
   template(java_lang_InstantiationException,          "java/lang/InstantiationException")         \
   template(java_lang_InstantiationError,              "java/lang/InstantiationError")             \
   template(java_lang_InterruptedException,            "java/lang/InterruptedException")           \
+  template(java_lang_BootstrapMethodError,            "java/lang/BootstrapMethodError")           \
   template(java_lang_LinkageError,                    "java/lang/LinkageError")                   \
   template(java_lang_NegativeArraySizeException,      "java/lang/NegativeArraySizeException")     \
   template(java_lang_NoSuchFieldException,            "java/lang/NoSuchFieldException")           \
--- a/hotspot/src/share/vm/code/nmethod.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1810,7 +1810,7 @@
   void maybe_print(oop* p) {
     if (_print_nm == NULL)  return;
     if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
-    tty->print_cr(""PTR_FORMAT"[offset=%d] detected non-perm oop "PTR_FORMAT" (found at "PTR_FORMAT")",
+    tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
                   _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
                   (intptr_t)(*p), (intptr_t)p);
     (*p)->print();
@@ -1832,7 +1832,9 @@
   if (!method()->is_native()) {
     SimpleScopeDesc ssd(this, fr.pc());
     Bytecode_invoke call(ssd.method(), ssd.bci());
-    bool has_receiver = call.has_receiver();
+    // compiled invokedynamic call sites have an implicit receiver at
+    // resolution time, so make sure it gets GC'ed.
+    bool has_receiver = !call.is_invokestatic();
     Symbol* signature = call.signature();
     fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
   }
@@ -2311,7 +2313,7 @@
       _nm->print_nmethod(true);
       _ok = false;
     }
-    tty->print_cr("*** non-perm oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
+    tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
                   (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
     (*p)->print();
   }
@@ -2324,7 +2326,7 @@
     DebugScavengeRoot debug_scavenge_root(this);
     oops_do(&debug_scavenge_root);
     if (!debug_scavenge_root.ok())
-      fatal("found an unadvertised bad non-perm oop in the code cache");
+      fatal("found an unadvertised bad scavengable oop in the code cache");
   }
   assert(scavenge_root_not_marked(), "");
 }
--- a/hotspot/src/share/vm/code/nmethod.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -109,7 +109,7 @@
 class nmethod : public CodeBlob {
   friend class VMStructs;
   friend class NMethodSweeper;
-  friend class CodeCache;  // non-perm oops
+  friend class CodeCache;  // scavengable oops
  private:
   // Shared fields for all nmethod's
   methodOop _method;
@@ -466,17 +466,17 @@
   bool is_at_poll_return(address pc);
   bool is_at_poll_or_poll_return(address pc);
 
-  // Non-perm oop support
+  // Scavengable oop support
   bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
  protected:
-  enum { npl_on_list = 0x01, npl_marked = 0x10 };
-  void  set_on_scavenge_root_list()                    { _scavenge_root_state = npl_on_list; }
+  enum { sl_on_list = 0x01, sl_marked = 0x10 };
+  void  set_on_scavenge_root_list()                    { _scavenge_root_state = sl_on_list; }
   void  clear_on_scavenge_root_list()                  { _scavenge_root_state = 0; }
   // assertion-checking and pruning logic uses the bits of _scavenge_root_state
 #ifndef PRODUCT
-  void  set_scavenge_root_marked()                     { _scavenge_root_state |= npl_marked; }
-  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~npl_marked; }
-  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ npl_on_list) == 0; }
+  void  set_scavenge_root_marked()                     { _scavenge_root_state |= sl_marked; }
+  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~sl_marked; }
+  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ sl_on_list) == 0; }
   // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
 #endif //PRODUCT
   nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
--- a/hotspot/src/share/vm/code/pcDesc.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/code/pcDesc.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -44,7 +44,7 @@
 void PcDesc::print(nmethod* code) {
 #ifndef PRODUCT
   ResourceMark rm;
-  tty->print_cr("PcDesc(pc=0x%lx offset=%x):", real_pc(code), pc_offset());
+  tty->print_cr("PcDesc(pc=0x%lx offset=%x bits=%x):", real_pc(code), pc_offset(), _flags.bits);
 
   if (scope_decode_offset() == DebugInformationRecorder::serialized_null) {
     return;
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -300,12 +300,23 @@
   st->print("%7d ", (int) st->time_stamp().milliseconds());  // print timestamp
   st->print("%4d ", compile_id);    // print compilation number
 
+  // For unloaded methods the transition to zombie occurs after the
+  // method is cleared so it's impossible to report accurate
+  // information for that case.
+  bool is_synchronized = false;
+  bool has_exception_handler = false;
+  bool is_native = false;
+  if (method != NULL) {
+    is_synchronized       = method->is_synchronized();
+    has_exception_handler = method->has_exception_handler();
+    is_native             = method->is_native();
+  }
   // method attributes
   const char compile_type   = is_osr_method                   ? '%' : ' ';
-  const char sync_char      = method->is_synchronized()       ? 's' : ' ';
-  const char exception_char = method->has_exception_handler() ? '!' : ' ';
+  const char sync_char      = is_synchronized                 ? 's' : ' ';
+  const char exception_char = has_exception_handler           ? '!' : ' ';
   const char blocking_char  = is_blocking                     ? 'b' : ' ';
-  const char native_char    = method->is_native()             ? 'n' : ' ';
+  const char native_char    = is_native                       ? 'n' : ' ';
 
   // print method attributes
   st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
@@ -316,11 +327,15 @@
   }
   st->print("     ");  // more indent
 
-  method->print_short_name(st);
-  if (is_osr_method) {
-    st->print(" @ %d", osr_bci);
+  if (method == NULL) {
+    st->print("(method)");
+  } else {
+    method->print_short_name(st);
+    if (is_osr_method) {
+      st->print(" @ %d", osr_bci);
+    }
+    st->print(" (%d bytes)", method->code_size());
   }
-  st->print(" (%d bytes)", method->code_size());
 
   if (msg != NULL) {
     st->print("   %s", msg);
--- a/hotspot/src/share/vm/compiler/disassembler.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/compiler/disassembler.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -78,21 +78,46 @@
   char buf[JVM_MAXPATHLEN];
   os::jvm_path(buf, sizeof(buf));
   int jvm_offset = -1;
+  int lib_offset = -1;
   {
     // Match "jvm[^/]*" in jvm_path.
     const char* base = buf;
     const char* p = strrchr(buf, '/');
+    if (p != NULL) lib_offset = p - base + 1;
     p = strstr(p ? p : base, "jvm");
     if (p != NULL)  jvm_offset = p - base;
   }
+  // Find the disassembler shared library.
+  // Search for several paths derived from libjvm, in this order:
+  // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so  (for compatibility)
+  // 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
+  // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
+  // 4. hsdis-<arch>.so  (using LD_LIBRARY_PATH)
   if (jvm_offset >= 0) {
-    // Find the disassembler next to libjvm.so.
+    // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so
     strcpy(&buf[jvm_offset], hsdis_library_name);
     strcat(&buf[jvm_offset], os::dll_file_extension());
     _library = os::dll_load(buf, ebuf, sizeof ebuf);
+    if (_library == NULL) {
+      // 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
+      strcpy(&buf[lib_offset], hsdis_library_name);
+      strcat(&buf[lib_offset], os::dll_file_extension());
+      _library = os::dll_load(buf, ebuf, sizeof ebuf);
+    }
+    if (_library == NULL) {
+      // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
+      buf[lib_offset - 1] = '\0';
+      const char* p = strrchr(buf, '/');
+      if (p != NULL) {
+        lib_offset = p - buf + 1;
+        strcpy(&buf[lib_offset], hsdis_library_name);
+        strcat(&buf[lib_offset], os::dll_file_extension());
+        _library = os::dll_load(buf, ebuf, sizeof ebuf);
+      }
+    }
   }
   if (_library == NULL) {
-    // Try a free-floating lookup.
+    // 4. hsdis-<arch>.so  (using LD_LIBRARY_PATH)
     strcpy(&buf[0], hsdis_library_name);
     strcat(&buf[0], os::dll_file_extension());
     _library = os::dll_load(buf, ebuf, sizeof ebuf);
@@ -249,7 +274,13 @@
       return arg;
     }
   } else if (match(event, "mach")) {
-   output()->print_cr("[Disassembling for mach='%s']", arg);
+    static char buffer[32] = { 0, };
+    if (strcmp(buffer, (const char*)arg) != 0 ||
+        strlen((const char*)arg) > sizeof(buffer) - 1) {
+      // Only print this when the mach changes
+      strncpy(buffer, (const char*)arg, sizeof(buffer) - 1);
+      output()->print_cr("[Disassembling for mach='%s']", arg);
+    }
   } else if (match(event, "format bytes-per-line")) {
     _bytes_per_line = (int) (intptr_t) arg;
   } else {
--- a/hotspot/src/share/vm/compiler/oopMap.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/compiler/oopMap.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -638,7 +638,9 @@
     assert(*derived_loc != (oop)base_loc, "location already added");
     assert(_list != NULL, "list must exist");
     intptr_t offset = value_of_loc(derived_loc) - value_of_loc(base_loc);
-    assert(offset >= -1000000, "wrong derived pointer info");
+    // This assert is invalid because derived pointers can be
+    // arbitrarily far away from their base.
+    // assert(offset >= -1000000, "wrong derived pointer info");
 
     if (TraceDerivedPointers) {
       tty->print_cr(
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1833,8 +1833,6 @@
     }
   )
   _indexedFreeList[size].removeChunk(fc);
-  debug_only(fc->clearNext());
-  debug_only(fc->clearPrev());
   NOT_PRODUCT(
     if (FLSVerifyIndexTable) {
       verifyIndexedFreeList(size);
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -407,6 +407,11 @@
   void save_sweep_limit() {
     _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                    unallocated_block() : end();
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
+                             "  for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
+                             _sweep_limit, bottom(), end());
+    }
   }
   NOT_PRODUCT(
     void clear_sweep_limit() { _sweep_limit = NULL; }
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -2716,6 +2716,10 @@
   bitMapLock()->unlock();
   releaseFreelistLocks();
 
+  if (!CleanChunkPoolAsync) {
+    Chunk::clean_chunk_pool();
+  }
+
   _between_prologue_and_epilogue = false;  // ready for next cycle
 }
 
@@ -7888,60 +7892,64 @@
   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
          "sweep _limit out of bounds");
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("\n====================\nStarting new sweep\n");
-  }
-}
-
-// We need this destructor to reclaim any space at the end
-// of the space, which do_blk below may not yet have added back to
-// the free lists.
+    gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
+                        _limit);
+  }
+}
+
+void SweepClosure::print_on(outputStream* st) const {
+  tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
+                _sp->bottom(), _sp->end());
+  tty->print_cr("_limit = " PTR_FORMAT, _limit);
+  tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
+  NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
+  tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
+                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
+}
+
+#ifndef PRODUCT
+// Assertion checking only:  no useful work in product mode --
+// however, if any of the flags below become product flags,
+// you may need to review this code to see if it needs to be
+// enabled in product mode.
 SweepClosure::~SweepClosure() {
   assert_lock_strong(_freelistLock);
   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
          "sweep _limit out of bounds");
-  // Flush any remaining coterminal free run as a single
-  // coalesced chunk to the appropriate free list.
   if (inFreeRange()) {
-    assert(freeFinger() < _limit, "freeFinger points too high");
-    flush_cur_free_chunk(freeFinger(), pointer_delta(_limit, freeFinger()));
-    if (CMSTraceSweeper) {
-      gclog_or_tty->print("Sweep: last chunk: ");
-      gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") [coalesced:"SIZE_FORMAT"]\n",
-                          freeFinger(), pointer_delta(_limit, freeFinger()), lastFreeRangeCoalesced());
-    }
-  } // else nothing to flush
-  NOT_PRODUCT(
-    if (Verbose && PrintGC) {
-      gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
-                          SIZE_FORMAT " bytes",
-                 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
-      gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
-                             SIZE_FORMAT" bytes  "
-        "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
-        _numObjectsLive, _numWordsLive*sizeof(HeapWord),
-        _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
-      size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
-        sizeof(HeapWord);
-      gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
-
-      if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
-        size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
-        size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
-        size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
-        gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
-        gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
-          indexListReturnedBytes);
-        gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
-          dictReturnedBytes);
-      }
-    }
-  )
-  // Now, in debug mode, just null out the sweep_limit
-  NOT_PRODUCT(_sp->clear_sweep_limit();)
+    warning("inFreeRange() should have been reset; dumping state of SweepClosure");
+    print();
+    ShouldNotReachHere();
+  }
+  if (Verbose && PrintGC) {
+    gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
+                        _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
+    gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
+                           SIZE_FORMAT" bytes  "
+      "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
+      _numObjectsLive, _numWordsLive*sizeof(HeapWord),
+      _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
+    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
+                        * sizeof(HeapWord);
+    gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
+
+    if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
+      size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
+      size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
+      size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
+      gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
+      gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
+        indexListReturnedBytes);
+      gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
+        dictReturnedBytes);
+    }
+  }
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("end of sweep\n================\n");
-  }
-}
+    gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
+                           _limit);
+  }
+}
+#endif  // PRODUCT
 
 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
     bool freeRangeInFreeLists) {
@@ -8001,15 +8009,17 @@
   // we started the sweep, it may no longer be one because heap expansion
   // may have caused us to coalesce the block ending at the address _limit
   // with a newly expanded chunk (this happens when _limit was set to the
-  // previous _end of the space), so we may have stepped past _limit; see CR 6977970.
+  // previous _end of the space), so we may have stepped past _limit:
+  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
   if (addr >= _limit) { // we have swept up to or past the limit: finish up
     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
            "sweep _limit out of bounds");
     assert(addr < _sp->end(), "addr out of bounds");
-    // Flush any remaining coterminal free run as a single
+    // Flush any free range we might be holding as a single
     // coalesced chunk to the appropriate free list.
     if (inFreeRange()) {
-      assert(freeFinger() < _limit, "finger points too high");
+      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
+             err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
       flush_cur_free_chunk(freeFinger(),
                            pointer_delta(addr, freeFinger()));
       if (CMSTraceSweeper) {
@@ -8033,7 +8043,16 @@
     res = fc->size();
     do_already_free_chunk(fc);
     debug_only(_sp->verifyFreeLists());
-    assert(res == fc->size(), "Don't expect the size to change");
+    // If we flush the chunk at hand in lookahead_and_flush()
+    // and it's coalesced with a preceding chunk, then the
+    // process of "mangling" the payload of the coalesced block
+    // will cause erasure of the size information from the
+    // (erstwhile) header of all the coalesced blocks but the
+    // first, so the first disjunct in the assert will not hold
+    // in that specific case (in which case the second disjunct
+    // will hold).
+    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
+           "Otherwise the size info doesn't change at this step");
     NOT_PRODUCT(
       _numObjectsAlreadyFree++;
       _numWordsAlreadyFree += res;
@@ -8103,7 +8122,7 @@
 //
 
 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
-  size_t size = fc->size();
+  const size_t size = fc->size();
   // Chunks that cannot be coalesced are not in the
   // free lists.
   if (CMSTestInFreeList && !fc->cantCoalesce()) {
@@ -8112,7 +8131,7 @@
   }
   // a chunk that is already free, should not have been
   // marked in the bit map
-  HeapWord* addr = (HeapWord*) fc;
+  HeapWord* const addr = (HeapWord*) fc;
   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
   // Verify that the bit map has no bits marked between
   // addr and purported end of this block.
@@ -8149,7 +8168,7 @@
         }
       } else {
         // the midst of a free range, we are coalescing
-        debug_only(record_free_block_coalesced(fc);)
+        print_free_block_coalesced(fc);
         if (CMSTraceSweeper) {
           gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
         }
@@ -8173,6 +8192,10 @@
         }
       }
     }
+    // Note that if the chunk is not coalescable (the else arm
+    // below), we unconditionally flush, without needing to do
+    // a "lookahead," as we do below.
+    if (inFreeRange()) lookahead_and_flush(fc, size);
   } else {
     // Code path common to both original and adaptive free lists.
 
@@ -8191,8 +8214,8 @@
   // This is a chunk of garbage.  It is not in any free list.
   // Add it to a free list or let it possibly be coalesced into
   // a larger chunk.
-  HeapWord* addr = (HeapWord*) fc;
-  size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
+  HeapWord* const addr = (HeapWord*) fc;
+  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
 
   if (_sp->adaptive_freelists()) {
     // Verify that the bit map has no bits marked between
@@ -8205,7 +8228,6 @@
       // start of a new free range
       assert(size > 0, "A free range should have a size");
       initialize_free_range(addr, false);
-
     } else {
       // this will be swept up when we hit the end of the
       // free range
@@ -8235,6 +8257,9 @@
     // addr and purported end of just dead object.
     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
   }
+  assert(_limit >= addr + size,
+         "A freshly garbage chunk can't possibly straddle over _limit");
+  if (inFreeRange()) lookahead_and_flush(fc, size);
   return size;
 }
 
@@ -8284,8 +8309,8 @@
            (!_collector->should_unload_classes()
             || oop(addr)->is_parsable()),
            "Should be an initialized object");
-    // Note that there are objects used during class redefinition
-    // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite()
+    // Note that there are objects used during class redefinition,
+    // e.g. merge_cp in VM_RedefineClasses::merge_cp_and_rewrite(),
     // which are discarded with their is_conc_safe state still
     // false.  These object may be floating garbage so may be
     // seen here.  If they are floating garbage their size
@@ -8307,7 +8332,7 @@
                                                  size_t chunkSize) {
   // do_post_free_or_garbage_chunk() should only be called in the case
   // of the adaptive free list allocator.
-  bool fcInFreeLists = fc->isFree();
+  const bool fcInFreeLists = fc->isFree();
   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
   assert((HeapWord*)fc <= _limit, "sweep invariant");
   if (CMSTestInFreeList && fcInFreeLists) {
@@ -8318,11 +8343,11 @@
     gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
   }
 
-  HeapWord* addr = (HeapWord*) fc;
+  HeapWord* const fc_addr = (HeapWord*) fc;
 
   bool coalesce;
-  size_t left  = pointer_delta(addr, freeFinger());
-  size_t right = chunkSize;
+  const size_t left  = pointer_delta(fc_addr, freeFinger());
+  const size_t right = chunkSize;
   switch (FLSCoalescePolicy) {
     // numeric value forms a coalition aggressiveness metric
     case 0:  { // never coalesce
@@ -8355,15 +8380,15 @@
   // If the chunk is in a free range and either we decided to coalesce above
   // or the chunk is near the large block at the end of the heap
   // (isNearLargestChunk() returns true), then coalesce this chunk.
-  bool doCoalesce = inFreeRange() &&
-    (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
+  const bool doCoalesce = inFreeRange()
+                          && (coalesce || _g->isNearLargestChunk(fc_addr));
   if (doCoalesce) {
     // Coalesce the current free range on the left with the new
     // chunk on the right.  If either is on a free list,
     // it must be removed from the list and stashed in the closure.
     if (freeRangeInFreeLists()) {
-      FreeChunk* ffc = (FreeChunk*)freeFinger();
-      assert(ffc->size() == pointer_delta(addr, freeFinger()),
+      FreeChunk* const ffc = (FreeChunk*)freeFinger();
+      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
         "Size of free range is inconsistent with chunk size.");
       if (CMSTestInFreeList) {
         assert(_sp->verifyChunkInFreeLists(ffc),
@@ -8380,13 +8405,14 @@
       _sp->removeFreeChunkFromFreeLists(fc);
     }
     set_lastFreeRangeCoalesced(true);
+    print_free_block_coalesced(fc);
   } else {  // not in a free range and/or should not coalesce
     // Return the current free range and start a new one.
     if (inFreeRange()) {
       // In a free range but cannot coalesce with the right hand chunk.
       // Put the current free range into the free lists.
       flush_cur_free_chunk(freeFinger(),
-                           pointer_delta(addr, freeFinger()));
+                           pointer_delta(fc_addr, freeFinger()));
     }
     // Set up for new free range.  Pass along whether the right hand
     // chunk is in the free lists.
@@ -8394,6 +8420,42 @@
   }
 }
 
+// Lookahead flush:
+// If we are tracking a free range, and this is the last chunk that
+// we'll look at because its end crosses past _limit, we'll preemptively
+// flush it along with any free range we may be holding on to. Note that
+// this can be the case only for an already free or freshly garbage
+// chunk. If this block is an object, it can never straddle
+// over _limit. The "straddling" occurs when _limit is set at
+// the previous end of the space when this cycle started, and
+// a subsequent heap expansion caused the previously co-terminal
+// free block to be coalesced with the newly expanded portion,
+// thus rendering _limit a non-block-boundary, making it dangerous
+// for the sweeper to step over and examine.
+void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
+  assert(inFreeRange(), "Should only be called if currently in a free range.");
+  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
+  assert(_sp->used_region().contains(eob - 1),
+         err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
+                 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
+                 eob, _sp->bottom(), _sp->end(), fc, chunk_size));
+  if (eob >= _limit) {
+    assert(eob == _limit || fc->isFree(), "Only a free chunk should allow us to cross over the limit");
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
+                             "[" PTR_FORMAT "," PTR_FORMAT ") in space "
+                             "[" PTR_FORMAT "," PTR_FORMAT ")",
+                             _limit, fc, eob, _sp->bottom(), _sp->end());
+    }
+    // Return the storage we are tracking back into the free lists.
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr("Flushing ... ");
+    }
+    assert(freeFinger() < eob, "Error");
+    flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
+  }
+}
+
 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
   assert(inFreeRange(), "Should only be called if currently in a free range.");
   assert(size > 0,
@@ -8419,6 +8481,8 @@
     }
     _sp->addChunkAndRepairOffsetTable(chunk, size,
             lastFreeRangeCoalesced());
+  } else if (CMSTraceSweeper) {
+    gclog_or_tty->print_cr("Already in free list: nothing to flush");
   }
   set_inFreeRange(false);
   set_freeRangeInFreeLists(false);
@@ -8477,13 +8541,14 @@
 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
   return debug_cms_space->verifyChunkInFreeLists(fc);
 }
-
-void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
+#endif
+
+void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
-  }
-}
-#endif
+    gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
+                           fc, fc->size());
+  }
+}
 
 // CMSIsAliveClosure
 bool CMSIsAliveClosure::do_object_b(oop obj) {
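
[Editorial sketch, not part of the patch: a minimal, self-contained model of the test performed by lookahead_and_flush() above, assuming a word-addressed toy space; the names limit, chunk_start, chunk_size and eob are illustrative only.]

#include <cstdio>

int main() {
  // The sweep began when the space ended at word 100, so the sweep limit was
  // pinned there.  A later expansion grew the space, and the previously
  // co-terminal free block starting at word 80 was coalesced with the new
  // storage, so its end-of-block now crosses the old limit.
  const long limit       = 100;  // sweep must stop at or above this address
  const long chunk_start = 80;   // coalesced free chunk seen by the sweeper
  const long chunk_size  = 70;   // 80 + 70 = 150, i.e. past the limit
  const long eob = chunk_start + chunk_size;

  if (eob >= limit) {
    // The sweeper must not step over the limit onto a non-block boundary, so
    // it flushes the free range it is tracking right here (the lookahead flush).
    std::printf("flush free range [%ld, %ld) against limit %ld\n",
                chunk_start, eob, limit);
  }
  return 0;
}
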
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1701,9 +1701,9 @@
   CMSCollector*                  _collector;  // collector doing the work
   ConcurrentMarkSweepGeneration* _g;    // Generation being swept
   CompactibleFreeListSpace*      _sp;   // Space being swept
-  HeapWord*                      _limit;// the address at which the sweep should stop because
-                                        // we do not expect blocks eligible for sweeping past
-                                        // that address.
+  HeapWord*                      _limit;// the address at or above which the sweep should stop
+                                        // because we do not expect newly garbage blocks
+                                        // eligible for sweeping past that address.
   Mutex*                         _freelistLock; // Free list lock (in space)
   CMSBitMap*                     _bitMap;       // Marking bit map (in
                                                 // generation)
@@ -1750,6 +1750,10 @@
   void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
   // Process a free chunk during sweeping.
   void do_already_free_chunk(FreeChunk *fc);
+  // Work method called when processing an already free or a
+  // freshly garbage chunk to do a lookahead and possibly a
+  // preemptive flush if crossing over _limit.
+  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
   // Process a garbage chunk during sweeping.
   size_t do_garbage_chunk(FreeChunk *fc);
   // Process a live chunk during sweeping.
@@ -1758,8 +1762,6 @@
   // Accessors.
   HeapWord* freeFinger() const          { return _freeFinger; }
   void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
-  size_t freeRangeSize() const          { return _freeRangeSize; }
-  void set_freeRangeSize(size_t v)      { _freeRangeSize = v; }
   bool inFreeRange()    const           { return _inFreeRange; }
   void set_inFreeRange(bool v)          { _inFreeRange = v; }
   bool lastFreeRangeCoalesced() const    { return _lastFreeRangeCoalesced; }
@@ -1779,14 +1781,16 @@
   void do_yield_work(HeapWord* addr);
 
   // Debugging/Printing
-  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
+  void print_free_block_coalesced(FreeChunk* fc) const;
 
  public:
   SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
                CMSBitMap* bitMap, bool should_yield);
-  ~SweepClosure();
+  ~SweepClosure() PRODUCT_RETURN;
 
   size_t       do_blk_careful(HeapWord* addr);
+  void         print() const { print_on(tty); }
+  void         print_on(outputStream *st) const;
 };
 
 // Closures related to weak references processing
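
[Editorial sketch, not part of the patch: how the PRODUCT_RETURN idiom used for ~SweepClosure() above typically pairs with the #ifndef PRODUCT destructor in the .cpp; the macro definitions below are an assumption for illustration, the real ones live in the shared utility headers.]

// In product builds the macro supplies an empty inline body, so the destructor
// costs nothing; in non-product builds it expands to nothing, leaving a plain
// declaration whose assertion-checking body is defined out of line.
#ifdef PRODUCT
#define PRODUCT_RETURN {}
#else
#define PRODUCT_RETURN
#endif

class SweepClosureLike {
 public:
  ~SweepClosureLike() PRODUCT_RETURN;
};

#ifndef PRODUCT
SweepClosureLike::~SweepClosureLike() {
  // debug-only checking would go here, e.g. verifying no free range is still open
}
#endif

int main() {
  SweepClosureLike sc;   // destructor is a no-op in product, checked otherwise
  return 0;
}
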
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -114,17 +114,11 @@
     linkNext(ptr);
     if (ptr != NULL) ptr->linkPrev(this);
   }
-  void linkAfterNonNull(FreeChunk* ptr) {
-    assert(ptr != NULL, "precondition violation");
-    linkNext(ptr);
-    ptr->linkPrev(this);
-  }
   void linkNext(FreeChunk* ptr) { _next = ptr; }
   void linkPrev(FreeChunk* ptr) {
     LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
     _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
   }
-  void clearPrev()              { _prev = NULL; }
   void clearNext()              { _next = NULL; }
   void markNotFree() {
     // Set _prev (klass) to null before (if) clearing the mark word below
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -300,8 +300,21 @@
   // dictionary for example, this might be the first block and
   // in that case there would be no place that we could record
   // the stats (which are kept in the block itself).
-  assert(_allocation_stats.prevSweep() + _allocation_stats.splitBirths() + 1   // Total Stock + 1
-          >= _allocation_stats.splitDeaths() + (ssize_t)count(), "Conservation Principle");
+  assert((_allocation_stats.prevSweep() + _allocation_stats.splitBirths()
+          + _allocation_stats.coalBirths() + 1)   // Total Production Stock + 1
+         >= (_allocation_stats.splitDeaths() + _allocation_stats.coalDeaths()
+             + (ssize_t)count()),                // Total Current Stock + depletion
+         err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
+                 " violates Conservation Principle: "
+                 "prevSweep(" SIZE_FORMAT ")"
+                 " + splitBirths(" SIZE_FORMAT ")"
+                 " + coalBirths(" SIZE_FORMAT ") + 1 >= "
+                 " splitDeaths(" SIZE_FORMAT ")"
+                 " + coalDeaths(" SIZE_FORMAT ")"
+                 " + count(" SSIZE_FORMAT ")",
+                 this, _size, _allocation_stats.prevSweep(), _allocation_stats.splitBirths(),
+                 _allocation_stats.coalBirths(), _allocation_stats.splitDeaths(),
+                 _allocation_stats.coalDeaths(), count()));
 }
 
 void FreeList::assert_proper_lock_protection_work() const {
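
[Editorial sketch, not part of the patch: the strengthened Conservation Principle checked by the assert above, restated as plain arithmetic. Production since the last sweep (prevSweep + splitBirths + coalBirths, plus one for the unrecorded-block edge case noted in the surrounding comment) must cover consumption plus current stock (splitDeaths + coalDeaths + count). Field and function names below are illustrative only.]

#include <cassert>

struct ListStats {
  long prevSweep;     // chunks of this size left over from the previous sweep
  long splitBirths;   // chunks created by splitting larger chunks
  long coalBirths;    // chunks created by coalescing smaller chunks
  long splitDeaths;   // chunks consumed by splitting
  long coalDeaths;    // chunks consumed by coalescing
  long count;         // chunks currently on the list
};

static bool conserves(const ListStats& s) {
  return s.prevSweep + s.splitBirths + s.coalBirths + 1 >=
         s.splitDeaths + s.coalDeaths + s.count;
}

int main() {
  // 10 survivors + 4 split births + 2 coalesce births (+1 slack) = 17,
  // which covers 3 split deaths + 1 coalesce death + 11 chunks on the list = 15.
  ListStats ok = {10, 4, 2, 3, 1, 11};
  assert(conserves(ok));
  return 0;
}
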
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -24,10 +24,11 @@
 
 #include "precompiled.hpp"
 #include "classfile/symbolTable.hpp"
-#include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/concurrentMark.inline.hpp"
 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
@@ -69,7 +70,9 @@
   addr = (HeapWord*)align_size_up((intptr_t)addr,
                                   HeapWordSize << _shifter);
   size_t addrOffset = heapWordToOffset(addr);
-  if (limit == NULL) limit = _bmStartWord + _bmWordSize;
+  if (limit == NULL) {
+    limit = _bmStartWord + _bmWordSize;
+  }
   size_t limitOffset = heapWordToOffset(limit);
   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
@@ -82,7 +85,9 @@
 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
                                                  HeapWord* limit) const {
   size_t addrOffset = heapWordToOffset(addr);
-  if (limit == NULL) limit = _bmStartWord + _bmWordSize;
+  if (limit == NULL) {
+    limit = _bmStartWord + _bmWordSize;
+  }
   size_t limitOffset = heapWordToOffset(limit);
   size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
@@ -176,18 +181,20 @@
 
 void CMMarkStack::allocate(size_t size) {
   _base = NEW_C_HEAP_ARRAY(oop, size);
-  if (_base == NULL)
+  if (_base == NULL) {
     vm_exit_during_initialization("Failed to allocate "
                                   "CM region mark stack");
+  }
   _index = 0;
-  // QQQQ cast ...
   _capacity = (jint) size;
   _oops_do_bound = -1;
   NOT_PRODUCT(_max_depth = 0);
 }
 
 CMMarkStack::~CMMarkStack() {
-  if (_base != NULL) FREE_C_HEAP_ARRAY(oop, _base);
+  if (_base != NULL) {
+    FREE_C_HEAP_ARRAY(oop, _base);
+  }
 }
 
 void CMMarkStack::par_push(oop ptr) {
@@ -280,16 +287,17 @@
 
 void CMRegionStack::allocate(size_t size) {
   _base = NEW_C_HEAP_ARRAY(MemRegion, size);
-  if (_base == NULL)
-    vm_exit_during_initialization("Failed to allocate "
-                                  "CM region mark stack");
+  if (_base == NULL) {
+    vm_exit_during_initialization("Failed to allocate CM region mark stack");
+  }
   _index = 0;
-  // QQQQ cast ...
   _capacity = (jint) size;
 }
 
 CMRegionStack::~CMRegionStack() {
-  if (_base != NULL) FREE_C_HEAP_ARRAY(oop, _base);
+  if (_base != NULL) {
+    FREE_C_HEAP_ARRAY(oop, _base);
+  }
 }
 
 void CMRegionStack::push_lock_free(MemRegion mr) {
@@ -421,7 +429,8 @@
     // the ones in CMS generation.
     newOop->oop_iterate(cl);
     if (yield_after && _cm->do_yield_check()) {
-      res = false; break;
+      res = false;
+      break;
     }
   }
   debug_only(_drain_in_progress = false);
@@ -492,19 +501,20 @@
   _total_counting_time(0.0),
   _total_rs_scrub_time(0.0),
 
-  _parallel_workers(NULL)
-{
-  CMVerboseLevel verbose_level =
-    (CMVerboseLevel) G1MarkingVerboseLevel;
-  if (verbose_level < no_verbose)
+  _parallel_workers(NULL) {
+  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
+  if (verbose_level < no_verbose) {
     verbose_level = no_verbose;
-  if (verbose_level > high_verbose)
+  }
+  if (verbose_level > high_verbose) {
     verbose_level = high_verbose;
+  }
   _verbose_level = verbose_level;
 
-  if (verbose_low())
+  if (verbose_low()) {
     gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                            "heap end = "PTR_FORMAT, _heap_start, _heap_end);
+  }
 
   _markStack.allocate(MarkStackSize);
   _regionStack.allocate(G1MarkRegionStackSize);
@@ -580,10 +590,11 @@
       _marking_task_overhead    = 1.0;
     }
 
-    if (parallel_marking_threads() > 1)
+    if (parallel_marking_threads() > 1) {
       _cleanup_task_overhead = 1.0;
-    else
+    } else {
       _cleanup_task_overhead = marking_task_overhead();
+    }
     _cleanup_sleep_factor =
                      (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
 
@@ -621,8 +632,7 @@
   // at the beginning of remark to be false. By ensuring that we do
   // not observe heap expansions after marking is complete, then we do
   // not have this problem.
-  if (!concurrent_marking_in_progress() && !force)
-    return;
+  if (!concurrent_marking_in_progress() && !force) return;
 
   MemRegion committed = _g1h->g1_committed();
   assert(committed.start() == _heap_start, "start shouldn't change");
@@ -655,8 +665,9 @@
   // reset all the marking data structures and any necessary flags
   clear_marking_state();
 
-  if (verbose_low())
+  if (verbose_low()) {
     gclog_or_tty->print_cr("[global] resetting");
+  }
 
   // We do reset all of them, since different phases will use
   // different number of active threads. So, it's easiest to have all
@@ -742,8 +753,9 @@
   size_t chunkSize = M;
   while (cur < end) {
     HeapWord* next = cur + chunkSize;
-    if (next > end)
+    if (next > end) {
       next = end;
+    }
     MemRegion mr(cur,next);
     _nextMarkBitMap->clearRange(mr);
     cur = next;
@@ -781,7 +793,7 @@
 #ifndef PRODUCT
   if (G1PrintReachableAtInitialMark) {
     print_reachable("at-cycle-start",
-                    true /* use_prev_marking */, true /* all */);
+                    VerifyOption_G1UsePrevMarking, true /* all */);
   }
 #endif
 
@@ -922,8 +934,9 @@
  */
 
 void ConcurrentMark::enter_first_sync_barrier(int task_num) {
-  if (verbose_low())
+  if (verbose_low()) {
     gclog_or_tty->print_cr("[%d] entering first barrier", task_num);
+  }
 
   if (concurrent()) {
     ConcurrentGCThread::stsLeave();
@@ -935,8 +948,9 @@
   // at this point everyone should have synced up and not be doing any
   // more work
 
-  if (verbose_low())
+  if (verbose_low()) {
     gclog_or_tty->print_cr("[%d] leaving first barrier", task_num);
+  }
 
   // let task 0 do this
   if (task_num == 0) {
@@ -960,8 +974,9 @@
 }
 
 void ConcurrentMark::enter_second_sync_barrier(int task_num) {
-  if (verbose_low())
+  if (verbose_low()) {
     gclog_or_tty->print_cr("[%d] entering second barrier", task_num);
+  }
 
   if (concurrent()) {
     ConcurrentGCThread::stsLeave();
@@ -972,8 +987,9 @@
   }
   // at this point everything should be re-initialised and ready to go
 
-  if (verbose_low())
+  if (verbose_low()) {
     gclog_or_tty->print_cr("[%d] leaving second barrier", task_num);
+  }
 }
 
 #ifndef PRODUCT
@@ -1012,8 +1028,9 @@
   assert(_g1h->g1_committed().contains(addr),
          "address should be within the heap bounds");
 
-  if (!_nextMarkBitMap->isMarked(addr))
+  if (!_nextMarkBitMap->isMarked(addr)) {
     _nextMarkBitMap->parMark(addr);
+  }
 }
 
 void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) {
@@ -1021,17 +1038,19 @@
   // the caller. We only need to decide whether to push the region on
   // the region stack or not.
 
-  if (!concurrent_marking_in_progress() || !_should_gray_objects)
+  if (!concurrent_marking_in_progress() || !_should_gray_objects) {
     // We're done with marking and waiting for remark. We do not need to
     // push anything else on the region stack.
     return;
+  }
 
   HeapWord* finger = _finger;
 
-  if (verbose_low())
+  if (verbose_low()) {
     gclog_or_tty->print_cr("[global] attempting to push "
                            "region ["PTR_FORMAT", "PTR_FORMAT"), finger is at "
                            PTR_FORMAT, mr.start(), mr.end(), finger);
+  }
 
   if (mr.start() < finger) {
     // The finger is always heap region aligned and it is not possible
@@ -1045,14 +1064,16 @@
            "region boundaries should fall within the committed space");
     assert(mr.end() <= _heap_end,
            "region boundaries should fall within the committed space");
-    if (verbose_low())
+    if (verbose_low()) {
       gclog_or_tty->print_cr("[global] region ["PTR_FORMAT", "PTR_FORMAT") "
                              "below the finger, pushing it",
                              mr.start(), mr.end());
+    }
 
     if (!region_stack_push_lock_free(mr)) {
-      if (verbose_low())
+      if (verbose_low()) {
         gclog_or_tty->print_cr("[global] region stack has overflown.");
+      }
     }
   }
 }
@@ -1066,10 +1087,11 @@
     // We definitely need to mark it, irrespective whether we bail out
     // because we're done with marking.
     if (_nextMarkBitMap->parMark(addr)) {
-      if (!concurrent_marking_in_progress() || !_should_gray_objects)
+      if (!concurrent_marking_in_progress() || !_should_gray_objects) {
         // If we're done with concurrent marking and we're waiting for
         // remark, then we're not pushing anything on the stack.
         return;
+      }
 
       // No OrderAccess:store_load() is needed. It is implicit in the
       // CAS done in parMark(addr) above
@@ -1077,9 +1099,10 @@
 
       if (addr < finger) {
         if (!mark_stack_push(oop(addr))) {
-          if (verbose_low())
+          if (verbose_low()) {
             gclog_or_tty->print_cr("[global] global stack overflow "
                                    "during parMark");
+          }
         }
       }
     }
@@ -1174,10 +1197,11 @@
   set_phase(active_workers, true /* concurrent */);
 
   CMConcurrentMarkingTask markingTask(this, cmThread());
-  if (parallel_marking_threads() > 0)
+  if (parallel_marking_threads() > 0) {
     _parallel_workers->run_task(&markingTask);
-  else
+  } else {
     markingTask.work(0);
+  }
   print_stats();
 }
 
@@ -1199,7 +1223,9 @@
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(before)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(true, false, true);
+    Universe::verify(/* allow dirty */ true,
+                     /* silent      */ false,
+                     /* option      */ VerifyOption_G1UsePrevMarking);
   }
 
   G1CollectorPolicy* g1p = g1h->g1_policy();
@@ -1218,8 +1244,9 @@
     _restart_for_overflow = true;
     // Clear the flag. We do not need it any more.
     clear_has_overflown();
-    if (G1TraceMarkStackOverflow)
+    if (G1TraceMarkStackOverflow) {
       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
+    }
   } else {
     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
     // We're done with marking.
@@ -1232,9 +1259,9 @@
       HandleMark hm;  // handle scope
       gclog_or_tty->print(" VerifyDuringGC:(after)");
       Universe::heap()->prepare_for_verify();
-      Universe::heap()->verify(/* allow_dirty */      true,
-                               /* silent */           false,
-                               /* use_prev_marking */ false);
+      Universe::verify(/* allow dirty */ true,
+                       /* silent      */ false,
+                       /* option      */ VerifyOption_G1UseNextMarking);
     }
     assert(!restart_for_overflow(), "sanity");
   }
@@ -1326,9 +1353,7 @@
       size_t end_index = index + 1;
       while (end_index < g1h->n_regions()) {
         HeapRegion* chr = g1h->region_at(end_index);
-        if (!chr->continuesHumongous()) {
-          break;
-        }
+        if (!chr->continuesHumongous()) break;
         end_index += 1;
       }
       _region_bm->par_at_put_range((BitMap::idx_t) index,
@@ -1337,8 +1362,9 @@
   }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (!_final && _regions_done == 0)
+    if (!_final && _regions_done == 0) {
       _start_vtime_sec = os::elapsedVTime();
+    }
 
     if (hr->continuesHumongous()) {
       // We will ignore these here and process them when their
@@ -1431,8 +1457,9 @@
       _changed = true;
     }
     // Handle the last range, if any.
-    if (start_card_num != -1)
+    if (start_card_num != -1) {
       mark_card_num_range(start_card_num, last_card_num);
+    }
     if (_final) {
       // Mark the allocated-since-marking portion...
       HeapWord* tp = hr->top();
@@ -1509,14 +1536,14 @@
   BitMap* _card_bm;
 public:
   G1ParFinalCountTask(G1CollectedHeap* g1h, CMBitMap* bm,
-                      BitMap* region_bm, BitMap* card_bm) :
-    AbstractGangTask("G1 final counting"), _g1h(g1h),
-    _bm(bm), _region_bm(region_bm), _card_bm(card_bm)
-  {
-    if (ParallelGCThreads > 0)
+                      BitMap* region_bm, BitMap* card_bm)
+    : AbstractGangTask("G1 final counting"), _g1h(g1h),
+      _bm(bm), _region_bm(region_bm), _card_bm(card_bm) {
+    if (ParallelGCThreads > 0) {
       _n_workers = _g1h->workers()->total_workers();
-    else
+    } else {
       _n_workers = 1;
+    }
     _live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
     _used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
   }
@@ -1628,6 +1655,23 @@
       _max_live_bytes += g1_note_end.max_live_bytes();
       _freed_bytes += g1_note_end.freed_bytes();
 
+      // If we iterate over the global cleanup list at the end of
+      // cleanup to do this printing, we cannot guarantee that we only
+      // generate output for the newly-reclaimed regions (the list
+      // might not be empty at the beginning of cleanup; we might
+      // still be working on its previous contents). So we do the
+      // printing here, before we append the new regions to the global
+      // cleanup list.
+
+      G1HRPrinter* hr_printer = _g1h->hr_printer();
+      if (hr_printer->is_active()) {
+        HeapRegionLinkedListIterator iter(&local_cleanup_list);
+        while (iter.more_available()) {
+          HeapRegion* hr = iter.get_next();
+          hr_printer->cleanup(hr);
+        }
+      }
+
       _cleanup_list->add_as_tail(&local_cleanup_list);
       assert(local_cleanup_list.is_empty(), "post-condition");
 
@@ -1701,7 +1745,9 @@
                               true /* par */);
     double region_time = (os::elapsedTime() - start);
     _claimed_region_time += region_time;
-    if (region_time > _max_region_time) _max_region_time = region_time;
+    if (region_time > _max_region_time) {
+      _max_region_time = region_time;
+    }
   }
   return false;
 }
@@ -1724,9 +1770,9 @@
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(before)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(/* allow dirty  */ true,
-                     /* silent       */ false,
-                     /* prev marking */ true);
+    Universe::verify(/* allow dirty */ true,
+                     /* silent      */ false,
+                     /* option      */ VerifyOption_G1UsePrevMarking);
   }
 
   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
@@ -1872,9 +1918,9 @@
     HandleMark hm;  // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(after)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(/* allow dirty  */ true,
-                     /* silent       */ false,
-                     /* prev marking */ true);
+    Universe::verify(/* allow dirty */ true,
+                     /* silent      */ false,
+                     /* option      */ VerifyOption_G1UsePrevMarking);
   }
 
   g1h->verify_region_sets_optional();
@@ -1960,10 +2006,11 @@
     oop obj = oopDesc::load_decode_heap_oop(p);
     HeapWord* addr = (HeapWord*)obj;
 
-    if (_cm->verbose_high())
+    if (_cm->verbose_high()) {
       gclog_or_tty->print_cr("\t[0] we're looking at location "
-                               "*"PTR_FORMAT" = "PTR_FORMAT,
-                               p, (void*) obj);
+                             "*"PTR_FORMAT" = "PTR_FORMAT,
+                             p, (void*) obj);
+    }
 
     if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) {
       _bitMap->mark(addr);
@@ -2025,10 +2072,11 @@
   template <class T> void do_oop_work(T* p) {
     if (!_cm->has_overflown()) {
       oop obj = oopDesc::load_decode_heap_oop(p);
-      if (_cm->verbose_high())
+      if (_cm->verbose_high()) {
         gclog_or_tty->print_cr("\t[%d] we're looking at location "
                                "*"PTR_FORMAT" = "PTR_FORMAT,
                                _task->task_id(), p, (void*) obj);
+      }
 
       _task->deal_with_reference(obj);
       _ref_counter--;
@@ -2055,8 +2103,9 @@
         _ref_counter = _ref_counter_limit;
       }
     } else {
-       if (_cm->verbose_high())
+      if (_cm->verbose_high()) {
          gclog_or_tty->print_cr("\t[%d] CM Overflow", _task->task_id());
+      }
     }
   }
 };
@@ -2071,8 +2120,10 @@
 
   void do_void() {
     do {
-      if (_cm->verbose_high())
-        gclog_or_tty->print_cr("\t[%d] Drain: Calling do marking_step", _task->task_id());
+      if (_cm->verbose_high()) {
+        gclog_or_tty->print_cr("\t[%d] Drain: Calling do marking_step",
+                               _task->task_id());
+      }
 
       // We call CMTask::do_marking_step() to completely drain the local and
       // global marking stacks. The routine is called in a loop, which we'll
@@ -2343,18 +2394,16 @@
 class PrintReachableOopClosure: public OopClosure {
 private:
   G1CollectedHeap* _g1h;
-  CMBitMapRO*      _bitmap;
   outputStream*    _out;
-  bool             _use_prev_marking;
+  VerifyOption     _vo;
   bool             _all;
 
 public:
-  PrintReachableOopClosure(CMBitMapRO*   bitmap,
-                           outputStream* out,
-                           bool          use_prev_marking,
+  PrintReachableOopClosure(outputStream* out,
+                           VerifyOption  vo,
                            bool          all) :
     _g1h(G1CollectedHeap::heap()),
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
+    _out(out), _vo(vo), _all(all) { }
 
   void do_oop(narrowOop* p) { do_oop_work(p); }
   void do_oop(      oop* p) { do_oop_work(p); }
@@ -2372,12 +2421,23 @@
       HeapRegion* hr  = _g1h->heap_region_containing(obj);
       guarantee(hr != NULL, "invariant");
       bool over_tams = false;
-      if (_use_prev_marking) {
-        over_tams = hr->obj_allocated_since_prev_marking(obj);
-      } else {
-        over_tams = hr->obj_allocated_since_next_marking(obj);
+      bool marked = false;
+
+      switch (_vo) {
+        case VerifyOption_G1UsePrevMarking:
+          over_tams = hr->obj_allocated_since_prev_marking(obj);
+          marked = _g1h->isMarkedPrev(obj);
+          break;
+        case VerifyOption_G1UseNextMarking:
+          over_tams = hr->obj_allocated_since_next_marking(obj);
+          marked = _g1h->isMarkedNext(obj);
+          break;
+        case VerifyOption_G1UseMarkWord:
+          marked = obj->is_gc_marked();
+          break;
+        default:
+          ShouldNotReachHere();
       }
-      bool marked = _bitmap->isMarked((HeapWord*) obj);
 
       if (over_tams) {
         str = " >";
@@ -2398,35 +2458,45 @@
 
 class PrintReachableObjectClosure : public ObjectClosure {
 private:
-  CMBitMapRO*   _bitmap;
-  outputStream* _out;
-  bool          _use_prev_marking;
-  bool          _all;
-  HeapRegion*   _hr;
+  G1CollectedHeap* _g1h;
+  outputStream*    _out;
+  VerifyOption     _vo;
+  bool             _all;
+  HeapRegion*      _hr;
 
 public:
-  PrintReachableObjectClosure(CMBitMapRO*   bitmap,
-                              outputStream* out,
-                              bool          use_prev_marking,
+  PrintReachableObjectClosure(outputStream* out,
+                              VerifyOption  vo,
                               bool          all,
                               HeapRegion*   hr) :
-    _bitmap(bitmap), _out(out),
-    _use_prev_marking(use_prev_marking), _all(all), _hr(hr) { }
+    _g1h(G1CollectedHeap::heap()),
+    _out(out), _vo(vo), _all(all), _hr(hr) { }
 
   void do_object(oop o) {
-    bool over_tams;
-    if (_use_prev_marking) {
-      over_tams = _hr->obj_allocated_since_prev_marking(o);
-    } else {
-      over_tams = _hr->obj_allocated_since_next_marking(o);
+    bool over_tams = false;
+    bool marked = false;
+
+    switch (_vo) {
+      case VerifyOption_G1UsePrevMarking:
+        over_tams = _hr->obj_allocated_since_prev_marking(o);
+        marked = _g1h->isMarkedPrev(o);
+        break;
+      case VerifyOption_G1UseNextMarking:
+        over_tams = _hr->obj_allocated_since_next_marking(o);
+        marked = _g1h->isMarkedNext(o);
+        break;
+      case VerifyOption_G1UseMarkWord:
+        marked = o->is_gc_marked();
+        break;
+      default:
+        ShouldNotReachHere();
     }
-    bool marked = _bitmap->isMarked((HeapWord*) o);
     bool print_it = _all || over_tams || marked;
 
     if (print_it) {
       _out->print_cr(" "PTR_FORMAT"%s",
                      o, (over_tams) ? " >" : (marked) ? " M" : "");
-      PrintReachableOopClosure oopCl(_bitmap, _out, _use_prev_marking, _all);
+      PrintReachableOopClosure oopCl(_out, _vo, _all);
       o->oop_iterate(&oopCl);
     }
   }
@@ -2434,9 +2504,8 @@
 
 class PrintReachableRegionClosure : public HeapRegionClosure {
 private:
-  CMBitMapRO*   _bitmap;
   outputStream* _out;
-  bool          _use_prev_marking;
+  VerifyOption  _vo;
   bool          _all;
 
 public:
@@ -2445,10 +2514,21 @@
     HeapWord* e = hr->end();
     HeapWord* t = hr->top();
     HeapWord* p = NULL;
-    if (_use_prev_marking) {
-      p = hr->prev_top_at_mark_start();
-    } else {
-      p = hr->next_top_at_mark_start();
+
+    switch (_vo) {
+      case VerifyOption_G1UsePrevMarking:
+        p = hr->prev_top_at_mark_start();
+        break;
+      case VerifyOption_G1UseNextMarking:
+        p = hr->next_top_at_mark_start();
+        break;
+      case VerifyOption_G1UseMarkWord:
+        // When we are verifying marking using the mark word,
+        // TAMS has no relevance.
+        assert(p == NULL, "post-condition");
+        break;
+      default:
+        ShouldNotReachHere();
     }
     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
                    "TAMS: "PTR_FORMAT, b, e, t, p);
@@ -2460,8 +2540,7 @@
     if (to > from) {
       _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
       _out->cr();
-      PrintReachableObjectClosure ocl(_bitmap, _out,
-                                      _use_prev_marking, _all, hr);
+      PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
       hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
       _out->cr();
     }
@@ -2469,15 +2548,25 @@
     return false;
   }
 
-  PrintReachableRegionClosure(CMBitMapRO*   bitmap,
-                              outputStream* out,
-                              bool          use_prev_marking,
+  PrintReachableRegionClosure(outputStream* out,
+                              VerifyOption  vo,
                               bool          all) :
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
+    _out(out), _vo(vo), _all(all) { }
 };
 
+static const char* verify_option_to_tams(VerifyOption vo) {
+  switch (vo) {
+    case VerifyOption_G1UsePrevMarking:
+      return "PTAMS";
+    case VerifyOption_G1UseNextMarking:
+      return "NTAMS";
+    default:
+      return "NONE";
+  }
+}
+
 void ConcurrentMark::print_reachable(const char* str,
-                                     bool use_prev_marking,
+                                     VerifyOption vo,
                                      bool all) {
   gclog_or_tty->cr();
   gclog_or_tty->print_cr("== Doing heap dump... ");
@@ -2504,20 +2593,12 @@
   }
 
   outputStream* out = &fout;
-
-  CMBitMapRO* bitmap = NULL;
-  if (use_prev_marking) {
-    bitmap = _prevMarkBitMap;
-  } else {
-    bitmap = _nextMarkBitMap;
-  }
-
-  out->print_cr("-- USING %s", (use_prev_marking) ? "PTAMS" : "NTAMS");
+  out->print_cr("-- USING %s", verify_option_to_tams(vo));
   out->cr();
 
   out->print_cr("--- ITERATING OVER REGIONS");
   out->cr();
-  PrintReachableRegionClosure rcl(bitmap, out, use_prev_marking, all);
+  PrintReachableRegionClosure rcl(out, vo, all);
   _g1h->heap_region_iterate(&rcl);
   out->cr();
 
@@ -2546,34 +2627,42 @@
 };
 
 void ConcurrentMark::deal_with_reference(oop obj) {
-  if (verbose_high())
+  if (verbose_high()) {
     gclog_or_tty->print_cr("[global] we're dealing with reference "PTR_FORMAT,
                            (void*) obj);
-
+  }
 
   HeapWord* objAddr = (HeapWord*) obj;
   assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
   if (_g1h->is_in_g1_reserved(objAddr)) {
-    assert(obj != NULL, "is_in_g1_reserved should ensure this");
-    HeapRegion* hr = _g1h->heap_region_containing(obj);
-    if (_g1h->is_obj_ill(obj, hr)) {
-      if (verbose_high())
-        gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered "
-                               "marked", (void*) obj);
-
-      // we need to mark it first
-      if (_nextMarkBitMap->parMark(objAddr)) {
-        // No OrderAccess:store_load() is needed. It is implicit in the
-        // CAS done in parMark(objAddr) above
-        HeapWord* finger = _finger;
-        if (objAddr < finger) {
-          if (verbose_high())
-            gclog_or_tty->print_cr("[global] below the global finger "
-                                   "("PTR_FORMAT"), pushing it", finger);
-          if (!mark_stack_push(obj)) {
-            if (verbose_low())
-              gclog_or_tty->print_cr("[global] global stack overflow during "
-                                     "deal_with_reference");
+    assert(obj != NULL, "null check is implicit");
+    if (!_nextMarkBitMap->isMarked(objAddr)) {
+      // Only get the containing region if the object is not marked on the
+      // bitmap (otherwise, it's a waste of time since we won't do
+      // anything with it).
+      HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
+      if (!hr->obj_allocated_since_next_marking(obj)) {
+        if (verbose_high()) {
+          gclog_or_tty->print_cr("[global] "PTR_FORMAT" is not considered "
+                                 "marked", (void*) obj);
+        }
+
+        // we need to mark it first
+        if (_nextMarkBitMap->parMark(objAddr)) {
+          // No OrderAccess:store_load() is needed. It is implicit in the
+          // CAS done in parMark(objAddr) above
+          HeapWord* finger = _finger;
+          if (objAddr < finger) {
+            if (verbose_high()) {
+              gclog_or_tty->print_cr("[global] below the global finger "
+                                     "("PTR_FORMAT"), pushing it", finger);
+            }
+            if (!mark_stack_push(obj)) {
+              if (verbose_low()) {
+                gclog_or_tty->print_cr("[global] global stack overflow during "
+                                       "deal_with_reference");
+              }
+            }
           }
         }
       }
@@ -2587,8 +2676,9 @@
   satb_mq_set.set_closure(&oc);
 
   while (satb_mq_set.apply_closure_to_completed_buffer()) {
-    if (verbose_medium())
+    if (verbose_medium()) {
       gclog_or_tty->print_cr("[global] processed an SATB buffer");
+    }
   }
 
   // no need to check whether we should do this, as this is only
@@ -2631,21 +2721,43 @@
   while (finger < _heap_end) {
     assert(_g1h->is_in_g1_reserved(finger), "invariant");
 
-    // is the gap between reading the finger and doing the CAS too long?
-
-    HeapRegion* curr_region   = _g1h->heap_region_containing(finger);
+    // Note on how this code handles humongous regions. In the
+    // normal case the finger will reach the start of a "starts
+    // humongous" (SH) region. Its end will either be the end of the
+    // last "continues humongous" (CH) region in the sequence, or the
+    // standard end of the SH region (if the SH is the only region in
+    // the sequence). That way claim_region() will skip over the CH
+    // regions. However, there is a subtle race between a CM thread
+    // executing this method and a mutator thread doing a humongous
+    // object allocation. The two are not mutually exclusive as the CM
+    // thread does not need to hold the Heap_lock when it gets
+    // here. So there is a chance that claim_region() will come across
+    // a free region that's in the progress of becoming a SH or a CH
+    // region. In the former case, it will either
+    //   a) Miss the update to the region's end, in which case it will
+    //      visit every subsequent CH region, will find their bitmaps
+    //      empty, and do nothing, or
+    //   b) Observe the update of the region's end (in which case
+    //      it will skip the subsequent CH regions).
+    // If it comes across a region that suddenly becomes CH, the
+    // scenario will be similar to b). So, the race between
+    // claim_region() and a humongous object allocation might force us
+    // to do a bit of unnecessary work (due to some unnecessary bitmap
+    // iterations) but it should not introduce any correctness issues.
+    HeapRegion* curr_region   = _g1h->heap_region_containing_raw(finger);
     HeapWord*   bottom        = curr_region->bottom();
     HeapWord*   end           = curr_region->end();
     HeapWord*   limit         = curr_region->next_top_at_mark_start();
 
-    if (verbose_low())
+    if (verbose_low()) {
       gclog_or_tty->print_cr("[%d] curr_region = "PTR_FORMAT" "
                              "["PTR_FORMAT", "PTR_FORMAT"), "
                              "limit = "PTR_FORMAT,
                              task_num, curr_region, bottom, end, limit);
-
-    HeapWord* res =
-      (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
+    }
+
+    // Is the gap between reading the finger and doing the CAS too long?
+    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
     if (res == finger) {
       // we succeeded
 
@@ -2653,32 +2765,36 @@
       // someone else might have moved the finger even further
       assert(_finger >= end, "the finger should have moved forward");
 
-      if (verbose_low())
+      if (verbose_low()) {
         gclog_or_tty->print_cr("[%d] we were successful with region = "
                                PTR_FORMAT, task_num, curr_region);
+      }
 
       if (limit > bottom) {
-        if (verbose_low())
+        if (verbose_low()) {
           gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is not empty, "
                                  "returning it ", task_num, curr_region);
+        }
         return curr_region;
       } else {
         assert(limit == bottom,
                "the region limit should be at bottom");
-        if (verbose_low())
+        if (verbose_low()) {
           gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is empty, "
                                  "returning NULL", task_num, curr_region);
+        }
         // we return NULL and the caller should try calling
         // claim_region() again.
         return NULL;
       }
     } else {
       assert(_finger > finger, "the finger should have moved forward");
-      if (verbose_low())
+      if (verbose_low()) {
         gclog_or_tty->print_cr("[%d] somebody else moved the finger, "
                                "global finger = "PTR_FORMAT", "
                                "our finger = "PTR_FORMAT,
                                task_num, _finger, finger);
+      }
 
       // read it again
       finger = _finger;
@@ -2722,18 +2838,20 @@
 }
 
 void ConcurrentMark::oops_do(OopClosure* cl) {
-  if (_markStack.size() > 0 && verbose_low())
+  if (_markStack.size() > 0 && verbose_low()) {
     gclog_or_tty->print_cr("[global] scanning the global marking stack, "
                            "size = %d", _markStack.size());
+  }
   // we first iterate over the contents of the mark stack...
   _markStack.oops_do(cl);
 
   for (int i = 0; i < (int)_max_task_num; ++i) {
     OopTaskQueue* queue = _task_queues->queue((int)i);
 
-    if (queue->size() > 0 && verbose_low())
+    if (queue->size() > 0 && verbose_low()) {
       gclog_or_tty->print_cr("[global] scanning task queue of task %d, "
                              "size = %d", i, queue->size());
+    }
 
     // ...then over the contents of the all the task queues.
     queue->oops_do(cl);
@@ -2805,14 +2923,17 @@
       return false;
     }
     _ms[_ms_ind] = obj;
-    if (obj->is_objArray()) _array_ind_stack[_ms_ind] = arr_ind;
+    if (obj->is_objArray()) {
+      _array_ind_stack[_ms_ind] = arr_ind;
+    }
     _ms_ind++;
     return true;
   }
 
   oop pop() {
-    if (_ms_ind == 0) return NULL;
-    else {
+    if (_ms_ind == 0) {
+      return NULL;
+    } else {
       _ms_ind--;
       return _ms[_ms_ind];
     }
@@ -3011,17 +3132,19 @@
 // newCSet().
 
 void ConcurrentMark::newCSet() {
-  if (!concurrent_marking_in_progress())
+  if (!concurrent_marking_in_progress()) {
     // nothing to do if marking is not in progress
     return;
+  }
 
   // find what the lowest finger is among the global and local fingers
   _min_finger = _finger;
   for (int i = 0; i < (int)_max_task_num; ++i) {
     CMTask* task = _tasks[i];
     HeapWord* task_finger = task->finger();
-    if (task_finger != NULL && task_finger < _min_finger)
+    if (task_finger != NULL && task_finger < _min_finger) {
       _min_finger = task_finger;
+    }
   }
 
   _should_gray_objects = false;
@@ -3041,17 +3164,40 @@
   // irrespective whether all collection set regions are below the
   // finger, if the region stack is not empty. This is expected to be
   // a rare case, so I don't think it's necessary to be smarter about it.
-  if (!region_stack_empty() || has_aborted_regions())
+  if (!region_stack_empty() || has_aborted_regions()) {
     _should_gray_objects = true;
+  }
 }
 
 void ConcurrentMark::registerCSetRegion(HeapRegion* hr) {
-  if (!concurrent_marking_in_progress())
-    return;
+  if (!concurrent_marking_in_progress()) return;
 
   HeapWord* region_end = hr->end();
-  if (region_end > _min_finger)
+  if (region_end > _min_finger) {
     _should_gray_objects = true;
+  }
+}
+
+// Resets the region fields of active CMTasks whose values point
+// into the collection set.
+void ConcurrentMark::reset_active_task_region_fields_in_cset() {
+  assert(SafepointSynchronize::is_at_safepoint(), "should be in STW");
+  assert(parallel_marking_threads() <= _max_task_num, "sanity");
+
+  for (int i = 0; i < (int)parallel_marking_threads(); i += 1) {
+    CMTask* task = _tasks[i];
+    HeapWord* task_finger = task->finger();
+    if (task_finger != NULL) {
+      assert(_g1h->is_in_g1_reserved(task_finger), "not in heap");
+      HeapRegion* finger_region = _g1h->heap_region_containing(task_finger);
+      if (finger_region->in_collection_set()) {
+        // The task's current region is in the collection set.
+        // This region will be evacuated in the current GC and
+        // the region fields in the task will be stale.
+        task->giveup_current_region();
+      }
+    }
+  }
 }
 
 // abandon current marking iteration due to a Full GC
@@ -3130,11 +3276,13 @@
 // We take a break if someone is trying to stop the world.
 bool ConcurrentMark::do_yield_check(int worker_i) {
   if (should_yield()) {
-    if (worker_i == 0)
+    if (worker_i == 0) {
       _g1h->g1_policy()->record_concurrent_pause();
+    }
     cmThread()->yield();
-    if (worker_i == 0)
+    if (worker_i == 0) {
       _g1h->g1_policy()->record_concurrent_pause_end();
+    }
     return true;
   } else {
     return false;
@@ -3152,9 +3300,8 @@
 
 bool ConcurrentMark::containing_cards_are_marked(void* start,
                                                  void* last) {
-  return
-    containing_card_is_marked(start) &&
-    containing_card_is_marked(last);
+  return containing_card_is_marked(start) &&
+         containing_card_is_marked(last);
 }
 
 #ifndef PRODUCT
@@ -3169,6 +3316,22 @@
 }
 #endif
 
+void CMTask::scan_object(oop obj) {
+  assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
+
+  if (_cm->verbose_high()) {
+    gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT,
+                           _task_id, (void*) obj);
+  }
+
+  size_t obj_size = obj->size();
+  _words_scanned += obj_size;
+
+  obj->oop_iterate(_cm_oop_closure);
+  statsOnly( ++_objs_scanned );
+  check_limits();
+}
+
 // Closure for iteration over bitmaps
 class CMBitMapClosure : public BitMapClosure {
 private:
@@ -3232,43 +3395,17 @@
   CMObjectClosure(CMTask* task) : _task(task) { }
 };
 
-// Closure for iterating over object fields
-class CMOopClosure : public OopClosure {
-private:
-  G1CollectedHeap*   _g1h;
-  ConcurrentMark*    _cm;
-  CMTask*            _task;
-
-public:
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-
-  template <class T> void do_oop_work(T* p) {
-    assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
-    assert(!_g1h->is_on_master_free_list(
-                    _g1h->heap_region_containing((HeapWord*) p)), "invariant");
-
-    oop obj = oopDesc::load_decode_heap_oop(p);
-    if (_cm->verbose_high())
-      gclog_or_tty->print_cr("[%d] we're looking at location "
-                             "*"PTR_FORMAT" = "PTR_FORMAT,
-                             _task->task_id(), p, (void*) obj);
-    _task->deal_with_reference(obj);
+G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
+                               ConcurrentMark* cm,
+                               CMTask* task)
+  : _g1h(g1h), _cm(cm), _task(task) {
+  assert(_ref_processor == NULL, "should be initialized to NULL");
+
+  if (G1UseConcMarkReferenceProcessing) {
+    _ref_processor = g1h->ref_processor();
+    assert(_ref_processor != NULL, "should not be NULL");
   }
-
-  CMOopClosure(G1CollectedHeap* g1h,
-               ConcurrentMark* cm,
-               CMTask* task)
-    : _g1h(g1h), _cm(cm), _task(task)
-  {
-    assert(_ref_processor == NULL, "should be initialized to NULL");
-
-    if (G1UseConcMarkReferenceProcessing) {
-      _ref_processor = g1h->ref_processor();
-      assert(_ref_processor != NULL, "should not be NULL");
-    }
-  }
-};
+}
 
 void CMTask::setup_for_region(HeapRegion* hr) {
   // Separated the asserts so that we know which one fires.
@@ -3277,9 +3414,10 @@
   assert(!hr->continuesHumongous(),
         "claim_region() should have filtered out continues humongous regions");
 
-  if (_cm->verbose_low())
+  if (_cm->verbose_low()) {
     gclog_or_tty->print_cr("[%d] setting up for region "PTR_FORMAT,
                            _task_id, hr);
+  }
 
   _curr_region  = hr;
   _finger       = hr->bottom();
@@ -3292,10 +3430,11 @@
   HeapWord* limit           = hr->next_top_at_mark_start();
 
   if (limit == bottom) {
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] found an empty region "
                              "["PTR_FORMAT", "PTR_FORMAT")",
                              _task_id, bottom, limit);
+    }
     // The region was collected underneath our feet.
     // We set the finger to bottom to ensure that the bitmap
     // iteration that will follow this will not do anything.
@@ -3324,9 +3463,10 @@
 
 void CMTask::giveup_current_region() {
   assert(_curr_region != NULL, "invariant");
-  if (_cm->verbose_low())
+  if (_cm->verbose_low()) {
     gclog_or_tty->print_cr("[%d] giving up region "PTR_FORMAT,
                            _task_id, _curr_region);
+  }
   clear_region_fields();
 }
 
@@ -3340,11 +3480,21 @@
   _region_finger = NULL;
 }
 
+void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
+  if (cm_oop_closure == NULL) {
+    assert(_cm_oop_closure != NULL, "invariant");
+  } else {
+    assert(_cm_oop_closure == NULL, "invariant");
+  }
+  _cm_oop_closure = cm_oop_closure;
+}
+
 void CMTask::reset(CMBitMap* nextMarkBitMap) {
   guarantee(nextMarkBitMap != NULL, "invariant");
 
-  if (_cm->verbose_low())
+  if (_cm->verbose_low()) {
     gclog_or_tty->print_cr("[%d] resetting", _task_id);
+  }
 
   _nextMarkBitMap                = nextMarkBitMap;
   clear_region_fields();
@@ -3389,118 +3539,6 @@
   return !_cm->mark_stack_empty() || has_aborted();
 }
 
-// This determines whether the method below will check both the local
-// and global fingers when determining whether to push on the stack a
-// gray object (value 1) or whether it will only check the global one
-// (value 0). The tradeoffs are that the former will be a bit more
-// accurate and possibly push less on the stack, but it might also be
-// a little bit slower.
-
-#define _CHECK_BOTH_FINGERS_      1
-
-void CMTask::deal_with_reference(oop obj) {
-  if (_cm->verbose_high())
-    gclog_or_tty->print_cr("[%d] we're dealing with reference = "PTR_FORMAT,
-                           _task_id, (void*) obj);
-
-  ++_refs_reached;
-
-  HeapWord* objAddr = (HeapWord*) obj;
-  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
-  if (_g1h->is_in_g1_reserved(objAddr)) {
-    assert(obj != NULL, "is_in_g1_reserved should ensure this");
-    HeapRegion* hr =  _g1h->heap_region_containing(obj);
-    if (_g1h->is_obj_ill(obj, hr)) {
-      if (_cm->verbose_high())
-        gclog_or_tty->print_cr("[%d] "PTR_FORMAT" is not considered marked",
-                               _task_id, (void*) obj);
-
-      // we need to mark it first
-      if (_nextMarkBitMap->parMark(objAddr)) {
-        // No OrderAccess:store_load() is needed. It is implicit in the
-        // CAS done in parMark(objAddr) above
-        HeapWord* global_finger = _cm->finger();
-
-#if _CHECK_BOTH_FINGERS_
-        // we will check both the local and global fingers
-
-        if (_finger != NULL && objAddr < _finger) {
-          if (_cm->verbose_high())
-            gclog_or_tty->print_cr("[%d] below the local finger ("PTR_FORMAT"), "
-                                   "pushing it", _task_id, _finger);
-          push(obj);
-        } else if (_curr_region != NULL && objAddr < _region_limit) {
-          // do nothing
-        } else if (objAddr < global_finger) {
-          // Notice that the global finger might be moving forward
-          // concurrently. This is not a problem. In the worst case, we
-          // mark the object while it is above the global finger and, by
-          // the time we read the global finger, it has moved forward
-          // passed this object. In this case, the object will probably
-          // be visited when a task is scanning the region and will also
-          // be pushed on the stack. So, some duplicate work, but no
-          // correctness problems.
-
-          if (_cm->verbose_high())
-            gclog_or_tty->print_cr("[%d] below the global finger "
-                                   "("PTR_FORMAT"), pushing it",
-                                   _task_id, global_finger);
-          push(obj);
-        } else {
-          // do nothing
-        }
-#else // _CHECK_BOTH_FINGERS_
-        // we will only check the global finger
-
-        if (objAddr < global_finger) {
-          // see long comment above
-
-          if (_cm->verbose_high())
-            gclog_or_tty->print_cr("[%d] below the global finger "
-                                   "("PTR_FORMAT"), pushing it",
-                                   _task_id, global_finger);
-          push(obj);
-        }
-#endif // _CHECK_BOTH_FINGERS_
-      }
-    }
-  }
-}
-
-void CMTask::push(oop obj) {
-  HeapWord* objAddr = (HeapWord*) obj;
-  assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
-  assert(!_g1h->is_on_master_free_list(
-              _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
-  assert(!_g1h->is_obj_ill(obj), "invariant");
-  assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
-
-  if (_cm->verbose_high())
-    gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj);
-
-  if (!_task_queue->push(obj)) {
-    // The local task queue looks full. We need to push some entries
-    // to the global stack.
-
-    if (_cm->verbose_medium())
-      gclog_or_tty->print_cr("[%d] task queue overflow, "
-                             "moving entries to the global stack",
-                             _task_id);
-    move_entries_to_global_stack();
-
-    // this should succeed since, even if we overflow the global
-    // stack, we should have definitely removed some entries from the
-    // local queue. So, there must be space on it.
-    bool success = _task_queue->push(obj);
-    assert(success, "invariant");
-  }
-
-  statsOnly( int tmp_size = _task_queue->size();
-             if (tmp_size > _local_max_size)
-               _local_max_size = tmp_size;
-             ++_local_pushes );
-}
-
 void CMTask::reached_limit() {
   assert(_words_scanned >= _words_scanned_limit ||
          _refs_reached >= _refs_reached_limit ,
@@ -3509,8 +3547,7 @@
 }
 
 void CMTask::regular_clock_call() {
-  if (has_aborted())
-    return;
+  if (has_aborted()) return;
 
   // First, we need to recalculate the words scanned and refs reached
   // limits for the next clock call.
@@ -3527,8 +3564,7 @@
   // If we are not concurrent (i.e. we're doing remark) we don't need
   // to check anything else. The other steps are only needed during
   // the concurrent marking phase.
-  if (!concurrent())
-    return;
+  if (!concurrent()) return;
 
   // (2) If marking has been aborted for Full GC, then we also abort.
   if (_cm->has_aborted()) {
@@ -3541,23 +3577,25 @@
 
   // (3) If marking stats are enabled, then we update the step history.
 #if _MARKING_STATS_
-  if (_words_scanned >= _words_scanned_limit)
+  if (_words_scanned >= _words_scanned_limit) {
     ++_clock_due_to_scanning;
-  if (_refs_reached >= _refs_reached_limit)
+  }
+  if (_refs_reached >= _refs_reached_limit) {
     ++_clock_due_to_marking;
+  }
 
   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
   _interval_start_time_ms = curr_time_ms;
   _all_clock_intervals_ms.add(last_interval_ms);
 
   if (_cm->verbose_medium()) {
-    gclog_or_tty->print_cr("[%d] regular clock, interval = %1.2lfms, "
-                           "scanned = %d%s, refs reached = %d%s",
-                           _task_id, last_interval_ms,
-                           _words_scanned,
-                           (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
-                           _refs_reached,
-                           (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
+    gclog_or_tty->print_cr("[%d] regular clock, interval = %1.2lfms, "
+                           "scanned = %d%s, refs reached = %d%s",
+                           _task_id, last_interval_ms,
+                           _words_scanned,
+                           (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
+                           _refs_reached,
+                           (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
   }
 #endif // _MARKING_STATS_
 
@@ -3584,9 +3622,10 @@
   // buffers available for processing. If there are, we abort.
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] aborting to deal with pending SATB buffers",
                              _task_id);
+    }
     // we do need to process SATB buffers, we'll abort and restart
     // the marking task to do so
     set_has_aborted();
@@ -3609,8 +3648,9 @@
   // entries to/from the global stack). It basically tries to decrease the
   // scanning limit so that the clock is called earlier.
 
-  if (_cm->verbose_medium())
+  if (_cm->verbose_medium()) {
     gclog_or_tty->print_cr("[%d] decreasing limits", _task_id);
+  }
 
   _words_scanned_limit = _real_words_scanned_limit -
     3 * words_scanned_period / 4;
@@ -3636,18 +3676,22 @@
     statsOnly( ++_global_transfers_to; _local_pops += n );
 
     if (!_cm->mark_stack_push(buffer, n)) {
-      if (_cm->verbose_low())
-        gclog_or_tty->print_cr("[%d] aborting due to global stack overflow", _task_id);
+      if (_cm->verbose_low()) {
+        gclog_or_tty->print_cr("[%d] aborting due to global stack overflow",
+                               _task_id);
+      }
       set_has_aborted();
     } else {
       // the transfer was successful
 
-      if (_cm->verbose_medium())
+      if (_cm->verbose_medium()) {
         gclog_or_tty->print_cr("[%d] pushed %d entries to the global stack",
                                _task_id, n);
+      }
       statsOnly( int tmp_size = _cm->mark_stack_size();
-                 if (tmp_size > _global_max_size)
+                 if (tmp_size > _global_max_size) {
                    _global_max_size = tmp_size;
+                 }
                  _global_pushes += n );
     }
   }
@@ -3668,9 +3712,10 @@
     // yes, we did actually pop at least one entry
 
     statsOnly( ++_global_transfers_from; _global_pops += n );
-    if (_cm->verbose_medium())
+    if (_cm->verbose_medium()) {
       gclog_or_tty->print_cr("[%d] popped %d entries from the global stack",
                              _task_id, n);
+    }
     for (int i = 0; i < n; ++i) {
       bool success = _task_queue->push(buffer[i]);
       // We only call this when the local queue is empty or under a
@@ -3679,8 +3724,9 @@
     }
 
     statsOnly( int tmp_size = _task_queue->size();
-               if (tmp_size > _local_max_size)
+               if (tmp_size > _local_max_size) {
                  _local_max_size = tmp_size;
+               }
                _local_pushes += n );
   }
 
@@ -3689,31 +3735,33 @@
 }
 
 void CMTask::drain_local_queue(bool partially) {
-  if (has_aborted())
-    return;
+  if (has_aborted()) return;
 
   // Decide what the target size is, depending whether we're going to
   // drain it partially (so that other tasks can steal if they run out
   // of things to do) or totally (at the very end).
   size_t target_size;
-  if (partially)
+  if (partially) {
     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
-  else
+  } else {
     target_size = 0;
+  }
 
   if (_task_queue->size() > target_size) {
-    if (_cm->verbose_high())
+    if (_cm->verbose_high()) {
       gclog_or_tty->print_cr("[%d] draining local queue, target size = %d",
                              _task_id, target_size);
+    }
 
     oop obj;
     bool ret = _task_queue->pop_local(obj);
     while (ret) {
       statsOnly( ++_local_pops );
 
-      if (_cm->verbose_high())
+      if (_cm->verbose_high()) {
         gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id,
                                (void*) obj);
+      }
 
       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
       assert(!_g1h->is_on_master_free_list(
@@ -3721,21 +3769,22 @@
 
       scan_object(obj);
 
-      if (_task_queue->size() <= target_size || has_aborted())
+      if (_task_queue->size() <= target_size || has_aborted()) {
         ret = false;
-      else
+      } else {
         ret = _task_queue->pop_local(obj);
+      }
     }
 
-    if (_cm->verbose_high())
+    if (_cm->verbose_high()) {
       gclog_or_tty->print_cr("[%d] drained local queue, size = %d",
                              _task_id, _task_queue->size());
+    }
   }
 }
 
 void CMTask::drain_global_stack(bool partially) {
-  if (has_aborted())
-    return;
+  if (has_aborted()) return;
 
   // We have a policy to drain the local queue before we attempt to
   // drain the global stack.
@@ -3748,24 +3797,27 @@
   // because another task might be doing the same, we might in fact
   // drop below the target. But, this is not a problem.
   size_t target_size;
-  if (partially)
+  if (partially) {
     target_size = _cm->partial_mark_stack_size_target();
-  else
+  } else {
     target_size = 0;
+  }
 
   if (_cm->mark_stack_size() > target_size) {
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] draining global_stack, target size %d",
                              _task_id, target_size);
+    }
 
     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
       get_entries_from_global_stack();
       drain_local_queue(partially);
     }
 
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] drained global stack, size = %d",
                              _task_id, _cm->mark_stack_size());
+    }
   }
 }
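
Both drain routines apply the policy described in the comments above: compute a target size (a fraction of capacity when draining partially, so other tasks can still steal; zero when draining completely) and pop entries until the structure is at or below it. A small single-threaded sketch of that policy, with a std::deque and a made-up cap of 64 entries standing in for the task queue and GCDrainStackTargetSize:

#include <algorithm>
#include <cstdio>
#include <deque>

// Drain 'queue' down to a target size. A partial drain leaves up to a third
// of the capacity (capped) behind so other workers can still steal from it;
// a full drain empties the queue.
void drain_local_queue(std::deque<int>& queue, size_t max_elems, bool partially) {
  const size_t drain_target_cap = 64;   // stand-in for GCDrainStackTargetSize
  size_t target_size = partially ? std::min(max_elems / 3, drain_target_cap)
                                 : size_t(0);
  while (queue.size() > target_size) {
    int entry = queue.back();
    queue.pop_back();
    (void) entry;   // scan_object(entry) would go here in the real task
  }
}

int main() {
  std::deque<int> queue;
  for (int i = 0; i < 100; ++i) queue.push_back(i);
  drain_local_queue(queue, 300, true  /* partially */);
  std::printf("after partial drain: %zu entries left\n", queue.size());
  drain_local_queue(queue, 300, false /* totally */);
  std::printf("after total drain:   %zu entries left\n", queue.size());
  return 0;
}
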
 
@@ -3774,8 +3826,7 @@
 // replicated. We should really get rid of the single-threaded version
 // of the code to simplify things.
 void CMTask::drain_satb_buffers() {
-  if (has_aborted())
-    return;
+  if (has_aborted()) return;
 
   // We set this so that the regular clock knows that we're in the
   // middle of draining buffers and doesn't set the abort flag when it
@@ -3785,26 +3836,29 @@
 
   CMObjectClosure oc(this);
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-  if (G1CollectedHeap::use_parallel_gc_threads())
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     satb_mq_set.set_par_closure(_task_id, &oc);
-  else
+  } else {
     satb_mq_set.set_closure(&oc);
+  }
 
   // This keeps claiming and applying the closure to completed buffers
   // until we run out of buffers or we need to abort.
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     while (!has_aborted() &&
            satb_mq_set.par_apply_closure_to_completed_buffer(_task_id)) {
-      if (_cm->verbose_medium())
+      if (_cm->verbose_medium()) {
         gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id);
+      }
       statsOnly( ++_satb_buffers_processed );
       regular_clock_call();
     }
   } else {
     while (!has_aborted() &&
            satb_mq_set.apply_closure_to_completed_buffer()) {
-      if (_cm->verbose_medium())
+      if (_cm->verbose_medium()) {
         gclog_or_tty->print_cr("[%d] processed an SATB buffer", _task_id);
+      }
       statsOnly( ++_satb_buffers_processed );
       regular_clock_call();
     }
@@ -3812,10 +3866,11 @@
 
   if (!concurrent() && !has_aborted()) {
     // We should only do this during remark.
-    if (G1CollectedHeap::use_parallel_gc_threads())
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
       satb_mq_set.par_iterate_closure_all_threads(_task_id);
-    else
+    } else {
       satb_mq_set.iterate_closure_all_threads();
+    }
   }
 
   _draining_satb_buffers = false;
@@ -3824,10 +3879,11 @@
          concurrent() ||
          satb_mq_set.completed_buffers_num() == 0, "invariant");
 
-  if (G1CollectedHeap::use_parallel_gc_threads())
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     satb_mq_set.set_par_closure(_task_id, NULL);
-  else
+  } else {
     satb_mq_set.set_closure(NULL);
+  }
 
   // again, this was a potentially expensive operation, decrease the
   // limits to get the regular clock call early
@@ -3835,16 +3891,16 @@
 }
 
 void CMTask::drain_region_stack(BitMapClosure* bc) {
-  if (has_aborted())
-    return;
+  if (has_aborted()) return;
 
   assert(_region_finger == NULL,
          "it should be NULL when we're not scanning a region");
 
   if (!_cm->region_stack_empty() || !_aborted_region.is_empty()) {
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] draining region stack, size = %d",
                              _task_id, _cm->region_stack_size());
+    }
 
     MemRegion mr;
 
@@ -3852,9 +3908,11 @@
       mr = _aborted_region;
       _aborted_region = MemRegion();
 
-      if (_cm->verbose_low())
-        gclog_or_tty->print_cr("[%d] scanning aborted region [ " PTR_FORMAT ", " PTR_FORMAT " )",
-                             _task_id, mr.start(), mr.end());
+      if (_cm->verbose_low()) {
+        gclog_or_tty->print_cr("[%d] scanning aborted region "
+                               "[ " PTR_FORMAT ", " PTR_FORMAT " )",
+                               _task_id, mr.start(), mr.end());
+      }
     } else {
       mr = _cm->region_stack_pop_lock_free();
       // it returns MemRegion() if the pop fails
@@ -3862,10 +3920,11 @@
     }
 
     while (mr.start() != NULL) {
-      if (_cm->verbose_medium())
+      if (_cm->verbose_medium()) {
         gclog_or_tty->print_cr("[%d] we are scanning region "
                                "["PTR_FORMAT", "PTR_FORMAT")",
                                _task_id, mr.start(), mr.end());
+      }
 
       assert(mr.end() <= _cm->finger(),
              "otherwise the region shouldn't be on the stack");
@@ -3876,9 +3935,9 @@
 
         // We finished iterating over the region without aborting.
         regular_clock_call();
-        if (has_aborted())
+        if (has_aborted()) {
           mr = MemRegion();
-        else {
+        } else {
           mr = _cm->region_stack_pop_lock_free();
           // it returns MemRegion() if the pop fails
           statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
@@ -3924,9 +3983,10 @@
       _region_finger = NULL;
     }
 
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] drained region stack, size = %d",
                              _task_id, _cm->region_stack_size());
+    }
   }
 }
 
@@ -4127,17 +4187,18 @@
 
   ++_calls;
 
-  if (_cm->verbose_low())
+  if (_cm->verbose_low()) {
     gclog_or_tty->print_cr("[%d] >>>>>>>>>> START, call = %d, "
                            "target = %1.2lfms >>>>>>>>>>",
                            _task_id, _calls, _time_target_ms);
+  }
 
   // Set up the bitmap and oop closures. Anything that uses them is
   // eventually called from this method, so it is OK to allocate these
   // statically.
   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
-  CMOopClosure    oop_closure(_g1h, _cm, this);
-  set_oop_closure(&oop_closure);
+  G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
+  set_cm_oop_closure(&cm_oop_closure);
 
   if (_cm->has_overflown()) {
     // This can happen if the region stack or the mark stack overflows
@@ -4187,11 +4248,12 @@
       // fresh region, _finger points to start().
       MemRegion mr = MemRegion(_finger, _region_limit);
 
-      if (_cm->verbose_low())
+      if (_cm->verbose_low()) {
         gclog_or_tty->print_cr("[%d] we're scanning part "
                                "["PTR_FORMAT", "PTR_FORMAT") "
                                "of region "PTR_FORMAT,
                                _task_id, _finger, _region_limit, _curr_region);
+      }
 
       // Let's iterate over the bitmap of the part of the
       // region that is left.
@@ -4247,17 +4309,19 @@
       assert(_curr_region  == NULL, "invariant");
       assert(_finger       == NULL, "invariant");
       assert(_region_limit == NULL, "invariant");
-      if (_cm->verbose_low())
+      if (_cm->verbose_low()) {
         gclog_or_tty->print_cr("[%d] trying to claim a new region", _task_id);
+      }
       HeapRegion* claimed_region = _cm->claim_region(_task_id);
       if (claimed_region != NULL) {
         // Yes, we managed to claim one
         statsOnly( ++_regions_claimed );
 
-        if (_cm->verbose_low())
+        if (_cm->verbose_low()) {
           gclog_or_tty->print_cr("[%d] we successfully claimed "
                                  "region "PTR_FORMAT,
                                  _task_id, claimed_region);
+        }
 
         setup_for_region(claimed_region);
         assert(_curr_region == claimed_region, "invariant");
@@ -4284,8 +4348,9 @@
     assert(_cm->out_of_regions(),
            "at this point we should be out of regions");
 
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] all regions claimed", _task_id);
+    }
 
     // Try to reduce the number of available SATB buffers so that
     // remark has less work to do.
@@ -4309,17 +4374,19 @@
     assert(_cm->out_of_regions() && _task_queue->size() == 0,
            "only way to reach here");
 
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] starting to steal", _task_id);
+    }
 
     while (!has_aborted()) {
       oop obj;
       statsOnly( ++_steal_attempts );
 
       if (_cm->try_stealing(_task_id, &_hash_seed, obj)) {
-        if (_cm->verbose_medium())
+        if (_cm->verbose_medium()) {
           gclog_or_tty->print_cr("[%d] stolen "PTR_FORMAT" successfully",
                                  _task_id, (void*) obj);
+        }
 
         statsOnly( ++_steals );
 
@@ -4357,8 +4424,9 @@
     assert(_cm->out_of_regions(), "only way to reach here");
     assert(_task_queue->size() == 0, "only way to reach here");
 
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] starting termination protocol", _task_id);
+    }
 
     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
     // The CMTask class also extends the TerminatorTerminator class,
@@ -4396,14 +4464,17 @@
       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
       guarantee(!_cm->region_stack_overflow(), "only way to reach here");
 
-      if (_cm->verbose_low())
+      if (_cm->verbose_low()) {
         gclog_or_tty->print_cr("[%d] all tasks terminated", _task_id);
+      }
     } else {
       // Apparently there's more work to do. Let's abort this task. It
       // will restart it and we can hopefully find more things to do.
 
-      if (_cm->verbose_low())
-        gclog_or_tty->print_cr("[%d] apparently there is more work to do", _task_id);
+      if (_cm->verbose_low()) {
+        gclog_or_tty->print_cr("[%d] apparently there is more work to do",
+                               _task_id);
+      }
 
       set_has_aborted();
       statsOnly( ++_aborted_termination );
@@ -4413,7 +4484,7 @@
   // Mainly for debugging purposes to make sure that a pointer to the
   // closure which was statically allocated in this frame doesn't
   // escape it by accident.
-  set_oop_closure(NULL);
+  set_cm_oop_closure(NULL);
   double end_time_ms = os::elapsedVTime() * 1000.0;
   double elapsed_time_ms = end_time_ms - _start_time_ms;
   // Update the step history.
@@ -4440,8 +4511,9 @@
       // what they are doing and re-initialise in a safe manner. We
       // will achieve this with the use of two barrier sync points.
 
-      if (_cm->verbose_low())
+      if (_cm->verbose_low()) {
         gclog_or_tty->print_cr("[%d] detected overflow", _task_id);
+      }
 
       _cm->enter_first_sync_barrier(_task_id);
       // When we exit this sync barrier we know that all tasks have
@@ -4464,15 +4536,17 @@
       gclog_or_tty->print_cr("[%d] <<<<<<<<<< ABORTING, target = %1.2lfms, "
                              "elapsed = %1.2lfms <<<<<<<<<<",
                              _task_id, _time_target_ms, elapsed_time_ms);
-      if (_cm->has_aborted())
+      if (_cm->has_aborted()) {
         gclog_or_tty->print_cr("[%d] ========== MARKING ABORTED ==========",
                                _task_id);
+      }
     }
   } else {
-    if (_cm->verbose_low())
+    if (_cm->verbose_low()) {
       gclog_or_tty->print_cr("[%d] <<<<<<<<<< FINISHED, target = %1.2lfms, "
                              "elapsed = %1.2lfms <<<<<<<<<<",
                              _task_id, _time_target_ms, elapsed_time_ms);
+    }
   }
 
   _claimed = false;
@@ -4488,7 +4562,7 @@
     _nextMarkBitMap(NULL), _hash_seed(17),
     _task_queue(task_queue),
     _task_queues(task_queues),
-    _oop_closure(NULL),
+    _cm_oop_closure(NULL),
     _aborted_region(MemRegion()) {
   guarantee(task_queue != NULL, "invariant");
   guarantee(task_queues != NULL, "invariant");
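
Taken together, the do_marking_step() changes above keep the original phase ordering: scan claimed regions while draining the local queue and the global stack, claim new regions until none are left, then try stealing from other tasks, and only then enter the termination protocol. A single-threaded skeleton of that ordering, with trivial stub helpers in place of the real CMTask machinery:

#include <cstdio>

// Toy state: a few regions to claim, each of which "produces" queue entries.
static int regions_left  = 3;
static int queue_entries = 0;

static bool claim_region() {
  if (regions_left == 0) return false;
  --regions_left;
  queue_entries += 5;         // scanning a region pushes some references
  return true;
}
static void drain_local_queue()  { queue_entries = 0; }
static void drain_global_stack() { /* nothing queued globally in this sketch */ }
static bool try_stealing()       { return false; }  // no other workers here

int main() {
  // Phase 1: claim and scan regions until none are left.
  while (claim_region()) {
    drain_local_queue();
    drain_global_stack();
  }
  // Phase 2: attempt to steal work from other tasks (none in this sketch).
  while (try_stealing()) {
    drain_local_queue();
  }
  // Phase 3: the termination protocol would run here once everything is empty.
  std::printf("marking step done, %d entries left\n", queue_entries);
  return 0;
}
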
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -131,22 +131,22 @@
   void mark(HeapWord* addr) {
     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
            "outside underlying space?");
-    _bm.at_put(heapWordToOffset(addr), true);
+    _bm.set_bit(heapWordToOffset(addr));
   }
   void clear(HeapWord* addr) {
     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
            "outside underlying space?");
-    _bm.at_put(heapWordToOffset(addr), false);
+    _bm.clear_bit(heapWordToOffset(addr));
   }
   bool parMark(HeapWord* addr) {
     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
            "outside underlying space?");
-    return _bm.par_at_put(heapWordToOffset(addr), true);
+    return _bm.par_set_bit(heapWordToOffset(addr));
   }
   bool parClear(HeapWord* addr) {
     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
            "outside underlying space?");
-    return _bm.par_at_put(heapWordToOffset(addr), false);
+    return _bm.par_clear_bit(heapWordToOffset(addr));
   }
   void markRange(MemRegion mr);
   void clearAll();
@@ -605,10 +605,10 @@
   void mark_stack_pop(oop* arr, int max, int* n) {
     _markStack.par_pop_arr(arr, max, n);
   }
-  size_t mark_stack_size()              { return _markStack.size(); }
+  size_t mark_stack_size()                { return _markStack.size(); }
   size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
-  bool mark_stack_overflow()            { return _markStack.overflow(); }
-  bool mark_stack_empty()               { return _markStack.isEmpty(); }
+  bool mark_stack_overflow()              { return _markStack.overflow(); }
+  bool mark_stack_empty()                 { return _markStack.isEmpty(); }
 
   // (Lock-free) Manipulation of the region stack
   bool region_stack_push_lock_free(MemRegion mr) {
@@ -736,12 +736,14 @@
   // will dump the contents of its reference fields, as well as
   // liveness information for the object and its referents. The dump
   // will be written to a file with the following name:
-  // G1PrintReachableBaseFile + "." + str. use_prev_marking decides
-  // whether the prev (use_prev_marking == true) or next
-  // (use_prev_marking == false) marking information will be used to
-  // determine the liveness of each object / referent. If all is true,
-  // all objects in the heap will be dumped, otherwise only the live
-  // ones. In the dump the following symbols / abbreviations are used:
+  // G1PrintReachableBaseFile + "." + str.
+  // vo decides whether the prev (vo == UsePrevMarking), the next
+  // (vo == UseNextMarking) marking information, or the mark word
+  // (vo == UseMarkWord) will be used to determine the liveness of
+  // each object / referent.
+  // If all is true, all objects in the heap will be dumped, otherwise
+  // only the live ones. In the dump the following symbols / abbreviations
+  // are used:
   //   M : an explicitly live object (its bitmap bit is set)
   //   > : an implicitly live object (over tams)
   //   O : an object outside the G1 heap (typically: in the perm gen)
@@ -749,7 +751,7 @@
   //   AND MARKED : indicates that an object is both explicitly and
   //   implicitly live (it should be one or the other, not both)
   void print_reachable(const char* str,
-                       bool use_prev_marking, bool all) PRODUCT_RETURN;
+                       VerifyOption vo, bool all) PRODUCT_RETURN;
 
   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();
@@ -809,10 +811,19 @@
 
   // It indicates that a new collection set is being chosen.
   void newCSet();
+
   // It registers a collection set heap region with CM. This is used
   // to determine whether any heap regions are located above the finger.
   void registerCSetRegion(HeapRegion* hr);
 
+  // Resets the region fields of any active CMTask whose region fields
+  // are in the collection set (i.e. the region currently claimed by
+  // the CMTask will be evacuated and may be used, subsequently, as
+  // an alloc region). When this happens the region fields in the CMTask
+  // are stale and, hence, should be cleared causing the worker thread
+  // to claim a new region.
+  void reset_active_task_region_fields_in_cset();
+
   // Registers the maximum region-end associated with a set of
   // regions with CM. Again this is used to determine whether any
   // heap regions are located above the finger.
@@ -822,8 +833,9 @@
     // _min_finger then we need to gray objects.
     // This routine is like registerCSetRegion but for an entire
     // collection of regions.
-    if (max_finger > _min_finger)
+    if (max_finger > _min_finger) {
       _should_gray_objects = true;
+    }
   }
 
   // Returns "true" if at least one mark has been completed.
@@ -869,14 +881,18 @@
   // The following indicate whether a given verbose level has been
   // set. Notice that anything above stats is conditional to
   // _MARKING_VERBOSE_ having been set to 1
-  bool verbose_stats()
-    { return _verbose_level >= stats_verbose; }
-  bool verbose_low()
-    { return _MARKING_VERBOSE_ && _verbose_level >= low_verbose; }
-  bool verbose_medium()
-    { return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose; }
-  bool verbose_high()
-    { return _MARKING_VERBOSE_ && _verbose_level >= high_verbose; }
+  bool verbose_stats() {
+    return _verbose_level >= stats_verbose;
+  }
+  bool verbose_low() {
+    return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
+  }
+  bool verbose_medium() {
+    return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
+  }
+  bool verbose_high() {
+    return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
+  }
 };
 
 // A class representing a marking task.
@@ -919,7 +935,7 @@
   double                      _start_time_ms;
 
   // the oop closure used for iterations over oops
-  OopClosure*                 _oop_closure;
+  G1CMOopClosure*             _cm_oop_closure;
 
   // the region this task is scanning, NULL if we're not scanning any
   HeapRegion*                 _curr_region;
@@ -1039,9 +1055,6 @@
   void setup_for_region(HeapRegion* hr);
   // it brings up-to-date the limit of the region
   void update_region_limit();
-  // it resets the local fields after a task has finished scanning a
-  // region
-  void giveup_current_region();
 
   // called when either the words scanned or the refs visited limit
   // has been reached
@@ -1055,8 +1068,9 @@
   // respective limit and calls reached_limit() if they have
   void check_limits() {
     if (_words_scanned >= _words_scanned_limit ||
-        _refs_reached >= _refs_reached_limit)
+        _refs_reached >= _refs_reached_limit) {
       reached_limit();
+    }
   }
   // this is supposed to be called regularly during a marking step as
   // it checks a bunch of conditions that might cause the marking step
@@ -1094,6 +1108,11 @@
   // exit the termination protocol after it's entered it.
   virtual bool should_exit_termination();
 
+  // Resets the local region fields after a task has finished scanning a
+  // region; or when they have become stale as a result of the region
+  // being evacuated.
+  void giveup_current_region();
+
   HeapWord* finger()            { return _finger; }
 
   bool has_aborted()            { return _has_aborted; }
@@ -1111,32 +1130,17 @@
   // Clears any recorded partially scanned region
   void clear_aborted_region()   { set_aborted_region(MemRegion()); }
 
-  void set_oop_closure(OopClosure* oop_closure) {
-    _oop_closure = oop_closure;
-  }
+  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
 
   // It grays the object by marking it and, if necessary, pushing it
   // on the local queue
-  void deal_with_reference(oop obj);
+  inline void deal_with_reference(oop obj);
 
   // It scans an object and visits its children.
-  void scan_object(oop obj) {
-    assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
-
-    if (_cm->verbose_high())
-      gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT,
-                             _task_id, (void*) obj);
-
-    size_t obj_size = obj->size();
-    _words_scanned += obj_size;
-
-    obj->oop_iterate(_oop_closure);
-    statsOnly( ++_objs_scanned );
-    check_limits();
-  }
+  void scan_object(oop obj);
 
   // It pushes an object on the local queue.
-  void push(oop obj);
+  inline void push(oop obj);
 
   // These two move entries to/from the global stack.
   void move_entries_to_global_stack();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
+
+#include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+
+inline void CMTask::push(oop obj) {
+  HeapWord* objAddr = (HeapWord*) obj;
+  assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
+  assert(!_g1h->is_on_master_free_list(
+              _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
+  assert(!_g1h->is_obj_ill(obj), "invariant");
+  assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
+
+  if (_cm->verbose_high()) {
+    gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj);
+  }
+
+  if (!_task_queue->push(obj)) {
+    // The local task queue looks full. We need to push some entries
+    // to the global stack.
+
+    if (_cm->verbose_medium()) {
+      gclog_or_tty->print_cr("[%d] task queue overflow, "
+                             "moving entries to the global stack",
+                             _task_id);
+    }
+    move_entries_to_global_stack();
+
+    // this should succeed since, even if we overflow the global
+    // stack, we should have definitely removed some entries from the
+    // local queue. So, there must be space on it.
+    bool success = _task_queue->push(obj);
+    assert(success, "invariant");
+  }
+
+  statsOnly( int tmp_size = _task_queue->size();
+             if (tmp_size > _local_max_size) {
+               _local_max_size = tmp_size;
+             }
+             ++_local_pushes );
+}
+
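
The inlined push() relies on the invariant spelled out in its comment: after move_entries_to_global_stack() spills part of the local queue, the retried local push must succeed. A single-threaded sketch of that overflow policy, with std::vector containers standing in for the lock-free task queue and the shared mark stack:

#include <cassert>
#include <cstdio>
#include <vector>

struct MarkStacks {
  std::vector<int> local_queue;    // bounded, per-task
  std::vector<int> global_stack;   // shared overflow area (unbounded here)
  size_t local_capacity;

  explicit MarkStacks(size_t cap) : local_capacity(cap) { }

  void move_entries_to_global_stack() {
    // Spill half of the local queue to the global stack.
    size_t to_move = local_queue.size() / 2;
    for (size_t i = 0; i < to_move; ++i) {
      global_stack.push_back(local_queue.back());
      local_queue.pop_back();
    }
  }

  void push(int obj) {
    if (local_queue.size() == local_capacity) {
      // The local queue looks full: move entries to the global stack first.
      move_entries_to_global_stack();
      // This must hold now, since we definitely removed some entries.
      assert(local_queue.size() < local_capacity);
    }
    local_queue.push_back(obj);
  }
};

int main() {
  MarkStacks stacks(8);
  for (int i = 0; i < 30; ++i) stacks.push(i);
  std::printf("local: %zu entries, global: %zu entries\n",
              stacks.local_queue.size(), stacks.global_stack.size());
  return 0;
}
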
+// This determines whether the method below will check both the local
+// and global fingers when determining whether to push on the stack a
+// gray object (value 1) or whether it will only check the global one
+// (value 0). The tradeoffs are that the former will be a bit more
+// accurate and possibly push less on the stack, but it might also be
+// a little bit slower.
+
+#define _CHECK_BOTH_FINGERS_      1
+
+inline void CMTask::deal_with_reference(oop obj) {
+  if (_cm->verbose_high()) {
+    gclog_or_tty->print_cr("[%d] we're dealing with reference = "PTR_FORMAT,
+                           _task_id, (void*) obj);
+  }
+
+  ++_refs_reached;
+
+  HeapWord* objAddr = (HeapWord*) obj;
+  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
+  if (_g1h->is_in_g1_reserved(objAddr)) {
+    assert(obj != NULL, "null check is implicit");
+    if (!_nextMarkBitMap->isMarked(objAddr)) {
+      // Only get the containing region if the object is not marked on the
+      // bitmap (otherwise, it's a waste of time since we won't do
+      // anything with it).
+      HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
+      if (!hr->obj_allocated_since_next_marking(obj)) {
+        if (_cm->verbose_high()) {
+          gclog_or_tty->print_cr("[%d] "PTR_FORMAT" is not considered marked",
+                                 _task_id, (void*) obj);
+        }
+
+        // we need to mark it first
+        if (_nextMarkBitMap->parMark(objAddr)) {
+          // No OrderAccess:store_load() is needed. It is implicit in the
+          // CAS done in parMark(objAddr) above
+          HeapWord* global_finger = _cm->finger();
+
+#if _CHECK_BOTH_FINGERS_
+          // we will check both the local and global fingers
+
+          if (_finger != NULL && objAddr < _finger) {
+            if (_cm->verbose_high()) {
+              gclog_or_tty->print_cr("[%d] below the local finger ("PTR_FORMAT"), "
+                                     "pushing it", _task_id, _finger);
+            }
+            push(obj);
+          } else if (_curr_region != NULL && objAddr < _region_limit) {
+            // do nothing
+          } else if (objAddr < global_finger) {
+            // Notice that the global finger might be moving forward
+            // concurrently. This is not a problem. In the worst case, we
+            // mark the object while it is above the global finger and, by
+            // the time we read the global finger, it has moved forward
+            // passed this object. In this case, the object will probably
+            // past this object. In this case, the object will probably
+            // be pushed on the stack. So, some duplicate work, but no
+            // correctness problems.
+
+            if (_cm->verbose_high()) {
+              gclog_or_tty->print_cr("[%d] below the global finger "
+                                     "("PTR_FORMAT"), pushing it",
+                                     _task_id, global_finger);
+            }
+            push(obj);
+          } else {
+            // do nothing
+          }
+#else // _CHECK_BOTH_FINGERS_
+          // we will only check the global finger
+
+          if (objAddr < global_finger) {
+            // see long comment above
+
+            if (_cm->verbose_high()) {
+              gclog_or_tty->print_cr("[%d] below the global finger "
+                                     "("PTR_FORMAT"), pushing it",
+                                     _task_id, global_finger);
+            }
+            push(obj);
+          }
+#endif // _CHECK_BOTH_FINGERS_
+        }
+      }
+    }
+  }
+}
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
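
The inlined deal_with_reference() combines two ideas: a CAS on the next-mark bitmap (parMark()) so that exactly one worker wins each object, and a comparison against the local and global fingers so that only objects behind the scan front need to be pushed explicitly. A condensed single-object sketch of that decision, with one std::atomic word standing in for the bitmap and raw addresses for the fingers; the "inside the current region" case is folded away for brevity:

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <deque>

// One word of an atomic mark bitmap: par_mark() returns true only for the
// thread that actually flips the bit, mirroring parMark()'s CAS semantics.
static std::atomic<uint64_t> mark_bits{0};

bool par_mark(unsigned bit) {
  uint64_t mask = uint64_t(1) << bit;
  uint64_t old = mark_bits.fetch_or(mask, std::memory_order_acq_rel);
  return (old & mask) == 0;   // true iff we were the first to set it
}

// Decide whether a freshly marked object must be pushed on the mark stack.
// Objects at or beyond the fingers will be reached by the normal region and
// bitmap scanning anyway, so only objects behind a finger are pushed (grayed).
void deal_with_reference(uintptr_t obj_addr, unsigned obj_bit,
                         uintptr_t local_finger, uintptr_t global_finger,
                         std::deque<uintptr_t>& mark_stack) {
  if (!par_mark(obj_bit)) {
    return;                              // another worker already marked it
  }
  if (obj_addr < local_finger || obj_addr < global_finger) {
    mark_stack.push_back(obj_addr);      // behind the scan front: push it
  }
  // otherwise: do nothing, the scan will get to it
}

int main() {
  std::deque<uintptr_t> stack;
  deal_with_reference(0x1000, 1, 0x4000, 0x8000, stack);  // behind: pushed
  deal_with_reference(0x9000, 2, 0x4000, 0x8000, stack);  // ahead: skipped
  deal_with_reference(0x1000, 1, 0x4000, 0x8000, stack);  // already marked
  std::printf("%zu entries pushed\n", stack.size());
  return 0;
}
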
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -428,6 +428,37 @@
   _cmThread->stop();
 }
 
+#ifdef ASSERT
+// A region is added to the collection set as it is retired
+// so an address p can point to a region which will be in the
+// collection set but has not yet been retired.  This method
+// therefore is only accurate during a GC pause after all
+// regions have been retired.  It is used for debugging
+// to check if an nmethod has references to objects that can
+// be moved during a partial collection.  Though it can be
+// inaccurate, it is sufficient for G1 because the conservative
+// implementation of is_scavengable() for G1 will indicate that
+// all nmethods must be scanned during a partial collection.
+bool G1CollectedHeap::is_in_partial_collection(const void* p) {
+  HeapRegion* hr = heap_region_containing(p);
+  return hr != NULL && hr->in_collection_set();
+}
+#endif
+
+// Returns true if the reference points to an object that
+// can move in an incremental collection.
+bool G1CollectedHeap::is_scavengable(const void* p) {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1CollectorPolicy* g1p = g1h->g1_policy();
+  HeapRegion* hr = heap_region_containing(p);
+  if (hr == NULL) {
+     // perm gen (or null)
+     return false;
+  } else {
+    return !hr->isHumongous();
+  }
+}
+
 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
@@ -547,16 +578,10 @@
   }
   if (res == NULL && do_expand) {
     if (expand(word_size * HeapWordSize)) {
-      // The expansion succeeded and so we should have at least one
-      // region on the free list.
-      res = _free_list.remove_head();
-    }
-  }
-  if (res != NULL) {
-    if (G1PrintHeapRegions) {
-      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], "
-                             "top "PTR_FORMAT, res->hrs_index(),
-                             res->bottom(), res->end(), res->top());
+      // Even though the heap was expanded, it might not have reached
+      // the desired size. So, we cannot assume that the allocation
+      // will succeed.
+      res = _free_list.remove_head_or_null();
     }
   }
   return res;
@@ -567,22 +592,27 @@
   HeapRegion* alloc_region = NULL;
   if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
     alloc_region = new_region(word_size, true /* do_expand */);
-    if (purpose == GCAllocForSurvived && alloc_region != NULL) {
-      alloc_region->set_survivor();
+    if (alloc_region != NULL) {
+      if (purpose == GCAllocForSurvived) {
+        _hr_printer.alloc(alloc_region, G1HRPrinter::Survivor);
+        alloc_region->set_survivor();
+      } else {
+        _hr_printer.alloc(alloc_region, G1HRPrinter::Old);
+      }
+      ++_gc_alloc_region_counts[purpose];
     }
-    ++_gc_alloc_region_counts[purpose];
   } else {
     g1_policy()->note_alloc_region_limit_reached(purpose);
   }
   return alloc_region;
 }
 
-int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
-                                                       size_t word_size) {
+size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
+                                                          size_t word_size) {
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 
-  int first = -1;
+  size_t first = G1_NULL_HRS_INDEX;
   if (num_regions == 1) {
     // Only one region to allocate, no need to go through the slower
     // path. The caller will attempt the expasion if this fails, so
@@ -591,7 +621,7 @@
     if (hr != NULL) {
       first = hr->hrs_index();
     } else {
-      first = -1;
+      first = G1_NULL_HRS_INDEX;
     }
   } else {
     // We can't allocate humongous regions while cleanupComplete() is
@@ -606,10 +636,10 @@
     append_secondary_free_list_if_not_empty_with_lock();
 
     if (free_regions() >= num_regions) {
-      first = _hrs->find_contiguous(num_regions);
-      if (first != -1) {
-        for (int i = first; i < first + (int) num_regions; ++i) {
-          HeapRegion* hr = _hrs->at(i);
+      first = _hrs.find_contiguous(num_regions);
+      if (first != G1_NULL_HRS_INDEX) {
+        for (size_t i = first; i < first + num_regions; ++i) {
+          HeapRegion* hr = region_at(i);
           assert(hr->is_empty(), "sanity");
           assert(is_on_master_free_list(hr), "sanity");
           hr->set_pending_removal(true);
@@ -622,15 +652,15 @@
 }
 
 HeapWord*
-G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
+G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
                                                            size_t num_regions,
                                                            size_t word_size) {
-  assert(first != -1, "pre-condition");
+  assert(first != G1_NULL_HRS_INDEX, "pre-condition");
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 
   // Index of last region in the series + 1.
-  int last = first + (int) num_regions;
+  size_t last = first + num_regions;
 
   // We need to initialize the region(s) we just discovered. This is
   // a bit tricky given that it can happen concurrently with
@@ -645,7 +675,7 @@
   assert(word_size <= word_size_sum, "sanity");
 
   // This will be the "starts humongous" region.
-  HeapRegion* first_hr = _hrs->at(first);
+  HeapRegion* first_hr = region_at(first);
   // The header of the new object will be placed at the bottom of
   // the first region.
   HeapWord* new_obj = first_hr->bottom();
@@ -680,8 +710,8 @@
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
   HeapRegion* hr = NULL;
-  for (int i = first + 1; i < last; ++i) {
-    hr = _hrs->at(i);
+  for (size_t i = first + 1; i < last; ++i) {
+    hr = region_at(i);
     hr->set_continuesHumongous(first_hr);
   }
   // If we have "continues humongous" regions (hr != NULL), then the
@@ -702,6 +732,17 @@
   assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
          "new_top should be in this region");
   first_hr->set_top(new_top);
+  if (_hr_printer.is_active()) {
+    HeapWord* bottom = first_hr->bottom();
+    HeapWord* end = first_hr->orig_end();
+    if ((first + 1) == last) {
+      // the series has a single humongous region
+      _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
+    } else {
+      // the series has more than one humongous region
+      _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
+    }
+  }
 
   // Now, we will update the top fields of the "continues humongous"
   // regions. The reason we need to do this is that, otherwise,
@@ -715,17 +756,19 @@
   // last one) is actually used when we will free up the humongous
   // region in free_humongous_region().
   hr = NULL;
-  for (int i = first + 1; i < last; ++i) {
-    hr = _hrs->at(i);
+  for (size_t i = first + 1; i < last; ++i) {
+    hr = region_at(i);
     if ((i + 1) == last) {
       // last continues humongous region
       assert(hr->bottom() < new_top && new_top <= hr->end(),
              "new_top should fall on this region");
       hr->set_top(new_top);
+      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
     } else {
       // not last one
       assert(new_top > hr->end(), "new_top should be above this region");
       hr->set_top(hr->end());
+      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
     }
   }
   // If we have continues humongous regions (hr != NULL), then the
@@ -752,9 +795,9 @@
   size_t num_regions =
          round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
   size_t x_size = expansion_regions();
-  size_t fs = _hrs->free_suffix();
-  int first = humongous_obj_allocate_find_first(num_regions, word_size);
-  if (first == -1) {
+  size_t fs = _hrs.free_suffix();
+  size_t first = humongous_obj_allocate_find_first(num_regions, word_size);
+  if (first == G1_NULL_HRS_INDEX) {
     // The only thing we can do now is attempt expansion.
     if (fs + x_size >= num_regions) {
       // If the number of regions we're trying to allocate for this
@@ -768,16 +811,16 @@
       assert(num_regions > fs, "earlier allocation should have succeeded");
 
       if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
+        // Even though the heap was expanded, it might not have
+        // reached the desired size. So, we cannot assume that the
+        // allocation will succeed.
         first = humongous_obj_allocate_find_first(num_regions, word_size);
-        // If the expansion was successful then the allocation
-        // should have been successful.
-        assert(first != -1, "this should have worked");
       }
     }
   }
 
   HeapWord* result = NULL;
-  if (first != -1) {
+  if (first != G1_NULL_HRS_INDEX) {
     result =
       humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
     assert(result != NULL, "it should always return a valid result");
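
The humongous path above spreads a single object across a series of contiguous fixed-size regions: the header is placed at the bottom of the "starts humongous" region and the object ends at some new_top inside the last region of the series. The sizing arithmetic is easy to check in isolation; a sketch assuming a hypothetical 131072-word region size and an arbitrary first index:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t grain_words = 131072;   // hypothetical HeapRegion::GrainWords
  const size_t word_size   = 300000;   // a humongous object, in words

  // Number of regions needed: round word_size up to a whole region multiple,
  // as round_to(word_size, GrainWords) / GrainWords does above.
  size_t num_regions = (word_size + grain_words - 1) / grain_words;

  // Suppose find_first() returned index 'first'; the series is [first, last)
  // and the object's top falls at this offset inside the last region.
  size_t first = 42;
  size_t last  = first + num_regions;               // exclusive
  size_t top_offset = word_size % grain_words;
  if (top_offset == 0) top_offset = grain_words;    // object fills it exactly

  std::printf("regions [%zu, %zu): %zu regions, "
              "last region filled to %zu of %zu words\n",
              first, last, num_regions, top_offset, grain_words);
  return 0;
}
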
@@ -798,12 +841,8 @@
 
 HeapWord*
 G1CollectedHeap::mem_allocate(size_t word_size,
-                              bool   is_noref,
-                              bool   is_tlab,
                               bool*  gc_overhead_limit_was_exceeded) {
   assert_heap_not_locked_and_not_at_safepoint();
-  assert(!is_tlab, "mem_allocate() this should not be called directly "
-         "to allocate TLABs");
 
   // Loop until the allocation is satisified, or unsatisfied after GC.
   for (int try_count = 1; /* we'll return */; try_count += 1) {
@@ -1127,6 +1166,35 @@
   }
 };
 
+class PostCompactionPrinterClosure: public HeapRegionClosure {
+private:
+  G1HRPrinter* _hr_printer;
+public:
+  bool doHeapRegion(HeapRegion* hr) {
+    assert(!hr->is_young(), "not expecting to find young regions");
+    // We only generate output for non-empty regions.
+    if (!hr->is_empty()) {
+      if (!hr->isHumongous()) {
+        _hr_printer->post_compaction(hr, G1HRPrinter::Old);
+      } else if (hr->startsHumongous()) {
+        if (hr->capacity() == (size_t) HeapRegion::GrainBytes) {
+          // single humongous region
+          _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
+        } else {
+          _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
+        }
+      } else {
+        assert(hr->continuesHumongous(), "only way to get here");
+        _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
+      }
+    }
+    return false;
+  }
+
+  PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
+    : _hr_printer(hr_printer) { }
+};
+
 bool G1CollectedHeap::do_collection(bool explicit_gc,
                                     bool clear_all_soft_refs,
                                     size_t word_size) {
@@ -1180,7 +1248,10 @@
       HandleMark hm;  // Discard invalid handles created during verification
       gclog_or_tty->print(" VerifyBeforeGC:");
       prepare_for_verify();
-      Universe::verify(true);
+      Universe::verify(/* allow dirty */ true,
+                       /* silent      */ false,
+                       /* option      */ VerifyOption_G1UsePrevMarking);
+
     }
 
     COMPILER2_PRESENT(DerivedPointerTable::clear());
@@ -1205,6 +1276,11 @@
     g1_rem_set()->cleanupHRRS();
     tear_down_region_lists();
 
+    // We should call this after we retire any currently active alloc
+    // regions so that all the ALLOC / RETIRE events are generated
+    // before the start GC event.
+    _hr_printer.start_gc(true /* full */, (size_t) total_collections());
+
     // We may have added regions to the current incremental collection
     // set between the last GC or pause and now. We need to clear the
     // incremental collection set and then start rebuilding it afresh
@@ -1232,7 +1308,6 @@
 
     ref_processor()->enable_discovery();
     ref_processor()->setup_policy(do_clear_all_soft_refs);
-
     // Do collection work
     {
       HandleMark hm;  // Discard invalid handles created during gc
@@ -1253,7 +1328,10 @@
       HandleMark hm;  // Discard invalid handles created during verification
       gclog_or_tty->print(" VerifyAfterGC:");
       prepare_for_verify();
-      Universe::verify(false);
+      Universe::verify(/* allow dirty */ false,
+                       /* silent      */ false,
+                       /* option      */ VerifyOption_G1UsePrevMarking);
+
     }
     NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
 
@@ -1267,6 +1345,17 @@
     // Resize the heap if necessary.
     resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
 
+    if (_hr_printer.is_active()) {
+      // We should do this after we potentially resize the heap so
+      // that all the COMMIT / UNCOMMIT events are generated before
+      // the end GC event.
+
+      PostCompactionPrinterClosure cl(hr_printer());
+      heap_region_iterate(&cl);
+
+      _hr_printer.end_gc(true /* full */, (size_t) total_collections());
+    }
+
     if (_cg1r->use_cache()) {
       _cg1r->clear_and_record_card_counts();
       _cg1r->clear_hot_cache();
@@ -1335,6 +1424,7 @@
   // Update the number of full collections that have been completed.
   increment_full_collections_completed(false /* concurrent */);
 
+  _hrs.verify_optional();
   verify_region_sets_optional();
 
   if (PrintHeapAtGC) {
@@ -1558,6 +1648,7 @@
 
   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
   if (expand(expand_bytes)) {
+    _hrs.verify_optional();
     verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,
                                  false /* expect_null_mutator_alloc_region */);
@@ -1565,6 +1656,19 @@
   return NULL;
 }
 
+void G1CollectedHeap::update_committed_space(HeapWord* old_end,
+                                             HeapWord* new_end) {
+  assert(old_end != new_end, "don't call this otherwise");
+  assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
+
+  // Update the committed mem region.
+  _g1_committed.set_end(new_end);
+  // Tell the card table about the update.
+  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
+  // Tell the BOT about the update.
+  _bot_shared->resize(_g1_committed.word_size());
+}
+
 bool G1CollectedHeap::expand(size_t expand_bytes) {
   size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
@@ -1576,47 +1680,47 @@
                            old_mem_size/K, aligned_expand_bytes/K);
   }
 
-  HeapWord* old_end = (HeapWord*)_g1_storage.high();
+  // First commit the memory.
+  HeapWord* old_end = (HeapWord*) _g1_storage.high();
   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
   if (successful) {
-    HeapWord* new_end = (HeapWord*)_g1_storage.high();
-
-    // Expand the committed region.
-    _g1_committed.set_end(new_end);
-
-    // Tell the cardtable about the expansion.
-    Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-
-    // And the offset table as well.
-    _bot_shared->resize(_g1_committed.word_size());
-
-    expand_bytes = aligned_expand_bytes;
-    HeapWord* base = old_end;
-
-    // Create the heap regions for [old_end, new_end)
-    while (expand_bytes > 0) {
-      HeapWord* high = base + HeapRegion::GrainWords;
-
-      // Create a new HeapRegion.
-      MemRegion mr(base, high);
-      bool is_zeroed = !_g1_max_committed.contains(base);
-      HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);
-
-      // Add it to the HeapRegionSeq.
-      _hrs->insert(hr);
-      _free_list.add_as_tail(hr);
-
-      // And we used up an expansion region to create it.
-      _expansion_regions--;
-
-      expand_bytes -= HeapRegion::GrainBytes;
-      base += HeapRegion::GrainWords;
+    // Then propagate this update to the necessary data structures.
+    HeapWord* new_end = (HeapWord*) _g1_storage.high();
+    update_committed_space(old_end, new_end);
+
+    FreeRegionList expansion_list("Local Expansion List");
+    MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
+    assert(mr.start() == old_end, "post-condition");
+    // mr might be a smaller region than what was requested if
+    // expand_by() was unable to allocate the HeapRegion instances
+    assert(mr.end() <= new_end, "post-condition");
+
+    size_t actual_expand_bytes = mr.byte_size();
+    assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
+    assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
+           "post-condition");
+    if (actual_expand_bytes < aligned_expand_bytes) {
+      // We could not expand _hrs to the desired size. In this case we
+      // need to shrink the committed space accordingly.
+      assert(mr.end() < new_end, "invariant");
+
+      size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
+      // First uncommit the memory.
+      _g1_storage.shrink_by(diff_bytes);
+      // Then propagate this update to the necessary data structures.
+      update_committed_space(new_end, mr.end());
     }
-    assert(base == new_end, "sanity");
-
-    // Now update max_committed if necessary.
-    _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), new_end));
-
+    _free_list.add_as_tail(&expansion_list);
+
+    if (_hr_printer.is_active()) {
+      HeapWord* curr = mr.start();
+      while (curr < mr.end()) {
+        HeapWord* curr_end = curr + HeapRegion::GrainWords;
+        _hr_printer.commit(curr, curr_end);
+        curr = curr_end;
+      }
+      assert(curr == mr.end(), "post-condition");
+    }
   } else {
     // The expansion of the virtual storage space was unsuccessful.
     // Let's see if it was because we ran out of swap.
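
A minimal sketch of the partial-expansion rollback above, using plain integers. The variable names echo expand(), but nothing here is real HotSpot code: if the region sequence ends up covering less than what was committed, the committed size is shrunk back by the difference.

#include <cstdio>

int main() {
  // Requested, page-aligned expansion (aligned_expand_bytes in expand()).
  long aligned_expand_bytes = 8L * 1024 * 1024;

  // 1. The virtual space is committed for the full request.
  long committed_bytes = aligned_expand_bytes;

  // 2. The region sequence only instantiates HeapRegions for part of the
  //    newly committed range (actual_expand_bytes in the real code).
  long actual_expand_bytes = 6L * 1024 * 1024;

  // 3. Roll the committed space back so it matches what is actually covered
  //    by regions, mirroring the shrink_by()/update_committed_space() pair.
  if (actual_expand_bytes < aligned_expand_bytes) {
    long diff_bytes = aligned_expand_bytes - actual_expand_bytes;
    committed_bytes -= diff_bytes;
  }

  std::printf("committed after rollback: %ld bytes\n", committed_bytes);
  return 0;
}
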
@@ -1636,37 +1740,41 @@
   return successful;
 }
 
-void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
-{
+void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
   size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_shrink_bytes =
     ReservedSpace::page_align_size_down(shrink_bytes);
   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                          HeapRegion::GrainBytes);
   size_t num_regions_deleted = 0;
-  MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);
-
-  assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
-  if (mr.byte_size() > 0)
+  MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
+  HeapWord* old_end = (HeapWord*) _g1_storage.high();
+  assert(mr.end() == old_end, "post-condition");
+  if (mr.byte_size() > 0) {
+    if (_hr_printer.is_active()) {
+      HeapWord* curr = mr.end();
+      while (curr > mr.start()) {
+        HeapWord* curr_end = curr;
+        curr -= HeapRegion::GrainWords;
+        _hr_printer.uncommit(curr, curr_end);
+      }
+      assert(curr == mr.start(), "post-condition");
+    }
+
     _g1_storage.shrink_by(mr.byte_size());
-  assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
-
-  _g1_committed.set_end(mr.start());
-  _expansion_regions += num_regions_deleted;
-
-  // Tell the cardtable about it.
-  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-
-  // And the offset table as well.
-  _bot_shared->resize(_g1_committed.word_size());
-
-  HeapRegionRemSet::shrink_heap(n_regions());
-
-  if (Verbose && PrintGC) {
-    size_t new_mem_size = _g1_storage.committed_size();
-    gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
-                           old_mem_size/K, aligned_shrink_bytes/K,
-                           new_mem_size/K);
+    HeapWord* new_end = (HeapWord*) _g1_storage.high();
+    assert(mr.start() == new_end, "post-condition");
+
+    _expansion_regions += num_regions_deleted;
+    update_committed_space(old_end, new_end);
+    HeapRegionRemSet::shrink_heap(n_regions());
+
+    if (Verbose && PrintGC) {
+      size_t new_mem_size = _g1_storage.committed_size();
+      gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
+                             old_mem_size/K, aligned_shrink_bytes/K,
+                             new_mem_size/K);
+    }
   }
 }
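
A small standalone example of the two-step rounding shrink_helper() performs, assuming example page and region sizes; the real values come from ReservedSpace and HeapRegion::GrainBytes.

#include <cstdio>
#include <cstddef>

static size_t align_down(size_t value, size_t alignment) {
  return value - (value % alignment);
}

int main() {
  const size_t page_size   = 4 * 1024;        // example OS page size
  const size_t grain_bytes = 1 * 1024 * 1024; // example HeapRegion::GrainBytes
  size_t shrink_bytes = 2600 * 1024;          // requested shrink amount

  size_t aligned = align_down(shrink_bytes, page_size);   // page_align_size_down
  aligned        = align_down(aligned, grain_bytes);      // whole regions only
  std::printf("will shrink by %zu bytes (%zu regions)\n",
              aligned, aligned / grain_bytes);
  return 0;
}
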
 
@@ -1681,6 +1789,7 @@
   shrink_helper(shrink_bytes);
   rebuild_region_lists();
 
+  _hrs.verify_optional();
   verify_region_sets_optional();
 }
 
@@ -1768,6 +1877,10 @@
 
   MutexLocker x(Heap_lock);
 
+  // We have to initialize the printer before committing the heap, as
+  // it will be used while the heap is being committed.
+  // We have to initialize the printer before committing the heap, as
+  // it will be used while the heap is being committed.
+  _hr_printer.set_active(G1PrintHeapRegions);
+
   // While there are no constraints in the GC code that HeapWordSize
   // be any particular value, there are multiple other areas in the
   // system which believe this to be true (e.g. oop->object_size in some
@@ -1859,9 +1972,9 @@
 
   _g1_storage.initialize(g1_rs, 0);
   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
-  _g1_max_committed = _g1_committed;
-  _hrs = new HeapRegionSeq(_expansion_regions);
-  guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
+  _hrs.initialize((HeapWord*) _g1_reserved.start(),
+                  (HeapWord*) _g1_reserved.end(),
+                  _expansion_regions);
 
   // 6843694 - ensure that the maximum region index can fit
   // in the remembered set structures.
@@ -1960,8 +2073,9 @@
   // Here we allocate the dummy full region that is required by the
   // G1AllocRegion class. If we don't pass an address in the reserved
   // space here, lots of asserts fire.
-  MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords);
-  HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true);
+
+  HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
+                                             _g1_reserved.start());
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
   // region will complain that it cannot support allocations without
@@ -2069,7 +2183,7 @@
 
 size_t G1CollectedHeap::recalculate_used() const {
   SumUsedClosure blk;
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   return blk.result();
 }
 
@@ -2089,7 +2203,7 @@
 
 size_t G1CollectedHeap::recalculate_used_regions() const {
   SumUsedRegionsClosure blk;
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   return blk.result();
 }
 #endif // PRODUCT
@@ -2254,8 +2368,8 @@
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  if (_g1_committed.contains(p)) {
-    HeapRegion* hr = _hrs->addr_to_region(p);
+  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) p);
+  if (hr != NULL) {
     return hr->is_in(p);
   } else {
     return _perm_gen->as_gen()->is_in(p);
@@ -2283,7 +2397,7 @@
 
 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
   IterateOopClosureRegionClosure blk(_g1_committed, cl);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   if (do_perm) {
     perm_gen()->oop_iterate(cl);
   }
@@ -2291,7 +2405,7 @@
 
 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
   IterateOopClosureRegionClosure blk(mr, cl);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   if (do_perm) {
     perm_gen()->oop_iterate(cl);
   }
@@ -2313,7 +2427,7 @@
 
 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
   IterateObjectClosureRegionClosure blk(cl);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   if (do_perm) {
     perm_gen()->object_iterate(cl);
   }
@@ -2338,24 +2452,17 @@
 
 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
   SpaceClosureRegionClosure blk(cl);
-  _hrs->iterate(&blk);
-}
-
-void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
-  _hrs->iterate(cl);
+  heap_region_iterate(&blk);
+}
+
+void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
+  _hrs.iterate(cl);
 }
 
 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
-                                               HeapRegionClosure* cl) {
-  _hrs->iterate_from(r, cl);
-}
-
-void
-G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
-  _hrs->iterate_from(idx, cl);
-}
-
-HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
+                                               HeapRegionClosure* cl) const {
+  _hrs.iterate_from(r, cl);
+}
 
 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
@@ -2537,7 +2644,7 @@
 }
 
 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
-  return _hrs->length() > 0 ? _hrs->at(0) : NULL;
+  return n_regions() > 0 ? region_at(0) : NULL;
 }
 
 
@@ -2592,11 +2699,6 @@
   }
 }
 
-size_t G1CollectedHeap::large_typearray_limit() {
-  // FIXME
-  return HeapRegion::GrainBytes/HeapWordSize;
-}
-
 size_t G1CollectedHeap::max_capacity() const {
   return _g1_reserved.byte_size();
 }
@@ -2614,17 +2716,18 @@
 }
 
 class VerifyLivenessOopClosure: public OopClosure {
-  G1CollectedHeap* g1h;
+  G1CollectedHeap* _g1h;
+  VerifyOption _vo;
 public:
-  VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
-    g1h = _g1h;
-  }
+  VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
+    _g1h(g1h), _vo(vo)
+  { }
   void do_oop(narrowOop *p) { do_oop_work(p); }
   void do_oop(      oop *p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T *p) {
     oop obj = oopDesc::load_decode_heap_oop(p);
-    guarantee(obj == NULL || !g1h->is_obj_dead(obj),
+    guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
               "Dead object referenced by a not dead object");
   }
 };
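
A minimal sketch of the bool-to-enum refactoring used throughout this change: a three-valued option selects between "prev" marking, "next" marking and the mark word when deciding liveness. The enumerator and field names below are simplified stand-ins for the VerifyOption_G1* values and the real queries.

#include <cassert>
#include <cstdio>

enum VerifyOpt { UsePrevMarking, UseNextMarking, UseMarkWord };

struct Obj { bool prev_marked; bool next_marked; bool mark_word_marked; };

// Same dispatch shape as is_obj_dead_cond(obj, vo) in g1CollectedHeap.hpp.
static bool is_obj_dead_cond(const Obj& o, VerifyOpt vo) {
  switch (vo) {
    case UsePrevMarking: return !o.prev_marked;
    case UseNextMarking: return !o.next_marked;
    default:
      assert(vo == UseMarkWord && "must be");
      return !o.mark_word_marked;
  }
}

int main() {
  Obj o = { true, false, true };
  std::printf("dead(prev)=%d dead(next)=%d dead(markword)=%d\n",
              is_obj_dead_cond(o, UsePrevMarking),
              is_obj_dead_cond(o, UseNextMarking),
              is_obj_dead_cond(o, UseMarkWord));
  return 0;
}
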
@@ -2634,18 +2737,30 @@
   G1CollectedHeap* _g1h;
   size_t _live_bytes;
   HeapRegion *_hr;
-  bool _use_prev_marking;
+  VerifyOption _vo;
 public:
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
-  VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking)
-    : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) {
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
+    : _live_bytes(0), _hr(hr), _vo(vo) {
     _g1h = G1CollectedHeap::heap();
   }
   void do_object(oop o) {
-    VerifyLivenessOopClosure isLive(_g1h);
+    VerifyLivenessOopClosure isLive(_g1h, _vo);
     assert(o != NULL, "Huh?");
-    if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
+    if (!_g1h->is_obj_dead_cond(o, _vo)) {
+      // If the object is alive according to the mark word,
+      // then verify that the marking information agrees.
+      // Note we can't verify the contrapositive of the
+      // above: if the object is dead (according to the mark
+      // word), it may not be marked, or may have been marked
+      // but has since become dead, or may have been allocated
+      // since the last marking.
+      if (_vo == VerifyOption_G1UseMarkWord) {
+        guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
+      }
+
       o->oop_iterate(&isLive);
       if (!_hr->obj_allocated_since_prev_marking(o)) {
         size_t obj_size = o->size();    // Make sure we don't overflow
@@ -2687,17 +2802,18 @@
 
 class VerifyRegionClosure: public HeapRegionClosure {
 private:
-  bool _allow_dirty;
-  bool _par;
-  bool _use_prev_marking;
-  bool _failures;
+  bool         _allow_dirty;
+  bool         _par;
+  VerifyOption _vo;
+  bool         _failures;
 public:
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
-  VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  VerifyRegionClosure(bool allow_dirty, bool par, VerifyOption vo)
     : _allow_dirty(allow_dirty),
       _par(par),
-      _use_prev_marking(use_prev_marking),
+      _vo(vo),
       _failures(false) {}
 
   bool failures() {
@@ -2709,11 +2825,11 @@
               "Should be unclaimed at verify points.");
     if (!r->continuesHumongous()) {
       bool failures = false;
-      r->verify(_allow_dirty, _use_prev_marking, &failures);
+      r->verify(_allow_dirty, _vo, &failures);
       if (failures) {
         _failures = true;
       } else {
-        VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
+        VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
         r->object_iterate(&not_dead_yet_cl);
         if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
           gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
@@ -2733,14 +2849,15 @@
 class VerifyRootsClosure: public OopsInGenClosure {
 private:
   G1CollectedHeap* _g1h;
-  bool             _use_prev_marking;
+  VerifyOption     _vo;
   bool             _failures;
 public:
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
-  VerifyRootsClosure(bool use_prev_marking) :
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  VerifyRootsClosure(VerifyOption vo) :
     _g1h(G1CollectedHeap::heap()),
-    _use_prev_marking(use_prev_marking),
+    _vo(vo),
     _failures(false) { }
 
   bool failures() { return _failures; }
@@ -2749,9 +2866,12 @@
     T heap_oop = oopDesc::load_heap_oop(p);
     if (!oopDesc::is_null(heap_oop)) {
       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
+      if (_g1h->is_obj_dead_cond(obj, _vo)) {
         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
+        if (_vo == VerifyOption_G1UseMarkWord) {
+          gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
+        }
         obj->print_on(gclog_or_tty);
         _failures = true;
       }
@@ -2767,19 +2887,19 @@
 class G1ParVerifyTask: public AbstractGangTask {
 private:
   G1CollectedHeap* _g1h;
-  bool _allow_dirty;
-  bool _use_prev_marking;
-  bool _failures;
+  bool             _allow_dirty;
+  VerifyOption     _vo;
+  bool             _failures;
 
 public:
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
-  G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
-                  bool use_prev_marking) :
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty, VerifyOption vo) :
     AbstractGangTask("Parallel verify task"),
     _g1h(g1h),
     _allow_dirty(allow_dirty),
-    _use_prev_marking(use_prev_marking),
+    _vo(vo),
     _failures(false) { }
 
   bool failures() {
@@ -2788,7 +2908,7 @@
 
   void work(int worker_i) {
     HandleMark hm;
-    VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking);
+    VerifyRegionClosure blk(_allow_dirty, true, _vo);
     _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
                                           HeapRegion::ParVerifyClaimValue);
     if (blk.failures()) {
@@ -2798,19 +2918,21 @@
 };
 
 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
-  verify(allow_dirty, silent, /* use_prev_marking */ true);
+  verify(allow_dirty, silent, VerifyOption_G1UsePrevMarking);
 }
 
 void G1CollectedHeap::verify(bool allow_dirty,
                              bool silent,
-                             bool use_prev_marking) {
+                             VerifyOption vo) {
   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
     if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
-    VerifyRootsClosure rootsCl(use_prev_marking);
+    VerifyRootsClosure rootsCl(vo);
     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
+
     // We apply the relevant closures to all the oops in the
     // system dictionary, the string table and the code cache.
     const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+
     process_strong_roots(true,      // activate StrongRootsScope
                          true,      // we set "collecting perm gen" to true,
                                     // so we don't reset the dirty cards in the perm gen.
@@ -2818,21 +2940,37 @@
                          &rootsCl,
                          &blobsCl,
                          &rootsCl);
-    // Since we used "collecting_perm_gen" == true above, we will not have
-    // checked the refs from perm into the G1-collected heap. We check those
-    // references explicitly below. Whether the relevant cards are dirty
-    // is checked further below in the rem set verification.
-    if (!silent) { gclog_or_tty->print("Permgen roots "); }
-    perm_gen()->oop_iterate(&rootsCl);
+
+    // If we're verifying after the marking phase of a Full GC then we can't
+    // treat the perm gen as roots into the G1 heap. Some of the objects in
+    // the perm gen may be dead and hence not marked. If one of these dead
+    // objects is considered to be a root then we may end up with a false
+    // "Root location <x> points to dead ob <y>" failure.
+    if (vo != VerifyOption_G1UseMarkWord) {
+      // Since we used "collecting_perm_gen" == true above, we will not have
+      // checked the refs from perm into the G1-collected heap. We check those
+      // references explicitly below. Whether the relevant cards are dirty
+      // is checked further below in the rem set verification.
+      if (!silent) { gclog_or_tty->print("Permgen roots "); }
+      perm_gen()->oop_iterate(&rootsCl);
+    }
     bool failures = rootsCl.failures();
-    if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
-    verify_region_sets();
+
+    if (vo != VerifyOption_G1UseMarkWord) {
+      // If we're verifying during a full GC then the region sets
+      // will have been torn down at the start of the GC. Therefore
+      // verifying the region sets will fail. So we only verify
+      // the region sets when not in a full GC.
+      if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
+      verify_region_sets();
+    }
+
     if (!silent) { gclog_or_tty->print("HeapRegions "); }
     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
              "sanity check");
 
-      G1ParVerifyTask task(this, allow_dirty, use_prev_marking);
+      G1ParVerifyTask task(this, allow_dirty, vo);
       int n_workers = workers()->total_workers();
       set_par_threads(n_workers);
       workers()->run_task(&task);
@@ -2849,8 +2987,8 @@
       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
              "sanity check");
     } else {
-      VerifyRegionClosure blk(allow_dirty, false, use_prev_marking);
-      _hrs->iterate(&blk);
+      VerifyRegionClosure blk(allow_dirty, false, vo);
+      heap_region_iterate(&blk);
       if (blk.failures()) {
         failures = true;
       }
@@ -2865,7 +3003,7 @@
 #ifndef PRODUCT
       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
         concurrent_mark()->print_reachable("at-verification-failure",
-                                           use_prev_marking, false /* all */);
+                                           vo, false /* all */);
       }
 #endif
       gclog_or_tty->flush();
@@ -2919,7 +3057,7 @@
 
 void G1CollectedHeap::print_on_extended(outputStream* st) const {
   PrintRegionClosure blk(st);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
 }
 
 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
@@ -2958,14 +3096,55 @@
   SpecializationStats::print();
 }
 
-int G1CollectedHeap::addr_to_arena_id(void* addr) const {
-  HeapRegion* hr = heap_region_containing(addr);
-  if (hr == NULL) {
-    return 0;
-  } else {
-    return 1;
-  }
-}
+#ifndef PRODUCT
+// Helpful for debugging RSet issues.
+
+class PrintRSetsClosure : public HeapRegionClosure {
+private:
+  const char* _msg;
+  size_t _occupied_sum;
+
+public:
+  bool doHeapRegion(HeapRegion* r) {
+    HeapRegionRemSet* hrrs = r->rem_set();
+    size_t occupied = hrrs->occupied();
+    _occupied_sum += occupied;
+
+    gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT,
+                           HR_FORMAT_PARAMS(r));
+    if (occupied == 0) {
+      gclog_or_tty->print_cr("  RSet is empty");
+    } else {
+      hrrs->print();
+    }
+    gclog_or_tty->print_cr("----------");
+    return false;
+  }
+
+  PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
+    gclog_or_tty->cr();
+    gclog_or_tty->print_cr("========================================");
+    gclog_or_tty->print_cr(msg);
+    gclog_or_tty->cr();
+  }
+
+  ~PrintRSetsClosure() {
+    gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum);
+    gclog_or_tty->print_cr("========================================");
+    gclog_or_tty->cr();
+  }
+};
+
+void G1CollectedHeap::print_cset_rsets() {
+  PrintRSetsClosure cl("Printing CSet RSets");
+  collection_set_iterate(&cl);
+}
+
+void G1CollectedHeap::print_all_rsets() {
+  PrintRSetsClosure cl("Printing All RSets");;
+  heap_region_iterate(&cl);
+}
+#endif // PRODUCT
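
A standalone sketch of the reporting pattern PrintRSetsClosure uses: header in the constructor, a running occupancy sum in the visit method, footer with the total in the destructor. The region and closure types are simplified stand-ins, not HotSpot classes.

#include <cstdio>
#include <cstddef>
#include <vector>

struct FakeRegion { size_t rset_occupied; };

class OccupancyReporter {
  size_t _occupied_sum;
public:
  explicit OccupancyReporter(const char* msg) : _occupied_sum(0) {
    std::printf("========================================\n%s\n", msg);
  }
  bool do_region(const FakeRegion& r) {
    _occupied_sum += r.rset_occupied;
    std::printf("  region rset occupancy: %zu\n", r.rset_occupied);
    return false;  // false == keep iterating, as with HeapRegionClosure
  }
  ~OccupancyReporter() {
    std::printf("Occupied Sum: %zu\n========================================\n",
                _occupied_sum);
  }
};

int main() {
  std::vector<FakeRegion> regions = { {0}, {17}, {5} };
  OccupancyReporter cl("Printing All RSets (sketch)");
  for (const FakeRegion& r : regions) {
    if (cl.do_region(r)) break;
  }
  return 0;
}
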
 
 G1CollectedHeap* G1CollectedHeap::heap() {
   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
@@ -3022,24 +3201,6 @@
   }
 }
 
-class VerifyMarkedObjsClosure: public ObjectClosure {
-    G1CollectedHeap* _g1h;
-    public:
-    VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
-    void do_object(oop obj) {
-      assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true,
-             "markandsweep mark should agree with concurrent deadness");
-    }
-};
-
-void
-G1CollectedHeap::checkConcurrentMark() {
-    VerifyMarkedObjsClosure verifycl(this);
-    //    MutexLockerEx x(getMarkBitMapLock(),
-    //              Mutex::_no_safepoint_check_flag);
-    object_iterate(&verifycl, false);
-}
-
 void G1CollectedHeap::do_sync_mark() {
   _cm->checkpointRootsInitial();
   _cm->markFromRoots();
@@ -3118,12 +3279,27 @@
 
 // </NEW PREDICTION>
 
-struct PrepareForRSScanningClosure : public HeapRegionClosure {
-  bool doHeapRegion(HeapRegion *r) {
-    r->rem_set()->set_iter_claimed(0);
+#ifdef ASSERT
+class VerifyCSetClosure: public HeapRegionClosure {
+public:
+  bool doHeapRegion(HeapRegion* hr) {
+    // Here we check that the CSet region's RSet is ready for parallel
+    // iteration. The fields that we'll verify are only manipulated
+    // when the region is part of a CSet and is collected. Afterwards,
+    // we reset these fields when we clear the region's RSet (when the
+    // region is freed) so they are ready when the region is
+    // re-allocated. The only exception to this is if there's an
+    // evacuation failure and instead of freeing the region we leave
+    // it in the heap. In that case, we reset these fields during
+    // evacuation failure handling.
+    guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
+
+    // Here's a good place to add any other checks we'd like to
+    // perform on CSet regions.
     return false;
   }
 };
+#endif // ASSERT
 
 #if TASKQUEUE_STATS
 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
@@ -3227,16 +3403,14 @@
       gc_prologue(false);
       increment_total_collections(false /* full gc */);
 
-#if G1_REM_SET_LOGGING
-      gclog_or_tty->print_cr("\nJust chose CS, heap:");
-      print();
-#endif
-
       if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
         HandleMark hm;  // Discard invalid handles created during verification
         gclog_or_tty->print(" VerifyBeforeGC:");
         prepare_for_verify();
-        Universe::verify(false);
+        Universe::verify(/* allow dirty */ false,
+                         /* silent      */ false,
+                         /* option      */ VerifyOption_G1UsePrevMarking);
+
       }
 
       COMPILER2_PRESENT(DerivedPointerTable::clear());
@@ -3253,6 +3427,11 @@
       // of the collection set!).
       release_mutator_alloc_region();
 
+      // We should call this after we retire the mutator alloc
+      // region(s) so that all the ALLOC / RETIRE events are generated
+      // before the start GC event.
+      _hr_printer.start_gc(false /* full */, (size_t) total_collections());
+
       // The elapsed time induced by the start time below deliberately elides
       // the possible verification above.
       double start_time_sec = os::elapsedTime();
@@ -3292,8 +3471,9 @@
       // progress, this will be zero.
       _cm->set_oops_do_bound();
 
-      if (mark_in_progress())
+      if (mark_in_progress()) {
         concurrent_mark()->newCSet();
+      }
 
 #if YOUNG_LIST_VERBOSE
       gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
@@ -3303,13 +3483,36 @@
 
       g1_policy()->choose_collection_set(target_pause_time_ms);
 
-      // Nothing to do if we were unable to choose a collection set.
-#if G1_REM_SET_LOGGING
-      gclog_or_tty->print_cr("\nAfter pause, heap:");
-      print();
-#endif
-      PrepareForRSScanningClosure prepare_for_rs_scan;
-      collection_set_iterate(&prepare_for_rs_scan);
+      if (_hr_printer.is_active()) {
+        HeapRegion* hr = g1_policy()->collection_set();
+        while (hr != NULL) {
+          G1HRPrinter::RegionType type;
+          if (!hr->is_young()) {
+            type = G1HRPrinter::Old;
+          } else if (hr->is_survivor()) {
+            type = G1HRPrinter::Survivor;
+          } else {
+            type = G1HRPrinter::Eden;
+          }
+          _hr_printer.cset(hr);
+          hr = hr->next_in_collection_set();
+        }
+      }
+
+      // We have chosen the complete collection set. If marking is
+      // active, we clear the region fields of any of the
+      // concurrent marking tasks whose region fields point into
+      // the collection set as these values will become stale. This
+      // will cause the owning marking threads to claim a new region
+      // when marking restarts.
+      if (mark_in_progress()) {
+        concurrent_mark()->reset_active_task_region_fields_in_cset();
+      }
+
+#ifdef ASSERT
+      VerifyCSetClosure cl;
+      collection_set_iterate(&cl);
+#endif // ASSERT
 
       setup_surviving_young_words();
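
A minimal sketch of the collection-set walk added above: follow next_in_collection_set() and classify each region as Eden, Survivor or Old. The Node type and the young/survivor flags are illustrative only.

#include <cstdio>

enum RegionType { Eden, Survivor, Old };

struct Node {
  bool  is_young;
  bool  is_survivor;
  Node* next_in_collection_set;
};

static RegionType classify(const Node* hr) {
  if (!hr->is_young)   return Old;
  if (hr->is_survivor) return Survivor;
  return Eden;
}

int main() {
  Node c = { false, false, 0 };   // old region
  Node b = { true,  true,  &c };  // survivor
  Node a = { true,  false, &b };  // eden
  static const char* names[] = { "Eden", "Survivor", "Old" };

  for (Node* hr = &a; hr != 0; hr = hr->next_in_collection_set) {
    std::printf("CSET region type: %s\n", names[classify(hr)]);
  }
  return 0;
}
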
 
@@ -3397,7 +3600,9 @@
         HandleMark hm;  // Discard invalid handles created during verification
         gclog_or_tty->print(" VerifyAfterGC:");
         prepare_for_verify();
-        Universe::verify(false);
+        Universe::verify(/* allow dirty */ true,
+                         /* silent      */ false,
+                         /* option      */ VerifyOption_G1UsePrevMarking);
       }
 
       if (was_enabled) ref_processor()->enable_discovery();
@@ -3415,6 +3620,15 @@
         }
       }
 
+      // We should do this after we potentially expand the heap so
+      // that all the COMMIT events are generated before the end GC
+      // event, and after we retire the GC alloc regions so that all
+      // RETIRE events are generated before the end GC event.
+      _hr_printer.end_gc(false /* full */, (size_t) total_collections());
+
+      // We have to do this after we decide whether to expand the heap or not.
+      g1_policy()->print_heap_transition();
+
       if (mark_in_progress()) {
         concurrent_mark()->update_g1_committed();
       }
@@ -3433,6 +3647,7 @@
     }
   }
 
+  _hrs.verify_optional();
   verify_region_sets_optional();
 
   TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
@@ -3565,8 +3780,8 @@
 public:
   bool doHeapRegion(HeapRegion* r) {
     if (r->is_gc_alloc_region()) {
-      gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
-                             r->hrs_index(), r->bottom());
+      gclog_or_tty->print_cr("Region "HR_FORMAT" is still a GC alloc region",
+                             HR_FORMAT_PARAMS(r));
     }
     return false;
   }
@@ -3650,11 +3865,8 @@
     } else {
       // the region was retained from the last collection
       ++_gc_alloc_region_counts[ap];
-      if (G1PrintHeapRegions) {
-        gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
-                               "top "PTR_FORMAT,
-                               alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
-      }
+
+      _hr_printer.reuse(alloc_region);
     }
 
     if (alloc_region != NULL) {
@@ -3907,6 +4119,14 @@
       assert(cur->in_collection_set(), "bad CS");
       RemoveSelfPointerClosure rspc(_g1h, cur, cl);
 
+      // In the common case we make sure that this is done when the
+      // region is freed so that it is "ready-to-go" when it's
+      // re-allocated. However, when evacuation failure happens, a
+      // region will remain in the heap and might ultimately be added
+      // to a CSet in the future. So we have to be careful here and
+      // make sure the region's RSet is ready for parallel iteration
+      // whenever this might be required in the future.
+      cur->rem_set()->reset_for_par_iteration();
       cur->reset_bot();
       cl->set_region(cur);
       cur->object_iterate(&rspc);
@@ -4019,11 +4239,7 @@
   HeapRegion* r = heap_region_containing(old);
   if (!r->evacuation_failed()) {
     r->set_evacuation_failed(true);
-    if (G1PrintHeapRegions) {
-      gclog_or_tty->print("overflow in heap region "PTR_FORMAT" "
-                          "["PTR_FORMAT","PTR_FORMAT")\n",
-                          r, r->bottom(), r->end());
-    }
+    _hr_printer.evac_failure(r);
   }
 
   push_on_evac_failure_scan_stack(old);
@@ -4084,6 +4300,7 @@
   // Now we can do the post-GC stuff on the region.
   alloc_region->note_end_of_copying();
   g1_policy()->record_after_bytes(alloc_region->used());
+  _hr_printer.retire(alloc_region);
 }
 
 HeapWord*
@@ -4426,10 +4643,6 @@
 
   // here the null check is implicit in the cset_fast_test() test
   if (_g1->in_cset_fast_test(obj)) {
-#if G1_REM_SET_LOGGING
-    gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
-                           "into CS.", p, (void*) obj);
-#endif
     if (obj->is_forwarded()) {
       oopDesc::encode_store_heap_oop(p, obj->forwardee());
     } else {
@@ -4864,10 +5077,10 @@
   hr->set_notHumongous();
   free_region(hr, &hr_pre_used, free_list, par);
 
-  int i = hr->hrs_index() + 1;
+  size_t i = hr->hrs_index() + 1;
   size_t num = 1;
-  while ((size_t) i < n_regions()) {
-    HeapRegion* curr_hr = _hrs->at(i);
+  while (i < n_regions()) {
+    HeapRegion* curr_hr = region_at(i);
     if (!curr_hr->continuesHumongous()) {
       break;
     }
@@ -5227,16 +5440,6 @@
   }
 }
 
-size_t G1CollectedHeap::n_regions() {
-  return _hrs->length();
-}
-
-size_t G1CollectedHeap::max_regions() {
-  return
-    (size_t)align_size_up(max_capacity(), HeapRegion::GrainBytes) /
-    HeapRegion::GrainBytes;
-}
-
 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
   assert(heap_lock_held_for_gc(),
               "the heap lock should already be held by or for this thread");
@@ -5367,12 +5570,14 @@
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   assert(!force || g1_policy()->can_expand_young_list(),
          "if force is true we should be able to expand the young list");
-  if (force || !g1_policy()->is_young_list_full()) {
+  bool young_list_full = g1_policy()->is_young_list_full();
+  if (force || !young_list_full) {
     HeapRegion* new_alloc_region = new_region(word_size,
                                               false /* do_expand */);
     if (new_alloc_region != NULL) {
       g1_policy()->update_region_num(true /* next_is_young */);
       set_region_short_lived_locked(new_alloc_region);
+      _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
       g1mm()->update_eden_counters();
       return new_alloc_region;
     }
@@ -5387,6 +5592,7 @@
 
   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
   _summary_bytes_used += allocated_bytes;
+  _hr_printer.retire(alloc_region);
 }
 
 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
@@ -5433,6 +5639,15 @@
   }
 };
 
+HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index,
+                                             HeapWord* bottom) {
+  HeapWord* end = bottom + HeapRegion::GrainWords;
+  MemRegion mr(bottom, end);
+  assert(_g1_reserved.contains(mr), "invariant");
+  // This might return NULL if the allocation fails
+  return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */);
+}
+
 void G1CollectedHeap::verify_region_sets() {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -27,8 +27,10 @@
 
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/g1/g1HRPrinter.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
+#include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"
 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
@@ -42,7 +44,6 @@
 // heap subsets that will yield large amounts of garbage.
 
 class HeapRegion;
-class HeapRegionSeq;
 class HRRSCleanupTask;
 class PermanentGenerationSpec;
 class GenerationSpec;
@@ -103,6 +104,19 @@
   size_t       length() { return _length; }
   size_t       survivor_length() { return _survivor_length; }
 
+  // Currently we do not keep track of the used byte sum for the
+  // young list and the survivors and it'd be quite a lot of work to
+  // do so. When we eventually replace the young list with
+  // instances of HeapRegionLinkedList we'll get that for free. So,
+  // we'll report the more accurate information then.
+  size_t       eden_used_bytes() {
+    assert(length() >= survivor_length(), "invariant");
+    return (length() - survivor_length()) * HeapRegion::GrainBytes;
+  }
+  size_t       survivor_used_bytes() {
+    return survivor_length() * HeapRegion::GrainBytes;
+  }
+
   void rs_length_sampling_init();
   bool rs_length_sampling_more();
   void rs_length_sampling_next();
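
A small standalone example of the approximation behind eden_used_bytes()/survivor_used_bytes(): region counts multiplied by the region size, since per-region used bytes are not tracked for the young list. The region size here is an example value, not the real HeapRegion::GrainBytes.

#include <cassert>
#include <cstdio>
#include <cstddef>

int main() {
  const size_t grain_bytes = 1 * 1024 * 1024;  // example region size
  size_t young_length      = 10;               // total young regions
  size_t survivor_length   = 3;                // of which survivors

  assert(young_length >= survivor_length && "invariant");
  size_t eden_used     = (young_length - survivor_length) * grain_bytes;
  size_t survivor_used = survivor_length * grain_bytes;
  std::printf("eden ~%zu bytes, survivors ~%zu bytes\n",
              eden_used, survivor_used);
  return 0;
}
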
@@ -183,9 +197,6 @@
   // The part of _g1_storage that is currently committed.
   MemRegion _g1_committed;
 
-  // The maximum part of _g1_storage that has ever been committed.
-  MemRegion _g1_max_committed;
-
   // The master free list. It will satisfy all new region allocations.
   MasterFreeRegionList      _free_list;
 
@@ -209,7 +220,7 @@
   void rebuild_region_lists();
 
   // The sequence of all heap regions in the heap.
-  HeapRegionSeq* _hrs;
+  HeapRegionSeq _hrs;
 
   // Alloc region used to satisfy mutator allocation requests.
   MutatorAllocRegion _mutator_alloc_region;
@@ -288,6 +299,8 @@
 
   size_t* _surviving_young_words;
 
+  G1HRPrinter _hr_printer;
+
   void setup_surviving_young_words();
   void update_surviving_young_words(size_t* surv_young_words);
   void cleanup_surviving_young_words();
@@ -408,13 +421,15 @@
   // Attempt to satisfy a humongous allocation request of the given
   // size by finding a contiguous set of free regions of num_regions
   // length and remove them from the master free list. Return the
-  // index of the first region or -1 if the search was unsuccessful.
-  int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);
+  // index of the first region or G1_NULL_HRS_INDEX if the search
+  // was unsuccessful.
+  size_t humongous_obj_allocate_find_first(size_t num_regions,
+                                           size_t word_size);
 
   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
   // humongous region.
-  HeapWord* humongous_obj_allocate_initialize_regions(int first,
+  HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
                                                       size_t num_regions,
                                                       size_t word_size);
 
@@ -434,8 +449,7 @@
   // * All allocation requests for new TLABs should go to
   //   allocate_new_tlab().
   //
-  // * All non-TLAB allocation requests should go to mem_allocate()
-  //   and mem_allocate() should never be called with is_tlab == true.
+  // * All non-TLAB allocation requests should go to mem_allocate().
   //
   // * If either call cannot satisfy the allocation request using the
   //   current allocating region, they will try to get a new one. If
@@ -455,8 +469,6 @@
   virtual HeapWord* allocate_new_tlab(size_t word_size);
 
   virtual HeapWord* mem_allocate(size_t word_size,
-                                 bool   is_noref,
-                                 bool   is_tlab, /* expected to be false */
                                  bool*  gc_overhead_limit_was_exceeded);
 
   // The following three methods take a gc_count_before_ret
@@ -574,8 +586,8 @@
   void register_region_with_in_cset_fast_test(HeapRegion* r) {
     assert(_in_cset_fast_test_base != NULL, "sanity");
     assert(r->in_collection_set(), "invariant");
-    int index = r->hrs_index();
-    assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant");
+    size_t index = r->hrs_index();
+    assert(index < _in_cset_fast_test_length, "invariant");
     assert(!_in_cset_fast_test_base[index], "invariant");
     _in_cset_fast_test_base[index] = true;
   }
@@ -626,6 +638,8 @@
     return _full_collections_completed;
   }
 
+  G1HRPrinter* hr_printer() { return &_hr_printer; }
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -741,6 +755,11 @@
                              HumongousRegionSet* humongous_proxy_set,
                              bool par);
 
+  // Notifies all the necessary spaces that the committed space has
+  // been updated (either expanded or shrunk). It should be called
+  // after _g1_storage is updated.
+  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
+
   // The concurrent marker (and the thread it runs in.)
   ConcurrentMark* _cm;
   ConcurrentMarkThread* _cmThread;
@@ -803,7 +822,6 @@
   oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
   void handle_evacuation_failure_common(oop obj, markOop m);
 
-
   // Ensure that the relevant gc_alloc regions are set.
   void get_gc_alloc_regions();
   // We're done with GC alloc regions. We are going to tear down the
@@ -954,15 +972,13 @@
   }
 
   // The total number of regions in the heap.
-  size_t n_regions();
+  size_t n_regions() { return _hrs.length(); }
+
+  // The max number of regions in the heap.
+  size_t max_regions() { return _hrs.max_length(); }
 
   // The number of regions that are completely free.
-  size_t max_regions();
-
-  // The number of regions that are completely free.
-  size_t free_regions() {
-    return _free_list.length();
-  }
+  size_t free_regions() { return _free_list.length(); }
 
   // The number of regions that are not completely free.
   size_t used_regions() { return n_regions() - free_regions(); }
@@ -970,6 +986,10 @@
   // The number of regions available for "regular" expansion.
   size_t expansion_regions() { return _expansion_regions; }
 
+  // Factory method for HeapRegion instances. It will return NULL if
+  // the allocation fails.
+  HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
+
   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
@@ -1131,17 +1151,15 @@
 
   // Iterate over heap regions, in address order, terminating the
   // iteration early if the "doHeapRegion" method returns "true".
-  void heap_region_iterate(HeapRegionClosure* blk);
+  void heap_region_iterate(HeapRegionClosure* blk) const;
 
   // Iterate over heap regions starting with r (or the first region if "r"
   // is NULL), in address order, terminating early if the "doHeapRegion"
   // method returns "true".
-  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk);
+  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
 
-  // As above but starting from the region at index idx.
-  void heap_region_iterate_from(int idx, HeapRegionClosure* blk);
-
-  HeapRegion* region_at(size_t idx);
+  // Return the region with the given index. It assumes the index is valid.
+  HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
 
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
@@ -1182,12 +1200,14 @@
 
   // A G1CollectedHeap will contain some number of heap regions.  This
   // finds the region containing a given address, or else returns NULL.
-  HeapRegion* heap_region_containing(const void* addr) const;
+  template <class T>
+  inline HeapRegion* heap_region_containing(const T addr) const;
 
   // Like the above, but requires "addr" to be in the heap (to avoid a
   // null-check), and unlike the above, may return an continuing humongous
   // region.
-  HeapRegion* heap_region_containing_raw(const void* addr) const;
+  template <class T>
+  inline HeapRegion* heap_region_containing_raw(const T addr) const;
 
   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
   // each address in the (reserved) heap is a member of exactly
@@ -1249,11 +1269,17 @@
     return true;
   }
 
-  bool is_in_young(oop obj) {
+  bool is_in_young(const oop obj) {
     HeapRegion* hr = heap_region_containing(obj);
     return hr != NULL && hr->is_young();
   }
 
+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void* p);
+#endif
+
+  virtual bool is_scavengable(const void* addr);
+
   // We don't need barriers for initializing stores to objects
   // in the young gen: for the SATB pre-barrier, there is no
   // pre-value that needs to be remembered; for the remembered-set
@@ -1280,10 +1306,6 @@
     return true;
   }
 
-  // The boundary between a "large" and "small" array of primitives, in
-  // words.
-  virtual size_t large_typearray_limit();
-
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
     // Note this has to be strictly greater-than as the TLABs
@@ -1323,14 +1345,20 @@
 
   // Perform verification.
 
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
+  // vo == UsePrevMarking -> use "prev" marking information,
+  // vo == UseNextMarking -> use "next" marking information,
+  // vo == UseMarkWord    -> use the mark word in the object header
+  //
   // NOTE: Only the "prev" marking information is guaranteed to be
   // consistent most of the time, so most calls to this should use
-  // use_prev_marking == true. Currently, there is only one case where
-  // this is called with use_prev_marking == false, which is to verify
-  // the "next" marking information at the end of remark.
-  void verify(bool allow_dirty, bool silent, bool use_prev_marking);
+  // vo == UsePrevMarking.
+  // Currently, there is only one case where this is called with
+  // vo == UseNextMarking, which is to verify the "next" marking
+  // information at the end of remark.
+  // Currently there is only one place where this is called with
+  // vo == UseMarkWord, which is to verify the marking during a
+  // full GC.
+  void verify(bool allow_dirty, bool silent, VerifyOption vo);
 
   // Override; it uses the "prev" marking information
   virtual void verify(bool allow_dirty, bool silent);
@@ -1349,10 +1377,9 @@
   // Override
   void print_tracing_info() const;
 
-  // If "addr" is a pointer into the (reserved?) heap, returns a positive
-  // number indicating the "arena" within the heap in which "addr" falls.
-  // Or else returns 0.
-  virtual int addr_to_arena_id(void* addr) const;
+  // The following two methods are helpful for debugging RSet issues.
+  void print_cset_rsets() PRODUCT_RETURN;
+  void print_all_rsets() PRODUCT_RETURN;
 
   // Convenience function to be used in situations where the heap type can be
   // asserted to be this type.
@@ -1383,24 +1410,27 @@
   // bitmap off to the side.
   void doConcurrentMark();
 
-  // This is called from the marksweep collector which then does
-  // a concurrent mark and verifies that the results agree with
-  // the stop the world marking.
-  void checkConcurrentMark();
+  // Do a full concurrent marking, synchronously.
   void do_sync_mark();
 
   bool isMarkedPrev(oop obj) const;
   bool isMarkedNext(oop obj) const;
 
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
+  // vo == UsePrevMarking -> use "prev" marking information,
+  // vo == UseNextMarking -> use "next" marking information,
+  // vo == UseMarkWord    -> use mark word from object header
   bool is_obj_dead_cond(const oop obj,
                         const HeapRegion* hr,
-                        const bool use_prev_marking) const {
-    if (use_prev_marking) {
-      return is_obj_dead(obj, hr);
-    } else {
-      return is_obj_ill(obj, hr);
+                        const VerifyOption vo) const {
+
+    switch (vo) {
+      case VerifyOption_G1UsePrevMarking:
+        return is_obj_dead(obj, hr);
+      case VerifyOption_G1UseNextMarking:
+        return is_obj_ill(obj, hr);
+      default:
+        assert(vo == VerifyOption_G1UseMarkWord, "must be");
+        return !obj->is_gc_marked();
     }
   }
 
@@ -1441,18 +1471,24 @@
   // Added if it is in permanent gen it isn't dead.
   // Added if it is NULL it isn't dead.
 
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
+  // vo == UsePrevMarking -> use "prev" marking information,
+  // vo == UseNextMarking -> use "next" marking information,
+  // vo == UseMarkWord    -> use mark word from object header
   bool is_obj_dead_cond(const oop obj,
-                        const bool use_prev_marking) {
-    if (use_prev_marking) {
-      return is_obj_dead(obj);
-    } else {
-      return is_obj_ill(obj);
+                        const VerifyOption vo) const {
+
+    switch (vo) {
+      case VerifyOption_G1UsePrevMarking:
+        return is_obj_dead(obj);
+      case VerifyOption_G1UseNextMarking:
+        return is_obj_ill(obj);
+      default:
+        assert(vo == VerifyOption_G1UseMarkWord, "must be");
+        return !obj->is_gc_marked();
     }
   }
 
-  bool is_obj_dead(const oop obj) {
+  bool is_obj_dead(const oop obj) const {
     const HeapRegion* hr = heap_region_containing(obj);
     if (hr == NULL) {
       if (Universe::heap()->is_in_permanent(obj))
@@ -1463,7 +1499,7 @@
     else return is_obj_dead(obj, hr);
   }
 
-  bool is_obj_ill(const oop obj) {
+  bool is_obj_ill(const oop obj) const {
     const HeapRegion* hr = heap_region_containing(obj);
     if (hr == NULL) {
       if (Universe::heap()->is_in_permanent(obj))
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -34,9 +34,10 @@
 
 // Inline functions for G1CollectedHeap
 
+template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing(const void* addr) const {
-  HeapRegion* hr = _hrs->addr_to_region(addr);
+G1CollectedHeap::heap_region_containing(const T addr) const {
+  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
   // hr can be null if addr in perm_gen
   if (hr != NULL && hr->continuesHumongous()) {
     hr = hr->humongous_start_region();
@@ -44,19 +45,16 @@
   return hr;
 }
 
+template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
-  assert(_g1_reserved.contains(addr), "invariant");
-  size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
-                                        >> HeapRegion::LogOfHRGrainBytes;
-
-  HeapRegion* res = _hrs->at(index);
-  assert(res == _hrs->addr_to_region(addr), "sanity");
+G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+  assert(_g1_reserved.contains((const void*) addr), "invariant");
+  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
   return res;
 }
 
 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
-  HeapRegion* r = _hrs->addr_to_region(obj);
+  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
   return r != NULL && r->in_collection_set();
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -239,6 +239,10 @@
   _should_revert_to_full_young_gcs(false),
   _last_full_young_gc(false),
 
+  _eden_bytes_before_gc(0),
+  _survivor_bytes_before_gc(0),
+  _capacity_before_gc(0),
+
   _prev_collection_pause_used_at_end_bytes(0),
 
   _collection_set(NULL),
@@ -897,6 +901,11 @@
   _bytes_in_to_space_after_gc = 0;
   _bytes_in_collection_set_before_gc = 0;
 
+  YoungList* young_list = _g1->young_list();
+  _eden_bytes_before_gc = young_list->eden_used_bytes();
+  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
+  _capacity_before_gc = _g1->capacity();
+
 #ifdef DEBUG
   // initialise these to something well known so that we can spot
   // if they are not set properly
@@ -1460,14 +1469,6 @@
       }
     }
   }
-  if (PrintGCDetails)
-    gclog_or_tty->print("   [");
-  if (PrintGC || PrintGCDetails)
-    _g1->print_size_transition(gclog_or_tty,
-                               _cur_collection_pause_used_at_start_bytes,
-                               _g1->used(), _g1->capacity());
-  if (PrintGCDetails)
-    gclog_or_tty->print_cr("]");
 
   _all_pause_times_ms->add(elapsed_ms);
   if (update_stats) {
@@ -1672,6 +1673,40 @@
   // </NEW PREDICTION>
 }
 
+#define EXT_SIZE_FORMAT SIZE_FORMAT"%s"
+#define EXT_SIZE_PARAMS(bytes)                                  \
+  byte_size_in_proper_unit((bytes)),                            \
+  proper_unit_for_byte_size((bytes))
+
+void G1CollectorPolicy::print_heap_transition() {
+  if (PrintGCDetails) {
+    YoungList* young_list = _g1->young_list();
+    size_t eden_bytes = young_list->eden_used_bytes();
+    size_t survivor_bytes = young_list->survivor_used_bytes();
+    size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
+    size_t used = _g1->used();
+    size_t capacity = _g1->capacity();
+
+    gclog_or_tty->print_cr(
+         "   [Eden: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
+             "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
+             "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
+                     EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
+             EXT_SIZE_PARAMS(_eden_bytes_before_gc),
+               EXT_SIZE_PARAMS(eden_bytes),
+             EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
+               EXT_SIZE_PARAMS(survivor_bytes),
+             EXT_SIZE_PARAMS(used_before_gc),
+             EXT_SIZE_PARAMS(_capacity_before_gc),
+               EXT_SIZE_PARAMS(used),
+               EXT_SIZE_PARAMS(capacity));
+  } else if (PrintGC) {
+    _g1->print_size_transition(gclog_or_tty,
+                               _cur_collection_pause_used_at_start_bytes,
+                               _g1->used(), _g1->capacity());
+  }
+}
+
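
A standalone sketch of the heap-transition line that print_heap_transition() emits, using a simplified stand-in for byte_size_in_proper_unit()/proper_unit_for_byte_size(); the sizes below are made-up example values.

#include <cstdio>
#include <cstddef>

// Simplified stand-in for the proper-unit helpers; not the HotSpot API.
static void to_proper_unit(size_t bytes, size_t* value, const char** unit) {
  static const char* units[] = {"B", "K", "M", "G"};
  int i = 0;
  while (bytes >= 1024 && i < 3) { bytes /= 1024; ++i; }
  *value = bytes; *unit = units[i];
}

static void print_sz(size_t bytes) {
  size_t v; const char* u;
  to_proper_unit(bytes, &v, &u);
  std::printf("%zu%s", v, u);
}

int main() {
  size_t eden_before = 24u << 20, eden_after = 0;
  size_t surv_before = 2u << 20,  surv_after = 3u << 20;
  size_t used_before = 40u << 20, used_after = 18u << 20;
  size_t cap_before  = 64u << 20, cap_after  = 64u << 20;

  // "[Eden: X->Y Survivors: A->B Heap: U(C)->U'(C')]"
  std::printf("   [Eden: "); print_sz(eden_before); std::printf("->"); print_sz(eden_after);
  std::printf(" Survivors: "); print_sz(surv_before); std::printf("->"); print_sz(surv_after);
  std::printf(" Heap: "); print_sz(used_before); std::printf("("); print_sz(cap_before);
  std::printf(")->"); print_sz(used_after); std::printf("("); print_sz(cap_after);
  std::printf(")]\n");
  return 0;
}
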
 // <NEW PREDICTION>
 
 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
@@ -2435,21 +2470,6 @@
   G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
 }
 
-class NextNonCSElemFinder: public HeapRegionClosure {
-  HeapRegion* _res;
-public:
-  NextNonCSElemFinder(): _res(NULL) {}
-  bool doHeapRegion(HeapRegion* r) {
-    if (!r->in_collection_set()) {
-      _res = r;
-      return true;
-    } else {
-      return false;
-    }
-  }
-  HeapRegion* res() { return _res; }
-};
-
 class KnownGarbageClosure: public HeapRegionClosure {
   CollectionSetChooser* _hrSorted;
 
@@ -2618,14 +2638,6 @@
   assert(_inc_cset_build_state == Active, "Precondition");
   assert(!hr->is_young(), "non-incremental add of young region");
 
-  if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr("added region to cset "
-                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                           "top "PTR_FORMAT", %s",
-                           hr->hrs_index(), hr->bottom(), hr->end(),
-                           hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
-  }
-
   if (_g1->mark_in_progress())
     _g1->concurrent_mark()->registerCSetRegion(hr);
 
@@ -2791,14 +2803,6 @@
     _inc_cset_tail->set_next_in_collection_set(hr);
   }
   _inc_cset_tail = hr;
-
-  if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
-                  "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                  "top "PTR_FORMAT", young %s",
-                  hr->hrs_index(), hr->bottom(), hr->end(),
-                  hr->top(), (hr->is_young()) ? "YES" : "NO");
-  }
 }
 
 // Add the region to the LHS of the incremental cset
@@ -2816,14 +2820,6 @@
     _inc_cset_tail = hr;
   }
   _inc_cset_head = hr;
-
-  if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
-                  "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                  "top "PTR_FORMAT", young %s",
-                  hr->hrs_index(), hr->bottom(), hr->end(),
-                  hr->top(), (hr->is_young()) ? "YES" : "NO");
-  }
 }
 
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -891,6 +891,7 @@
   virtual void record_collection_pause_end_G1_strong_roots();
 
   virtual void record_collection_pause_end();
+  void print_heap_transition();
 
   // Record the fact that a full collection occurred.
   virtual void record_full_collection_start();
@@ -1179,6 +1180,11 @@
   // The limit on the number of regions allocated for survivors.
   size_t _max_survivor_regions;
 
+  // For reporting purposes.
+  size_t _eden_bytes_before_gc;
+  size_t _survivor_bytes_before_gc;
+  size_t _capacity_before_gc;
+
   // The number of survivor regions after a collection.
   size_t _recorded_survivor_regions;
   // List of survivor regions.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HRPrinter.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1HRPrinter.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "utilities/ostream.hpp"
+
+const char* G1HRPrinter::action_name(ActionType action) {
+  switch(action) {
+    case Alloc:          return "ALLOC";
+    case AllocForce:     return "ALLOC-FORCE";
+    case Retire:         return "RETIRE";
+    case Reuse:          return "REUSE";
+    case CSet:           return "CSET";
+    case EvacFailure:    return "EVAC-FAILURE";
+    case Cleanup:        return "CLEANUP";
+    case PostCompaction: return "POST-COMPACTION";
+    case Commit:         return "COMMIT";
+    case Uncommit:       return "UNCOMMIT";
+    default:             ShouldNotReachHere();
+  }
+  // trying to keep the Windows compiler happy
+  return NULL;
+}
+
+const char* G1HRPrinter::region_type_name(RegionType type) {
+  switch (type) {
+    case Unset:              return NULL;
+    case Eden:               return "Eden";
+    case Survivor:           return "Survivor";
+    case Old:                return "Old";
+    case SingleHumongous:    return "SingleH";
+    case StartsHumongous:    return "StartsH";
+    case ContinuesHumongous: return "ContinuesH";
+    default:                 ShouldNotReachHere();
+  }
+  // trying to keep the Windows compiler happy
+  return NULL;
+}
+
+const char* G1HRPrinter::phase_name(PhaseType phase) {
+  switch (phase) {
+    case StartGC:     return "StartGC";
+    case EndGC:       return "EndGC";
+    case StartFullGC: return "StartFullGC";
+    case EndFullGC:   return "EndFullGC";
+    default:          ShouldNotReachHere();
+  }
+  // trying to keep the Windows compiler happy
+  return NULL;
+}
+
+#define G1HR_PREFIX     " G1HR"
+
+void G1HRPrinter::print(ActionType action, RegionType type,
+                        HeapRegion* hr, HeapWord* top) {
+  const char* action_str = action_name(action);
+  const char* type_str   = region_type_name(type);
+  HeapWord* bottom = hr->bottom();
+
+  if (type_str != NULL) {
+    if (top != NULL) {
+      gclog_or_tty->print_cr(G1HR_PREFIX" %s(%s) "PTR_FORMAT" "PTR_FORMAT,
+                             action_str, type_str, bottom, top);
+    } else {
+      gclog_or_tty->print_cr(G1HR_PREFIX" %s(%s) "PTR_FORMAT,
+                             action_str, type_str, bottom);
+    }
+  } else {
+    if (top != NULL) {
+      gclog_or_tty->print_cr(G1HR_PREFIX" %s "PTR_FORMAT" "PTR_FORMAT,
+                             action_str, bottom, top);
+    } else {
+      gclog_or_tty->print_cr(G1HR_PREFIX" %s "PTR_FORMAT,
+                             action_str, bottom);
+    }
+  }
+}
+
+void G1HRPrinter::print(ActionType action, HeapWord* bottom, HeapWord* end) {
+  const char* action_str = action_name(action);
+
+  gclog_or_tty->print_cr(G1HR_PREFIX" %s ["PTR_FORMAT","PTR_FORMAT"]",
+                         action_str, bottom, end);
+}
+
+void G1HRPrinter::print(PhaseType phase, size_t phase_num) {
+  const char* phase_str = phase_name(phase);
+  gclog_or_tty->print_cr(G1HR_PREFIX" #%s "SIZE_FORMAT, phase_str, phase_num);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HRPrinter.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP
+
+#include "memory/allocation.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+
+#define SKIP_RETIRED_FULL_REGIONS 1
+
+class G1HRPrinter VALUE_OBJ_CLASS_SPEC {
+public:
+  typedef enum {
+    Alloc,
+    AllocForce,
+    Retire,
+    Reuse,
+    CSet,
+    EvacFailure,
+    Cleanup,
+    PostCompaction,
+    Commit,
+    Uncommit
+  } ActionType;
+
+  typedef enum {
+    Unset,
+    Eden,
+    Survivor,
+    Old,
+    SingleHumongous,
+    StartsHumongous,
+    ContinuesHumongous
+  } RegionType;
+
+  typedef enum {
+    StartGC,
+    EndGC,
+    StartFullGC,
+    EndFullGC
+  } PhaseType;
+
+private:
+  bool _active;
+
+  static const char* action_name(ActionType action);
+  static const char* region_type_name(RegionType type);
+  static const char* phase_name(PhaseType phase);
+
+  // Print an action event. This version is used in most scenarios and
+  // only prints the region's bottom. The parameters type and top are
+  // optional (the "not set" values are Unset and NULL).
+  static void print(ActionType action, RegionType type,
+                    HeapRegion* hr, HeapWord* top);
+
+  // Print an action event. This version prints both the region's
+  // bottom and end. Used for Commit / Uncommit events.
+  static void print(ActionType action, HeapWord* bottom, HeapWord* end);
+
+  // Print a phase event.
+  static void print(PhaseType phase, size_t phase_num);
+
+public:
+  // In some places we iterate over a list in order to generate output
+  // for the list's elements. By exposing this accessor we let callers
+  // skip that iteration entirely when the printer is not active.
+  const bool is_active() { return _active; }
+
+  // This has to be set explicitly, since it is done during the
+  // heap's initialize() method, not in the constructor.
+  void set_active(bool active) { _active = active; }
+
+  // The methods below are convenient wrappers for the print() methods.
+
+  void alloc(HeapRegion* hr, RegionType type, bool force = false) {
+    if (is_active()) {
+      print((!force) ? Alloc : AllocForce, type, hr, NULL);
+    }
+  }
+
+  void alloc(RegionType type, HeapRegion* hr, HeapWord* top) {
+    if (is_active()) {
+      print(Alloc, type, hr, top);
+    }
+  }
+
+  void retire(HeapRegion* hr) {
+    if (is_active()) {
+      if (!SKIP_RETIRED_FULL_REGIONS || hr->top() < hr->end()) {
+        print(Retire, Unset, hr, hr->top());
+      }
+    }
+  }
+
+  void reuse(HeapRegion* hr) {
+    if (is_active()) {
+      print(Reuse, Unset, hr, NULL);
+    }
+  }
+
+  void cset(HeapRegion* hr) {
+    if (is_active()) {
+      print(CSet, Unset, hr, NULL);
+    }
+  }
+
+  void evac_failure(HeapRegion* hr) {
+    if (is_active()) {
+      print(EvacFailure, Unset, hr, NULL);
+    }
+  }
+
+  void cleanup(HeapRegion* hr) {
+    if (is_active()) {
+      print(Cleanup, Unset, hr, NULL);
+    }
+  }
+
+  void post_compaction(HeapRegion* hr, RegionType type) {
+    if (is_active()) {
+      print(PostCompaction, type, hr, hr->top());
+    }
+  }
+
+  void commit(HeapWord* bottom, HeapWord* end) {
+    if (is_active()) {
+      print(Commit, bottom, end);
+    }
+  }
+
+  void uncommit(HeapWord* bottom, HeapWord* end) {
+    if (is_active()) {
+      print(Uncommit, bottom, end);
+    }
+  }
+
+  void start_gc(bool full, size_t gc_num) {
+    if (is_active()) {
+      if (!full) {
+        print(StartGC, gc_num);
+      } else {
+        print(StartFullGC, gc_num);
+      }
+    }
+  }
+
+  void end_gc(bool full, size_t gc_num) {
+    if (is_active()) {
+      if (!full) {
+        print(EndGC, gc_num);
+      } else {
+        print(EndFullGC, gc_num);
+      }
+    }
+  }
+
+  G1HRPrinter() : _active(false) { }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP
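
A minimal usage sketch for the printer above (not part of the changeset; the _hr_printer field, the region sequence hrs, and the addresses are assumed for illustration). The wrapper methods already test is_active(), so a caller only needs the explicit check when it would otherwise iterate over regions just to produce output:

    // Hypothetical call site: skip the region walk entirely when printing is off.
    if (_hr_printer.is_active()) {
      for (size_t i = 0; i < hrs->length(); i += 1) {
        _hr_printer.cset(hrs->at(i));
      }
    }

    // Roughly what the resulting log lines look like (values invented):
    //  G1HR #StartGC 5
    //  G1HR ALLOC(Eden) 0x00000000f0000000
    //  G1HR CSET 0x00000000f0000000
    //  G1HR RETIRE 0x00000000f0000000 0x00000000f0062400
    //  G1HR #EndGC 5
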
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -84,11 +84,6 @@
 
   mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);
 
-  if (VerifyDuringGC) {
-      G1CollectedHeap* g1h = G1CollectedHeap::heap();
-      g1h->checkConcurrentMark();
-  }
-
   mark_sweep_phase2();
 
   // Don't add any more derived pointers during phase3
@@ -179,6 +174,29 @@
 
   assert(GenMarkSweep::_marking_stack.is_empty(),
          "stack should be empty by now");
+
+  if (VerifyDuringGC) {
+    HandleMark hm;  // handle scope
+    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
+    gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
+    Universe::heap()->prepare_for_verify();
+    // Note: we can verify only the heap here. When an object is
+    // marked, the previous value of the mark word (including
+    // identity hash values, ages, etc) is preserved, and the mark
+    // word is set to markOop::marked_value - effectively removing
+    // any hash values from the mark word. These hash values are
+    // used when verifying the dictionaries and so removing them
+    // from the mark word can make verification of the dictionaries
+    // fail. At the end of the GC, the original mark word values
+    // (including hash values) are restored to the appropriate
+    // objects.
+    Universe::heap()->verify(/* allow dirty */ true,
+                             /* silent      */ false,
+                             /* option      */ VerifyOption_G1UseMarkWord);
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    gclog_or_tty->print_cr("]");
+  }
 }
 
 class G1PrepareCompactClosure: public HeapRegionClosure {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
 class CMBitMap;
 class CMMarkStack;
 class G1ParScanThreadState;
+class CMTask;
 
 // A class that scans oops in a given heap region (much as OopsInGenClosure
 // scans oops in a generation.)
@@ -40,7 +41,7 @@
 protected:
   HeapRegion* _from;
 public:
-  virtual void set_region(HeapRegion* from) { _from = from; }
+  void set_region(HeapRegion* from) { _from = from; }
 };
 
 class G1ParClosureSuper : public OopsInHeapRegionClosure {
@@ -161,44 +162,6 @@
   bool do_header() { return false; }
 };
 
-class FilterInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure {
-  G1CollectedHeap* _g1;
-  OopsInHeapRegionClosure* _oc;
-public:
-  FilterInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1,
-                                     OopsInHeapRegionClosure* oc) :
-    _g1(g1), _oc(oc)
-  {}
-  template <class T> void do_oop_nv(T* p);
-  virtual void do_oop(oop* p) { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
-  bool apply_to_weak_ref_discovered_field() { return true; }
-  bool do_header() { return false; }
-  void set_region(HeapRegion* from) {
-    _oc->set_region(from);
-  }
-};
-
-class FilterAndMarkInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure {
-  G1CollectedHeap* _g1;
-  ConcurrentMark* _cm;
-  OopsInHeapRegionClosure* _oc;
-public:
-  FilterAndMarkInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1,
-                                            OopsInHeapRegionClosure* oc,
-                                            ConcurrentMark* cm)
-  : _g1(g1), _oc(oc), _cm(cm) { }
-
-  template <class T> void do_oop_nv(T* p);
-  virtual void do_oop(oop* p) { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
-  bool apply_to_weak_ref_discovered_field() { return true; }
-  bool do_header() { return false; }
-  void set_region(HeapRegion* from) {
-    _oc->set_region(from);
-  }
-};
-
 class FilterOutOfRegionClosure: public OopClosure {
   HeapWord* _r_bottom;
   HeapWord* _r_end;
@@ -214,4 +177,16 @@
   int out_of_region() { return _out_of_region; }
 };
 
+// Closure for iterating over object fields during concurrent marking
+class G1CMOopClosure : public OopClosure {
+  G1CollectedHeap*   _g1h;
+  ConcurrentMark*    _cm;
+  CMTask*            _task;
+public:
+  G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(      oop* p) { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+};
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
 
-#include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/concurrentMark.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1OopClosures.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
@@ -66,27 +66,6 @@
   }
 }
 
-template <class T> inline void FilterInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop) &&
-      _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop)))
-    _oc->do_oop(p);
-}
-
-template <class T> inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    HeapRegion* hr = _g1->heap_region_containing((HeapWord*) obj);
-    if (hr != NULL) {
-      if (hr->in_collection_set())
-        _oc->do_oop(p);
-      else if (!hr->is_young())
-        _cm->grayRoot(obj);
-    }
-  }
-}
-
 // This closure is applied to the fields of the objects that have just been copied.
 template <class T> inline void G1ParScanClosure::do_oop_nv(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
@@ -129,5 +108,18 @@
   }
 }
 
+template <class T> inline void G1CMOopClosure::do_oop_nv(T* p) {
+  assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
+  assert(!_g1h->is_on_master_free_list(
+                    _g1h->heap_region_containing((HeapWord*) p)), "invariant");
+
+  oop obj = oopDesc::load_decode_heap_oop(p);
+  if (_cm->verbose_high()) {
+    gclog_or_tty->print_cr("[%d] we're looking at location "
+                           "*"PTR_FORMAT" = "PTR_FORMAT,
+                           _task->task_id(), p, (void*) obj);
+  }
+  _task->deal_with_reference(obj);
+}
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -66,41 +66,6 @@
 }
 #endif
 
-
-class IntoCSOopClosure: public OopsInHeapRegionClosure {
-  OopsInHeapRegionClosure* _blk;
-  G1CollectedHeap* _g1;
-public:
-  IntoCSOopClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
-    _g1(g1), _blk(blk) {}
-  void set_region(HeapRegion* from) {
-    _blk->set_region(from);
-  }
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-  template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
-    if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
-  }
-  bool apply_to_weak_ref_discovered_field() { return true; }
-  bool idempotent() { return true; }
-};
-
-class VerifyRSCleanCardOopClosure: public OopClosure {
-  G1CollectedHeap* _g1;
-public:
-  VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-  template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
-    HeapRegion* to = _g1->heap_region_containing(obj);
-    guarantee(to == NULL || !to->in_collection_set(),
-              "Missed a rem set member.");
-  }
-};
-
 G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
   : _g1(g1), _conc_refine_cards(0),
     _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
@@ -332,31 +297,6 @@
   _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
 }
 
-#ifndef PRODUCT
-class PrintRSClosure : public HeapRegionClosure {
-  int _count;
-public:
-  PrintRSClosure() : _count(0) {}
-  bool doHeapRegion(HeapRegion* r) {
-    HeapRegionRemSet* hrrs = r->rem_set();
-    _count += (int) hrrs->occupied();
-    if (hrrs->occupied() == 0) {
-      gclog_or_tty->print("Heap Region [" PTR_FORMAT ", " PTR_FORMAT ") "
-                          "has no remset entries\n",
-                          r->bottom(), r->end());
-    } else {
-      gclog_or_tty->print("Printing rem set for heap region [" PTR_FORMAT ", " PTR_FORMAT ")\n",
-                          r->bottom(), r->end());
-      r->print();
-      hrrs->print();
-      gclog_or_tty->print("\nDone printing rem set\n");
-    }
-    return false;
-  }
-  int occupied() {return _count;}
-};
-#endif
-
 class CountRSSizeClosure: public HeapRegionClosure {
   size_t _n;
   size_t _tot;
@@ -482,10 +422,6 @@
 }
 
 void G1RemSet::prepare_for_oops_into_collection_set_do() {
-#if G1_REM_SET_LOGGING
-  PrintRSClosure cl;
-  _g1->collection_set_iterate(&cl);
-#endif
   cleanupHRRS();
   ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
   _g1->set_refine_cte_cl_concurrency(false);
@@ -504,14 +440,6 @@
 }
 
 
-class cleanUpIteratorsClosure : public HeapRegionClosure {
-  bool doHeapRegion(HeapRegion *r) {
-    HeapRegionRemSet* hrrs = r->rem_set();
-    hrrs->init_for_par_iteration();
-    return false;
-  }
-};
-
 // This closure, applied to a DirtyCardQueueSet, is used to immediately
 // update the RSets for the regions in the CSet. For each card it iterates
 // through the oops which coincide with that card. It scans the reference
@@ -572,18 +500,13 @@
 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   guarantee( _cards_scanned != NULL, "invariant" );
   _total_cards_scanned = 0;
-  for (uint i = 0; i < n_workers(); ++i)
+  for (uint i = 0; i < n_workers(); ++i) {
     _total_cards_scanned += _cards_scanned[i];
+  }
   FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
   _cards_scanned = NULL;
   // Cleanup after copy
-#if G1_REM_SET_LOGGING
-  PrintRSClosure cl;
-  _g1->heap_region_iterate(&cl);
-#endif
   _g1->set_refine_cte_cl_concurrency(true);
-  cleanUpIteratorsClosure iterClosure;
-  _g1->collection_set_iterate(&iterClosure);
   // Set all cards back to clean.
   _g1->cleanUpCardTable();
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -142,8 +142,6 @@
   virtual void prepare_for_verify();
 };
 
-#define G1_REM_SET_LOGGING 0
-
 class CountNonCleanMemRegionClosure: public MemRegionClosure {
   G1CollectedHeap* _g1;
   int _n;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -65,12 +65,6 @@
 
   HeapRegion* to = _g1->heap_region_containing(obj);
   if (to != NULL && from != to) {
-#if G1_REM_SET_LOGGING
-    gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
-                           " for region [" PTR_FORMAT ", " PTR_FORMAT ")",
-                           p, obj,
-                           to->bottom(), to->end());
-#endif
     assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
     to->rem_set()->add_reference(p, tid);
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,8 +45,7 @@
 
 class FilterIntoCSClosure;
 class FilterOutOfRegionClosure;
-class FilterInHeapRegionAndIntoCSClosure;
-class FilterAndMarkInHeapRegionAndIntoCSClosure;
+class G1CMOopClosure;
 
 #ifdef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES
 #error "FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES already defined."
@@ -58,8 +57,7 @@
       f(G1ParPushHeapRSClosure,_nv)                     \
       f(FilterIntoCSClosure,_nv)                        \
       f(FilterOutOfRegionClosure,_nv)                   \
-      f(FilterInHeapRegionAndIntoCSClosure,_nv)         \
-      f(FilterAndMarkInHeapRegionAndIntoCSClosure,_nv)
+      f(G1CMOopClosure,_nv)
 
 #ifdef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES
 #error "FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES already defined."
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -60,13 +60,14 @@
   oop _containing_obj;
   bool _failures;
   int _n_failures;
-  bool _use_prev_marking;
+  VerifyOption _vo;
 public:
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
-  VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
     _g1h(g1h), _bs(NULL), _containing_obj(NULL),
-    _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
+    _failures(false), _n_failures(0), _vo(vo)
   {
     BarrierSet* bs = _g1h->barrier_set();
     if (bs->is_a(BarrierSet::CardTableModRef))
@@ -95,14 +96,14 @@
 
   template <class T> void do_oop_work(T* p) {
     assert(_containing_obj != NULL, "Precondition");
-    assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
+    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
            "Precondition");
     T heap_oop = oopDesc::load_heap_oop(p);
     if (!oopDesc::is_null(heap_oop)) {
       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
       bool failed = false;
       if (!_g1h->is_in_closed_subset(obj) ||
-          _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
+          _g1h->is_obj_dead_cond(obj, _vo)) {
         if (!_failures) {
           gclog_or_tty->print_cr("");
           gclog_or_tty->print_cr("----------");
@@ -159,20 +160,16 @@
               gclog_or_tty->print_cr("----------");
             }
             gclog_or_tty->print_cr("Missing rem set entry:");
-            gclog_or_tty->print_cr("Field "PTR_FORMAT
-                          " of obj "PTR_FORMAT
-                          ", in region %d ["PTR_FORMAT
-                          ", "PTR_FORMAT"),",
-                          p, (void*) _containing_obj,
-                          from->hrs_index(),
-                          from->bottom(),
-                          from->end());
+            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
+                                   "of obj "PTR_FORMAT", "
+                                   "in region "HR_FORMAT,
+                                   p, (void*) _containing_obj,
+                                   HR_FORMAT_PARAMS(from));
             _containing_obj->print_on(gclog_or_tty);
-            gclog_or_tty->print_cr("points to obj "PTR_FORMAT
-                          " in region %d ["PTR_FORMAT
-                          ", "PTR_FORMAT").",
-                          (void*) obj, to->hrs_index(),
-                          to->bottom(), to->end());
+            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
+                                   "in region "HR_FORMAT,
+                                   (void*) obj,
+                                   HR_FORMAT_PARAMS(to));
             obj->print_on(gclog_or_tty);
             gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                           cv_obj, cv_field);
@@ -484,11 +481,10 @@
 
 
 HeapRegion::
-HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
-                     MemRegion mr, bool is_zeroed)
+HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray,
+           MemRegion mr, bool is_zeroed)
   : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
-    _next_fk(HeapRegionDCTOC::NoFilterKind),
-    _hrs_index(-1),
+    _next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(hrs_index),
     _humongous_type(NotHumongous), _humongous_start_region(NULL),
     _in_collection_set(false), _is_gc_alloc_region(false),
     _next_in_special_set(NULL), _orig_end(NULL),
@@ -740,20 +736,20 @@
 
 void HeapRegion::verify(bool allow_dirty) const {
   bool dummy = false;
-  verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
+  verify(allow_dirty, VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
 }
 
 // This really ought to be commoned up into OffsetTableContigSpace somehow.
 // We would need a mechanism to make that code skip dead objects.
 
 void HeapRegion::verify(bool allow_dirty,
-                        bool use_prev_marking,
+                        VerifyOption vo,
                         bool* failures) const {
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
   *failures = false;
   HeapWord* p = bottom();
   HeapWord* prev_p = NULL;
-  VerifyLiveClosure vl_cl(g1, use_prev_marking);
+  VerifyLiveClosure vl_cl(g1, vo);
   bool is_humongous = isHumongous();
   bool do_bot_verify = !is_young();
   size_t object_num = 0;
@@ -778,7 +774,7 @@
       return;
     }
 
-    if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
+    if (!g1->is_obj_dead_cond(obj, this, vo)) {
       if (obj->is_oop()) {
         klassOop klass = obj->klass();
         if (!klass->is_perm()) {
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -52,9 +52,11 @@
 class HeapRegion;
 class HeapRegionSetBase;
 
-#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
-#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \
-                               (_hr_)->top(), (_hr_)->end()
+#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
+#define HR_FORMAT_PARAMS(_hr_) \
+                (_hr_)->hrs_index(), \
+                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
+                (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
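
For illustration (values invented, not taken from the patch), a region printed with the new HR_FORMAT looks roughly like:

    273:(E)[0x00000000f1100000,0x00000000f1180000,0x00000000f1200000]

i.e. the hrs index, a one-letter tag (S = survivor, E = other young, - = not young), then bottom, top and end.
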
 
 // A dirty card to oop closure for heap regions. It
 // knows how to get the G1 heap and how to use the bitmap
@@ -237,9 +239,8 @@
   G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
 
  protected:
-  // If this region is a member of a HeapRegionSeq, the index in that
-  // sequence, otherwise -1.
-  int  _hrs_index;
+  // The index of this region in the heap region sequence.
+  size_t  _hrs_index;
 
   HumongousType _humongous_type;
   // For a humongous region, region in which it starts.
@@ -296,8 +297,7 @@
   enum YoungType {
     NotYoung,                   // a region is not young
     Young,                      // a region is young
-    Survivor                    // a region is young and it contains
-                                // survivor
+    Survivor                    // a region is young and it contains survivors
   };
 
   volatile YoungType _young_type;
@@ -351,7 +351,8 @@
 
  public:
   // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
-  HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
+  HeapRegion(size_t hrs_index,
+             G1BlockOffsetSharedArray* sharedOffsetArray,
              MemRegion mr, bool is_zeroed);
 
   static int LogOfHRGrainBytes;
@@ -393,8 +394,7 @@
 
   // The index of this region in the heap region sequence it belongs to.
-  int hrs_index() const { return _hrs_index; }
-  void set_hrs_index(int index) { _hrs_index = index; }
+  size_t hrs_index() const { return _hrs_index; }
 
   // The number of bytes marked live in the region in the last marking phase.
   size_t marked_bytes()    { return _prev_marked_bytes; }
@@ -579,6 +579,8 @@
   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
 
+  HeapWord* orig_end() { return _orig_end; }
+
   // Allows logical separation between objects allocated before and after.
   void save_marks();
 
@@ -853,14 +855,20 @@
   void print() const;
   void print_on(outputStream* st) const;
 
-  // use_prev_marking == true  -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
+  // vo == UsePrevMarking  -> use "prev" marking information,
+  // vo == UseNextMarking -> use "next" marking information
+  // vo == UseMarkWord    -> use the mark word in the object header
+  //
   // NOTE: Only the "prev" marking information is guaranteed to be
   // consistent most of the time, so most calls to this should use
-  // use_prev_marking == true. Currently, there is only one case where
-  // this is called with use_prev_marking == false, which is to verify
-  // the "next" marking information at the end of remark.
-  void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const;
+  // vo == UsePrevMarking.
+  // Currently, there is only one case where this is called with
+  // vo == UseNextMarking, which is to verify the "next" marking
+  // information at the end of remark.
+  // Currently there is only one place where this is called with
+  // vo == UseMarkWord, which is to verify the marking during a
+  // full GC.
+  void verify(bool allow_dirty, VerifyOption vo, bool *failures) const;
 
   // Override; it uses the "prev" marking information
   virtual void verify(bool allow_dirty) const;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -834,7 +834,7 @@
 #endif
 
   // Set the corresponding coarse bit.
-  int max_hrs_index = max->hr()->hrs_index();
+  size_t max_hrs_index = max->hr()->hrs_index();
   if (!_coarse_map.at(max_hrs_index)) {
     _coarse_map.at_put(max_hrs_index, true);
     _n_coarse_entries++;
@@ -860,7 +860,8 @@
                               BitMap* region_bm, BitMap* card_bm) {
   // First eliminate garbage regions from the coarse map.
   if (G1RSScrubVerbose)
-    gclog_or_tty->print_cr("Scrubbing region %d:", hr()->hrs_index());
+    gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":",
+                           hr()->hrs_index());
 
   assert(_coarse_map.size() == region_bm->size(), "Precondition");
   if (G1RSScrubVerbose)
@@ -878,7 +879,8 @@
       PosParPRT* nxt = cur->next();
       // If the entire region is dead, eliminate.
       if (G1RSScrubVerbose)
-        gclog_or_tty->print_cr("     For other region %d:", cur->hr()->hrs_index());
+        gclog_or_tty->print_cr("     For other region "SIZE_FORMAT":",
+                               cur->hr()->hrs_index());
       if (!region_bm->at(cur->hr()->hrs_index())) {
         *prev = nxt;
         cur->set_next(NULL);
@@ -994,7 +996,7 @@
 
 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
-  size_t hrs_ind = (size_t)from_hr->hrs_index();
+  size_t hrs_ind = from_hr->hrs_index();
   size_t ind = hrs_ind & _mod_max_fine_entries_mask;
   if (del_single_region_table(ind, from_hr)) {
     assert(!_coarse_map.at(hrs_ind), "Inv");
@@ -1002,7 +1004,7 @@
     _coarse_map.par_at_put(hrs_ind, 0);
   }
   // Check to see if any of the fcc entries come from here.
-  int hr_ind = hr()->hrs_index();
+  size_t hr_ind = hr()->hrs_index();
   for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
     int fcc_ent = _from_card_cache[tid][hr_ind];
     if (fcc_ent != -1) {
@@ -1083,8 +1085,9 @@
 
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                    HeapRegion* hr)
-  : _bosa(bosa), _other_regions(hr), _iter_state(Unclaimed) { }
-
+  : _bosa(bosa), _other_regions(hr) {
+  reset_for_par_iteration();
+}
 
 void HeapRegionRemSet::setup_remset_size() {
   // Setup sparse and fine-grain tables sizes.
@@ -1099,10 +1102,6 @@
   guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
 }
 
-void HeapRegionRemSet::init_for_par_iteration() {
-  _iter_state = Unclaimed;
-}
-
 bool HeapRegionRemSet::claim_iter() {
   if (_iter_state != Unclaimed) return false;
   jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
@@ -1117,7 +1116,6 @@
   return _iter_state == Complete;
 }
 
-
 void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
   iter->initialize(this);
 }
@@ -1130,7 +1128,7 @@
   while (iter.has_next(card_index)) {
     HeapWord* card_start =
       G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
-    gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", card_start);
+    gclog_or_tty->print_cr("  Card " PTR_FORMAT, card_start);
   }
   // XXX
   if (iter.n_yielded() != occupied()) {
@@ -1157,6 +1155,14 @@
 void HeapRegionRemSet::clear() {
   _other_regions.clear();
   assert(occupied() == 0, "Should be clear.");
+  reset_for_par_iteration();
+}
+
+void HeapRegionRemSet::reset_for_par_iteration() {
+  _iter_state = Unclaimed;
+  _iter_claimed = 0;
+  // It's good to check this to make sure that the two methods are in sync.
+  assert(verify_ready_for_par_iteration(), "post-condition");
 }
 
 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -262,8 +262,6 @@
   virtual void cleanup() = 0;
 #endif
 
-  // Should be called from single-threaded code.
-  void init_for_par_iteration();
   // Attempt to claim the region.  Returns true iff this call caused an
   // atomic transition from Unclaimed to Claimed.
   bool claim_iter();
@@ -273,7 +271,6 @@
   bool iter_is_complete();
 
   // Support for claiming blocks of cards during iteration
-  void set_iter_claimed(size_t x) { _iter_claimed = (jlong)x; }
   size_t iter_claimed() const { return (size_t)_iter_claimed; }
   // Claim the next block of cards
   size_t iter_claimed_next(size_t step) {
@@ -284,6 +281,11 @@
     } while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
     return current;
   }
+  void reset_for_par_iteration();
+
+  bool verify_ready_for_par_iteration() {
+    return (_iter_state == Unclaimed) && (_iter_claimed == 0);
+  }
 
   // Initialize the given iterator to iterate over this rem set.
   void init_iterator(HeapRegionRemSetIterator* iter) const;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -23,259 +23,182 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "memory/allocation.hpp"
 
-// Local to this file.
-
-static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
-  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
-  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
-  else if (*hr1p == *hr2p) return 0;
-  else {
-    assert(false, "We should never compare distinct overlapping regions.");
-  }
-  return 0;
-}
-
-HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
-  _alloc_search_start(0),
-  // The line below is the worst bit of C++ hackery I've ever written
-  // (Detlefs, 11/23).  You should think of it as equivalent to
-  // "_regions(100, true)": initialize the growable array and inform it
-  // that it should allocate its elem array(s) on the C heap.
-  //
-  // The first argument, however, is actually a comma expression
-  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
-  // set_allocation_type() call is to replace the default allocation
-  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
-  // allow to pass the assert in GenericGrowableArray() which checks
-  // that a growable array object must be on C heap if elements are.
-  //
-  // Note: containing object is allocated on C heap since it is CHeapObj.
-  //
-  _regions((ResourceObj::set_allocation_type((address)&_regions,
-                                             ResourceObj::C_HEAP),
-            (int)max_size),
-           true),
-  _next_rr_candidate(0),
-  _seq_bottom(NULL)
-{}
-
-// Private methods.
+// Private
 
-void HeapRegionSeq::print_empty_runs() {
-  int empty_run = 0;
-  int n_empty = 0;
-  int empty_run_start;
-  for (int i = 0; i < _regions.length(); i++) {
-    HeapRegion* r = _regions.at(i);
-    if (r->continuesHumongous()) continue;
-    if (r->is_empty()) {
-      assert(!r->isHumongous(), "H regions should not be empty.");
-      if (empty_run == 0) empty_run_start = i;
-      empty_run++;
-      n_empty++;
-    } else {
-      if (empty_run > 0) {
-        gclog_or_tty->print("  %d:%d", empty_run_start, empty_run);
-        empty_run = 0;
-      }
-    }
-  }
-  if (empty_run > 0) {
-    gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
-  }
-  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
-}
-
-int HeapRegionSeq::find(HeapRegion* hr) {
-  // FIXME: optimized for adjacent regions of fixed size.
-  int ind = hr->hrs_index();
-  if (ind != -1) {
-    assert(_regions.at(ind) == hr, "Mismatch");
-  }
-  return ind;
-}
-
-
-// Public methods.
+size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
+  size_t len = length();
+  assert(num > 1, "use this only for sequences of length 2 or greater");
+  assert(from <= len,
+         err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT,
+                 from, len));
 
-void HeapRegionSeq::insert(HeapRegion* hr) {
-  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
-  if (_regions.length() == 0
-      || _regions.top()->end() <= hr->bottom()) {
-    hr->set_hrs_index(_regions.length());
-    _regions.append(hr);
-  } else {
-    _regions.append(hr);
-    _regions.sort(orderRegions);
-    for (int i = 0; i < _regions.length(); i++) {
-      _regions.at(i)->set_hrs_index(i);
-    }
-  }
-  char* bot = (char*)_regions.at(0)->bottom();
-  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
-}
-
-size_t HeapRegionSeq::length() {
-  return _regions.length();
-}
-
-size_t HeapRegionSeq::free_suffix() {
-  size_t res = 0;
-  int first = _regions.length() - 1;
-  int cur = first;
-  while (cur >= 0 &&
-         (_regions.at(cur)->is_empty()
-          && (first == cur
-              || (_regions.at(cur+1)->bottom() ==
-                  _regions.at(cur)->end())))) {
-      res++;
-      cur--;
-  }
-  return res;
-}
-
-int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
-  assert(num > 1, "pre-condition");
-  assert(0 <= from && from <= _regions.length(),
-         err_msg("from: %d should be valid and <= than %d",
-                 from, _regions.length()));
-
-  int curr = from;
-  int first = -1;
+  size_t curr = from;
+  size_t first = G1_NULL_HRS_INDEX;
   size_t num_so_far = 0;
-  while (curr < _regions.length() && num_so_far < num) {
-    HeapRegion* curr_hr = _regions.at(curr);
-    if (curr_hr->is_empty()) {
-      if (first == -1) {
+  while (curr < len && num_so_far < num) {
+    if (at(curr)->is_empty()) {
+      if (first == G1_NULL_HRS_INDEX) {
         first = curr;
         num_so_far = 1;
       } else {
         num_so_far += 1;
       }
     } else {
-      first = -1;
+      first = G1_NULL_HRS_INDEX;
       num_so_far = 0;
     }
     curr += 1;
   }
-
   assert(num_so_far <= num, "post-condition");
   if (num_so_far == num) {
     // we found enough space for the humongous object
-    assert(from <= first && first < _regions.length(), "post-condition");
-    assert(first < curr && (curr - first) == (int) num, "post-condition");
-    for (int i = first; i < first + (int) num; ++i) {
-      assert(_regions.at(i)->is_empty(), "post-condition");
+    assert(from <= first && first < len, "post-condition");
+    assert(first < curr && (curr - first) == num, "post-condition");
+    for (size_t i = first; i < first + num; ++i) {
+      assert(at(i)->is_empty(), "post-condition");
     }
     return first;
   } else {
     // we failed to find enough space for the humongous object
-    return -1;
+    return G1_NULL_HRS_INDEX;
   }
 }
 
-int HeapRegionSeq::find_contiguous(size_t num) {
-  assert(num > 1, "otherwise we should not be calling this");
-  assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
-         err_msg("_alloc_search_start: %d should be valid and <= than %d",
-                 _alloc_search_start, _regions.length()));
+// Public
+
+void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
+                               size_t max_length) {
+  assert((size_t) bottom % HeapRegion::GrainBytes == 0,
+         "bottom should be heap region aligned");
+  assert((size_t) end % HeapRegion::GrainBytes == 0,
+         "end should be heap region aligned");
+
+  _length = 0;
+  _heap_bottom = bottom;
+  _heap_end = end;
+  _region_shift = HeapRegion::LogOfHRGrainBytes;
+  _next_search_index = 0;
+  _allocated_length = 0;
+  _max_length = max_length;
+
+  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
+  memset(_regions, 0, max_length * sizeof(HeapRegion*));
+  _regions_biased = _regions - ((size_t) bottom >> _region_shift);
+
+  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
+         "bottom should be included in the region with index 0");
+}
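
The inline helpers referenced in the assert above live in heapRegionSeq.inline.hpp, which is not part of this hunk; the following is only a sketch of what the biased lookup amounts to, given the setup in initialize() (an illustration, not the actual implementation):

    // Because _regions_biased == _regions - ((size_t) bottom >> _region_shift),
    // mapping an address to its HeapRegion is one shift plus one array access.
    inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
      return (size_t) addr >> _region_shift;
    }

    inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
      HeapRegion* hr = _regions_biased[addr_to_index_biased(addr)];
      assert(hr != NULL, "should have been set");
      return hr;
    }
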
+
+MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
+                                   HeapWord* new_end,
+                                   FreeRegionList* list) {
+  assert(old_end < new_end, "don't call it otherwise");
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  HeapWord* next_bottom = old_end;
+  assert(_heap_bottom <= next_bottom, "invariant");
+  while (next_bottom < new_end) {
+    assert(next_bottom < _heap_end, "invariant");
+    size_t index = length();
 
-  int start = _alloc_search_start;
-  int res = find_contiguous_from(start, num);
-  if (res == -1 && start != 0) {
-    // Try starting from the beginning. If _alloc_search_start was 0,
-    // no point in doing this again.
-    res = find_contiguous_from(0, num);
+    assert(index < _max_length, "otherwise we cannot expand further");
+    if (index == 0) {
+      // We have not allocated any regions so far
+      assert(next_bottom == _heap_bottom, "invariant");
+    } else {
+      // next_bottom should match the end of the last/previous region
+      assert(next_bottom == at(index - 1)->end(), "invariant");
+    }
+
+    if (index == _allocated_length) {
+      // We have to allocate a new HeapRegion.
+      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
+      if (new_hr == NULL) {
+        // allocation failed, we bail out and return what we have done so far
+        return MemRegion(old_end, next_bottom);
+      }
+      assert(_regions[index] == NULL, "invariant");
+      _regions[index] = new_hr;
+      increment_length(&_allocated_length);
+    }
+    // Have to increment the length first, otherwise we will get an
+    // assert failure at(index) below.
+    increment_length(&_length);
+    HeapRegion* hr = at(index);
+    list->add_as_tail(hr);
+
+    next_bottom = hr->end();
   }
-  if (res != -1) {
-    assert(0 <= res && res < _regions.length(),
-           err_msg("res: %d should be valid", res));
-    _alloc_search_start = res + (int) num;
-    assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
-           err_msg("_alloc_search_start: %d should be valid",
-                   _alloc_search_start));
+  assert(next_bottom == new_end, "post-condition");
+  return MemRegion(old_end, next_bottom);
+}
+
+size_t HeapRegionSeq::free_suffix() {
+  size_t res = 0;
+  size_t index = length();
+  while (index > 0) {
+    index -= 1;
+    if (!at(index)->is_empty()) {
+      break;
+    }
+    res += 1;
   }
   return res;
 }
 
-void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
-  iterate_from((HeapRegion*)NULL, blk);
+size_t HeapRegionSeq::find_contiguous(size_t num) {
+  assert(num > 1, "use this only for sequences of length 2 or greater");
+  assert(_next_search_index <= length(),
+         err_msg("_next_search_indeex: "SIZE_FORMAT" "
+                 "should be valid and <= than "SIZE_FORMAT,
+                 _next_search_index, length()));
+
+  size_t start = _next_search_index;
+  size_t res = find_contiguous_from(start, num);
+  if (res == G1_NULL_HRS_INDEX && start > 0) {
+    // Try starting from the beginning. If _next_search_index was 0,
+    // no point in doing this again.
+    res = find_contiguous_from(0, num);
+  }
+  if (res != G1_NULL_HRS_INDEX) {
+    assert(res < length(),
+           err_msg("res: "SIZE_FORMAT" should be valid", res));
+    _next_search_index = res + num;
+    assert(_next_search_index <= length(),
+           err_msg("_next_search_indeex: "SIZE_FORMAT" "
+                   "should be valid and <= than "SIZE_FORMAT,
+                   _next_search_index, length()));
+  }
+  return res;
 }
 
-// The first argument r is the heap region at which iteration begins.
-// This operation runs fastest when r is NULL, or the heap region for
-// which a HeapRegionClosure most recently returned true, or the
-// heap region immediately to its right in the sequence.  In all
-// other cases a linear search is required to find the index of r.
-
-void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {
-
-  // :::: FIXME ::::
-  // Static cache value is bad, especially when we start doing parallel
-  // remembered set update. For now just don't cache anything (the
-  // code in the def'd out blocks).
+void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
+  iterate_from((HeapRegion*) NULL, blk);
+}
 
-#if 0
-  static int cached_j = 0;
-#endif
-  int len = _regions.length();
-  int j = 0;
-  // Find the index of r.
-  if (r != NULL) {
-#if 0
-    assert(cached_j >= 0, "Invariant.");
-    if ((cached_j < len) && (r == _regions.at(cached_j))) {
-      j = cached_j;
-    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
-      j = cached_j + 1;
-    } else {
-      j = find(r);
-#endif
-      if (j < 0) {
-        j = 0;
-      }
-#if 0
-    }
-#endif
+void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
+  size_t hr_index = 0;
+  if (hr != NULL) {
+    hr_index = (size_t) hr->hrs_index();
   }
-  int i;
-  for (i = j; i < len; i += 1) {
-    int res = blk->doHeapRegion(_regions.at(i));
+
+  size_t len = length();
+  for (size_t i = hr_index; i < len; i += 1) {
+    bool res = blk->doHeapRegion(at(i));
     if (res) {
-#if 0
-      cached_j = i;
-#endif
       blk->incomplete();
       return;
     }
   }
-  for (i = 0; i < j; i += 1) {
-    int res = blk->doHeapRegion(_regions.at(i));
+  for (size_t i = 0; i < hr_index; i += 1) {
+    bool res = blk->doHeapRegion(at(i));
     if (res) {
-#if 0
-      cached_j = i;
-#endif
-      blk->incomplete();
-      return;
-    }
-  }
-}
-
-void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
-  int len = _regions.length();
-  int i;
-  for (i = idx; i < len; i++) {
-    if (blk->doHeapRegion(_regions.at(i))) {
-      blk->incomplete();
-      return;
-    }
-  }
-  for (i = 0; i < idx; i++) {
-    if (blk->doHeapRegion(_regions.at(i))) {
       blk->incomplete();
       return;
     }
@@ -283,54 +206,92 @@
 }
 
 MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
-                                   size_t& num_regions_deleted) {
+                                   size_t* num_regions_deleted) {
   // Reset this in case it's currently pointing into the regions that
   // we just removed.
-  _alloc_search_start = 0;
+  _next_search_index = 0;
 
   assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
   assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
+  assert(length() > 0, "the region sequence should not be empty");
+  assert(length() <= _allocated_length, "invariant");
+  assert(_allocated_length > 0, "we should have at least one region committed");
 
-  if (_regions.length() == 0) {
-    num_regions_deleted = 0;
-    return MemRegion();
-  }
-  int j = _regions.length() - 1;
-  HeapWord* end = _regions.at(j)->end();
+  // Around the loop, i is the index of the next region to be removed.
+  size_t i = length() - 1;
+  assert(i > 0, "we should never remove all regions");
+  // [last_start, end) is the MemRegion that covers the regions we will remove.
+  HeapWord* end = at(i)->end();
   HeapWord* last_start = end;
-  while (j >= 0 && shrink_bytes > 0) {
-    HeapRegion* cur = _regions.at(j);
-    // We have to leave humongous regions where they are,
-    // and work around them.
-    if (cur->isHumongous()) {
-      return MemRegion(last_start, end);
-    }
-    assert(cur == _regions.top(), "Should be top");
+  *num_regions_deleted = 0;
+  while (shrink_bytes > 0) {
+    HeapRegion* cur = at(i);
+    // We should leave the humongous regions where they are.
+    if (cur->isHumongous()) break;
+    // We should stop shrinking if we come across a non-empty region.
     if (!cur->is_empty()) break;
+
+    i -= 1;
+    *num_regions_deleted += 1;
     shrink_bytes -= cur->capacity();
-    num_regions_deleted++;
-    _regions.pop();
     last_start = cur->bottom();
-    // We need to delete these somehow, but can't currently do so here: if
-    // we do, the ZF thread may still access the deleted region.  We'll
-    // leave this here as a reminder that we have to do something about
-    // this.
-    // delete cur;
-    j--;
+    decrement_length(&_length);
+    // We will retain the HeapRegion. _allocated_length should be
+    // covering this index. So, even though we removed the region from
+    // the active set by decreasing _length, we still have it
+    // available in the future if we need to re-use it.
+    assert(i > 0, "we should never remove all regions");
+    assert(length() > 0, "we should never remove all regions");
   }
   return MemRegion(last_start, end);
 }
 
-class PrintHeapRegionClosure : public  HeapRegionClosure {
-public:
-  bool doHeapRegion(HeapRegion* r) {
-    gclog_or_tty->print(PTR_FORMAT ":", r);
-    r->print();
-    return false;
+#ifndef PRODUCT
+void HeapRegionSeq::verify_optional() {
+  guarantee(_length <= _allocated_length,
+            err_msg("invariant: _length: "SIZE_FORMAT" "
+                    "_allocated_length: "SIZE_FORMAT,
+                    _length, _allocated_length));
+  guarantee(_allocated_length <= _max_length,
+            err_msg("invariant: _allocated_length: "SIZE_FORMAT" "
+                    "_max_length: "SIZE_FORMAT,
+                    _allocated_length, _max_length));
+  guarantee(_next_search_index <= _length,
+            err_msg("invariant: _next_search_index: "SIZE_FORMAT" "
+                    "_length: "SIZE_FORMAT,
+                    _next_search_index, _length));
+
+  HeapWord* prev_end = _heap_bottom;
+  for (size_t i = 0; i < _allocated_length; i += 1) {
+    HeapRegion* hr = _regions[i];
+    guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i));
+    guarantee(hr->bottom() == prev_end,
+              err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" "
+                      "prev_end: "PTR_FORMAT,
+                      i, HR_FORMAT_PARAMS(hr), prev_end));
+    guarantee(hr->hrs_index() == i,
+              err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT,
+                      i, hr->hrs_index()));
+    if (i < _length) {
+      // Asserts will fire if i is >= _length
+      HeapWord* addr = hr->bottom();
+      guarantee(addr_to_region(addr) == hr, "sanity");
+      guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
+    } else {
+      guarantee(hr->is_empty(), "sanity");
+      guarantee(!hr->isHumongous(), "sanity");
+      // using assert instead of guarantee here since containing_set()
+      // is only available in non-product builds.
+      assert(hr->containing_set() == NULL, "sanity");
+    }
+    if (hr->startsHumongous()) {
+      prev_end = hr->orig_end();
+    } else {
+      prev_end = hr->end();
+    }
   }
-};
-
-void HeapRegionSeq::print() {
-  PrintHeapRegionClosure cl;
-  iterate(&cl);
+  for (size_t i = _allocated_length; i < _max_length; i += 1) {
+    guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i));
+  }
 }
+#endif // PRODUCT
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -25,92 +25,143 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 
-#include "gc_implementation/g1/heapRegion.hpp"
-#include "utilities/growableArray.hpp"
-
 class HeapRegion;
 class HeapRegionClosure;
+class FreeRegionList;
+
+#define G1_NULL_HRS_INDEX ((size_t) -1)
+
+// This class keeps track of the region metadata (i.e., HeapRegion
+// instances). They are kept in the _regions array in address
+// order. A region's index in the array corresponds to its index in
+// the heap (i.e., 0 is the region at the bottom of the heap, 1 is
+// the one after it, etc.). Two regions that are consecutive in the
+// array should also be adjacent in the address space (i.e.,
+// region(i).end() == region(i+1).bottom()).
+//
+// We create a HeapRegion when we commit the region's address space
+// for the first time. When we uncommit the address space of a
+// region we retain the HeapRegion to be able to re-use it in the
+// future (in case we recommit it).
+//
+// We keep track of three lengths:
+//
+// * _length (returned by length()) is the number of currently
+//   committed regions.
+// * _allocated_length (not exposed outside this class) is the
+//   number of regions for which we have HeapRegions.
+// * _max_length (returned by max_length()) is the maximum number of
+//   regions the heap can have.
+//
+// and maintain that: _length <= _allocated_length <= _max_length
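
A compact way to picture the three lengths (an editorial illustration, consistent with verify_optional() in heapRegionSeq.cpp):

    // _regions: [ committed & in use | HeapRegions kept for re-use | NULL       ]
    //           0 ........... _length ............ _allocated_length ... _max_length
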
 
 class HeapRegionSeq: public CHeapObj {
 
-  // _regions is kept sorted by start address order, and no two regions are
-  // overlapping.
-  GrowableArray<HeapRegion*> _regions;
+  // The array that holds the HeapRegions.
+  HeapRegion** _regions;
+
+  // Version of _regions biased to address 0
+  HeapRegion** _regions_biased;
+
+  // The number of regions committed in the heap.
+  size_t _length;
 
-  // The index in "_regions" at which to start the next allocation search.
-  // (For efficiency only; private to obj_allocate after initialization.)
-  int _alloc_search_start;
+  // The address of the first reserved word in the heap.
+  HeapWord* _heap_bottom;
+
+  // The address of the last reserved word in the heap - 1.
+  HeapWord* _heap_end;
+
+  // The log of the region byte size.
+  size_t _region_shift;
+
+  // A hint for which index to start searching from for humongous
+  // allocations.
+  size_t _next_search_index;
 
-  // Finds a contiguous set of empty regions of length num, starting
-  // from a given index.
-  int find_contiguous_from(int from, size_t num);
+  // The number of regions for which we have allocated HeapRegions.
+  size_t _allocated_length;
+
+  // The maximum number of regions in the heap.
+  size_t _max_length;
+
+  // Find a contiguous set of empty regions of length num, starting
+  // from the given index.
+  size_t find_contiguous_from(size_t from, size_t num);
 
-  // Currently, we're choosing collection sets in a round-robin fashion,
-  // starting here.
-  int _next_rr_candidate;
+  // Map a heap address to a biased region index. Assume that the
+  // address is valid.
+  inline size_t addr_to_index_biased(HeapWord* addr) const;
 
-  // The bottom address of the bottom-most region, or else NULL if there
-  // are no regions in the sequence.
-  char* _seq_bottom;
+  void increment_length(size_t* length) {
+    assert(*length < _max_length, "pre-condition");
+    *length += 1;
+  }
+
+  void decrement_length(size_t* length) {
+    assert(*length > 0, "pre-condition");
+    *length -= 1;
+  }
 
  public:
-  // Initializes "this" to the empty sequence of regions.
-  HeapRegionSeq(const size_t max_size);
+  // Empty constructor; we'll initialize it with the initialize() method.
+  HeapRegionSeq() { }
+
+  void initialize(HeapWord* bottom, HeapWord* end, size_t max_length);
 
-  // Adds "hr" to "this" sequence.  Requires "hr" not to overlap with
-  // any region already in "this".  (Will perform better if regions are
-  // inserted in ascending address order.)
-  void insert(HeapRegion* hr);
+  // Return the HeapRegion at the given index. Assume that the index
+  // is valid.
+  inline HeapRegion* at(size_t index) const;
+
+  // If addr is within the committed space return its corresponding
+  // HeapRegion, otherwise return NULL.
+  inline HeapRegion* addr_to_region(HeapWord* addr) const;
+
+  // Return the HeapRegion that corresponds to the given
+  // address. Assume the address is valid.
+  inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
 
-  // Given a HeapRegion*, returns its index within _regions,
-  // or returns -1 if not found.
-  int find(HeapRegion* hr);
+  // Return the number of regions that have been committed in the heap.
+  size_t length() const { return _length; }
+
+  // Return the maximum number of regions in the heap.
+  size_t max_length() const { return _max_length; }
 
-  // Requires the index to be valid, and return the region at the index.
-  HeapRegion* at(size_t i) { return _regions.at((int)i); }
+  // Expand the sequence to reflect that the heap has grown from
+  // old_end to new_end. Either create new HeapRegions, or re-use
+  // existing ones, and return them in the given list. Returns the
+  // memory region that covers the newly-created regions. If a
+  // HeapRegion allocation fails, the result memory region might be
+  // smaller than the desired one.
+  MemRegion expand_by(HeapWord* old_end, HeapWord* new_end,
+                      FreeRegionList* list);
 
-  // Return the number of regions in the sequence.
-  size_t length();
-
-  // Returns the number of contiguous regions at the end of the sequence
+  // Return the number of contiguous regions at the end of the sequence
   // that are available for allocation.
   size_t free_suffix();
 
   // Find a contiguous set of empty regions of length num and return
-  // the index of the first region or -1 if the search was unsuccessful.
-  int find_contiguous(size_t num);
+  // the index of the first region or G1_NULL_HRS_INDEX if the
+  // search was unsuccessful.
+  size_t find_contiguous(size_t num);
 
-  // Apply the "doHeapRegion" method of "blk" to all regions in "this",
-  // in address order, terminating the iteration early
-  // if the "doHeapRegion" method returns "true".
-  void iterate(HeapRegionClosure* blk);
-
-  // Apply the "doHeapRegion" method of "blk" to all regions in "this",
-  // starting at "r" (or first region, if "r" is NULL), in a circular
-  // manner, terminating the iteration early if the "doHeapRegion" method
-  // returns "true".
-  void iterate_from(HeapRegion* r, HeapRegionClosure* blk);
+  // Apply blk->doHeapRegion() on all committed regions in address order,
+  // terminating the iteration early if doHeapRegion() returns true.
+  void iterate(HeapRegionClosure* blk) const;
 
-  // As above, but start from a given index in the sequence
-  // instead of a given heap region.
-  void iterate_from(int idx, HeapRegionClosure* blk);
+  // As above, but start the iteration from hr and loop around. If hr
+  // is NULL, we start from the first region in the heap.
+  void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;
 
-  // Requires "shrink_bytes" to be a multiple of the page size and heap
-  // region granularity.  Deletes as many "rightmost" completely free heap
-  // regions from the sequence as comprise shrink_bytes bytes.  Returns the
-  // MemRegion indicating the region those regions comprised, and sets
-  // "num_regions_deleted" to the number of regions deleted.
-  MemRegion shrink_by(size_t shrink_bytes, size_t& num_regions_deleted);
+  // Tag as uncommitted as many completely free regions as possible,
+  // up to shrink_bytes, from the suffix of the committed
+  // sequence. Return a MemRegion that corresponds to the address
+  // range of the uncommitted regions. Assume shrink_bytes is page and
+  // heap region aligned.
+  MemRegion shrink_by(size_t shrink_bytes, size_t* num_regions_deleted);
 
-  // If "addr" falls within a region in the sequence, return that region,
-  // or else NULL.
-  inline HeapRegion* addr_to_region(const void* addr);
-
-  void print();
-
-  // Prints out runs of empty regions.
-  void print_empty_runs();
-
+  // Do some sanity checking.
+  void verify_optional() PRODUCT_RETURN;
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
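The rewritten HeapRegionSeq header above replaces the growable array with a flat array indexed by region number and tracks three counts (committed, allocated, maximum). The standalone sketch below illustrates the length invariant and the index-to-address mapping described in those comments; ToyRegionSeq and every name in it are invented for illustration and are not HotSpot code.

```cpp
// region_seq_sketch.cpp -- illustration only, not HotSpot code.
#include <cassert>
#include <cstddef>
#include <cstdio>

// A toy "region sequence": fixed-size regions carved out of a flat range,
// kept in index order, with the invariant
//   length <= allocated_length <= max_length.
struct ToyRegionSeq {
  size_t region_words;          // words per region
  size_t bottom;                // first word of the managed range
  size_t length = 0;            // committed regions
  size_t allocated_length = 0;  // regions whose metadata has been created
  size_t max_length;            // most regions the range can ever hold

  ToyRegionSeq(size_t bottom_, size_t max_regions, size_t words)
      : region_words(words), bottom(bottom_), max_length(max_regions) {}

  // Region i covers [bottom + i*region_words, bottom + (i+1)*region_words).
  size_t region_start(size_t i) const { return bottom + i * region_words; }

  // Commit one more region, re-using its metadata if it already exists.
  void expand_by_one() {
    assert(length < max_length);
    if (length == allocated_length) {
      allocated_length++;       // create the metadata lazily
    }
    length++;
    assert(length <= allocated_length && allocated_length <= max_length);
  }

  // Uncommit the last region but keep its metadata for later re-use.
  void shrink_by_one() {
    assert(length > 0);
    length--;                   // allocated_length stays put on purpose
  }
};

int main() {
  ToyRegionSeq seq(/*bottom=*/0x1000, /*max_regions=*/8, /*words=*/512);
  seq.expand_by_one();
  seq.expand_by_one();
  seq.shrink_by_one();
  seq.expand_by_one();          // re-uses the metadata created earlier
  std::printf("length=%zu allocated=%zu max=%zu start(1)=%#zx\n",
              seq.length, seq.allocated_length, seq.max_length,
              seq.region_start(1));
  return 0;
}
```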
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,23 +25,42 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
 
+#include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 
-inline HeapRegion* HeapRegionSeq::addr_to_region(const void* addr) {
-  assert(_seq_bottom != NULL, "bad _seq_bottom in addr_to_region");
-  if ((char*) addr >= _seq_bottom) {
-    size_t diff = (size_t) pointer_delta((HeapWord*) addr,
-                                         (HeapWord*) _seq_bottom);
-    int index = (int) (diff >> HeapRegion::LogOfHRGrainWords);
-    assert(index >= 0, "invariant / paranoia");
-    if (index < _regions.length()) {
-      HeapRegion* hr = _regions.at(index);
-      assert(hr->is_in_reserved(addr),
-             "addr_to_region is wrong...");
-      return hr;
-    }
+inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
+  assert(_heap_bottom <= addr && addr < _heap_end,
+         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
+                 addr, _heap_bottom, _heap_end));
+  size_t index = (size_t) addr >> _region_shift;
+  return index;
+}
+
+inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
+  assert(_heap_bottom <= addr && addr < _heap_end,
+         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
+                 addr, _heap_bottom, _heap_end));
+  size_t index_biased = addr_to_index_biased(addr);
+  HeapRegion* hr = _regions_biased[index_biased];
+  assert(hr != NULL, "invariant");
+  return hr;
+}
+
+inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
+  if (addr != NULL && addr < _heap_end) {
+    assert(addr >= _heap_bottom,
+          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
+    return addr_to_region_unsafe(addr);
   }
   return NULL;
 }
 
+inline HeapRegion* HeapRegionSeq::at(size_t index) const {
+  assert(index < length(), "pre-condition");
+  HeapRegion* hr = _regions[index];
+  assert(hr != NULL, "sanity");
+  assert(hr->hrs_index() == index, "sanity");
+  return hr;
+}
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
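addr_to_index_biased() above relies on a second copy of the array base that has been pre-offset ("biased") by heap_bottom >> region_shift, so a lookup is just a shift and an indexed load. Below is a minimal sketch of that trick with made-up addresses and a plain int array standing in for HeapRegion*; the caveat about the out-of-range intermediate pointer is noted in the comments.

```cpp
// biased_index_sketch.cpp -- illustration of the "biased array" lookup.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const uintptr_t heap_bottom  = 0x40000000;  // assumed reserved base
  const unsigned  region_shift = 20;          // 1 MB regions
  const size_t    num_regions  = 4;

  // A plain int array stands in for the HeapRegion* array.
  std::vector<int> regions(num_regions);
  for (size_t i = 0; i < num_regions; i++) regions[i] = (int)i;

  // Bias the base pointer so that (addr >> shift) indexes it directly,
  // avoiding a "subtract heap_bottom" on every lookup. Forming this
  // out-of-range pointer is outside strict ISO C++, but it is the trick
  // the VM relies on for flat address spaces.
  int* regions_biased = regions.data() - (heap_bottom >> region_shift);

  uintptr_t addr = heap_bottom + 3 * (uintptr_t(1) << region_shift) + 12345;
  size_t biased_index = (size_t)(addr >> region_shift);
  assert(regions_biased[biased_index] == 3);
  std::printf("%#lx maps to region %d\n",
              (unsigned long)addr, regions_biased[biased_index]);
  return 0;
}
```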
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSets.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSets.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 
 //////////////////// FreeRegionList ////////////////////
@@ -38,6 +39,16 @@
 
 //////////////////// MasterFreeRegionList ////////////////////
 
+const char* MasterFreeRegionList::verify_region_extra(HeapRegion* hr) {
+  // We should reset the RSet for parallel iteration before we add it
+  // to the master free list so that it is ready when the region is
+  // re-allocated.
+  if (!hr->rem_set()->verify_ready_for_par_iteration()) {
+    return "the region's RSet should be ready for parallel iteration";
+  }
+  return FreeRegionList::verify_region_extra(hr);
+}
+
 bool MasterFreeRegionList::check_mt_safety() {
   // Master Free List MT safety protocol:
   // (a) If we're at a safepoint, operations on the master free list
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSets.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSets.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
 
 class MasterFreeRegionList : public FreeRegionList {
 protected:
+  virtual const char* verify_region_extra(HeapRegion* hr);
   virtual bool check_mt_safety();
 
 public:
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -481,8 +481,9 @@
 
 bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
 #if SPARSE_PRT_VERBOSE
-  gclog_or_tty->print_cr("  Adding card %d from region %d to region %d sparse.",
-                card_index, region_id, _hr->hrs_index());
+  gclog_or_tty->print_cr("  Adding card %d from region %d to region "
+                         SIZE_FORMAT" sparse.",
+                         card_index, region_id, _hr->hrs_index());
 #endif
   if (_next->occupied_entries() * 2 > _next->capacity()) {
     expand();
@@ -533,8 +534,8 @@
   _next = new RSHashTable(last->capacity() * 2);
 
 #if SPARSE_PRT_VERBOSE
-  gclog_or_tty->print_cr("  Expanded sparse table for %d to %d.",
-                _hr->hrs_index(), _next->capacity());
+  gclog_or_tty->print_cr("  Expanded sparse table for "SIZE_FORMAT" to %d.",
+                         _hr->hrs_index(), _next->capacity());
 #endif
   for (size_t i = 0; i < last->capacity(); i++) {
     SparsePRTEntry* e = last->entry((int)i);
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -99,6 +99,18 @@
     // At this point we are supposed to start a concurrent cycle. We
     // will do so if one is not already in progress.
     bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
+
+    // The above routine returns true if we were able to force the
+    // next GC pause to be an initial mark; it returns false if a
+    // marking cycle is already in progress.
+    //
+    // If a marking cycle is already in progress just return and skip
+    // the pause - the requesting thread should block in doit_epilogue
+    // until the marking cycle is complete.
+    if (!res) {
+      assert(_word_size == 0, "ExplicitGCInvokesConcurrent shouldn't be allocating");
+      return;
+    }
   }
 
   _pause_succeeded =
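The comment added above describes the control flow only in prose; a hedged sketch of the same early-return guard follows, with invented names rather than the real G1 policy and VM-operation classes.

```cpp
// concurrent_start_sketch.cpp -- invented names; shows the early-return
// guard described in the comment above, not the real G1 VM operation.
#include <cstdio>

struct ToyPolicy {
  bool marking_in_progress = false;
  // True if the next pause can be forced to be an initial mark.
  bool force_initial_mark_if_outside_cycle() {
    if (marking_in_progress) return false;
    marking_in_progress = true;
    return true;
  }
};

// Returns true if a pause was actually started.
static bool do_concurrent_start_pause(ToyPolicy& policy) {
  if (!policy.force_initial_mark_if_outside_cycle()) {
    // A cycle is already running: skip the pause; the requesting thread
    // is expected to block (doit_epilogue in G1) until it completes.
    return false;
  }
  std::puts("starting initial-mark pause");
  return true;
}

int main() {
  ToyPolicy policy;
  do_concurrent_start_pause(policy);  // starts a cycle
  do_concurrent_start_pause(policy);  // skipped: cycle already in progress
  return 0;
}
```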
--- a/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -348,15 +348,31 @@
         // cleared before we had a chance to examine it. In that case, the value
         // will have been logged in the LNC for that chunk.
         // We need to examine as many chunks to the right as this object
-        // covers.
-        const uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
-                                                    - lowest_non_clean_base_chunk_index;
-        DEBUG_ONLY(const uintptr_t last_chunk_index = addr_to_chunk_index(used.last())
-                                                      - lowest_non_clean_base_chunk_index;)
-        assert(last_chunk_index_to_check <= last_chunk_index,
-               err_msg("Out of bounds: last_chunk_index_to_check " INTPTR_FORMAT
-                       " exceeds last_chunk_index " INTPTR_FORMAT,
-                       last_chunk_index_to_check, last_chunk_index));
+        // covers. However, we need to bound this checking to the largest
+        // entry in the LNC array: this is because the heap may expand
+        // after the LNC array has been created but before we reach this point,
+        // and the last block in our chunk may have been expanded to include
+        // the expansion delta (and possibly subsequently allocated from, so
+        // it wouldn't be sufficient to check whether that last block was
+        // or was not an object at this point).
+        uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
+                                              - lowest_non_clean_base_chunk_index;
+        const uintptr_t last_chunk_index    = addr_to_chunk_index(used.last())
+                                              - lowest_non_clean_base_chunk_index;
+        if (last_chunk_index_to_check > last_chunk_index) {
+          assert(last_block + last_block_size > used.end(),
+                 err_msg("Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
+                         " does not exceed used.end() = " PTR_FORMAT ","
+                         " yet last_chunk_index_to_check " INTPTR_FORMAT
+                         " exceeds last_chunk_index " INTPTR_FORMAT,
+                         last_block, last_block + last_block_size, used.end(),
+                         last_chunk_index_to_check, last_chunk_index));
+          assert(sp->used_region().end() > used.end(),
+                 err_msg("Expansion did not happen: "
+                         "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
+                         sp->used_region().start(), sp->used_region().end(), used.start(), used.end()));
+          NOISY(tty->print_cr(" process_chunk_boundary: heap expanded; explicitly bounding last_chunk");)
+          last_chunk_index_to_check = last_chunk_index;
+        }
         for (uintptr_t lnc_index = cur_chunk_index + 1;
              lnc_index <= last_chunk_index_to_check;
              lnc_index++) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -339,6 +339,21 @@
   return false;
 }
 
+bool ParallelScavengeHeap::is_scavengable(const void* addr) {
+  return is_in_young((oop)addr);
+}
+
+#ifdef ASSERT
+// Don't implement this by using is_in_young().  This method is used
+// in some cases to check that is_in_young() is correct.
+bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
+  assert(is_in_reserved(p) || p == NULL,
+    "Does not work if address is non-null and outside of the heap");
+  // The order of the generations is perm (low addr), old, young (high addr)
+  return p >= old_gen()->reserved().end();
+}
+#endif
+
 // There are two levels of allocation policy here.
 //
 // When an allocation request fails, the requesting thread must invoke a VM
@@ -371,8 +386,6 @@
 // we rely on the size_policy object to force a bail out.
 HeapWord* ParallelScavengeHeap::mem_allocate(
                                      size_t size,
-                                     bool is_noref,
-                                     bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded) {
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
   assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
@@ -383,7 +396,7 @@
   // limit is being exceeded as checked below.
   *gc_overhead_limit_was_exceeded = false;
 
-  HeapWord* result = young_gen()->allocate(size, is_tlab);
+  HeapWord* result = young_gen()->allocate(size);
 
   uint loop_count = 0;
   uint gc_count = 0;
@@ -404,7 +417,7 @@
       MutexLocker ml(Heap_lock);
       gc_count = Universe::heap()->total_collections();
 
-      result = young_gen()->allocate(size, is_tlab);
+      result = young_gen()->allocate(size);
 
       // (1) If the requested object is too large to easily fit in the
       //     young_gen, or
@@ -418,21 +431,13 @@
       if (result != NULL) {
         return result;
       }
-      if (!is_tlab &&
-          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
-        result = old_gen()->allocate(size, is_tlab);
+      if (size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
+        result = old_gen()->allocate(size);
         if (result != NULL) {
           return result;
         }
       }
       if (GC_locker::is_active_and_needs_gc()) {
-        // GC is locked out. If this is a TLAB allocation,
-        // return NULL; the requestor will retry allocation
-        // of an idividual object at a time.
-        if (is_tlab) {
-          return NULL;
-        }
-
         // If this thread is not in a jni critical section, we stall
         // the requestor until the critical section has cleared and
         // GC allowed. When the critical section clears, a GC is
@@ -457,7 +462,7 @@
     if (result == NULL) {
 
       // Generate a VM operation
-      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
+      VM_ParallelGCFailedAllocation op(size, gc_count);
       VMThread::execute(&op);
 
       // Did the VM operation execute? If so, return the result directly.
@@ -511,7 +516,7 @@
     if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
         (loop_count % QueuedAllocationWarningCount == 0)) {
       warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
-              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
+              " size=%d", loop_count, size);
     }
   }
 
@@ -524,7 +529,7 @@
 // time over limit here, that is the responsibility of the heap specific
 // collection methods. This method decides where to attempt allocations,
 // and when to attempt collections, but no collection specific policy.
-HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
+HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   assert(!Universe::heap()->is_gc_active(), "not reentrant");
@@ -538,7 +543,7 @@
   // First level allocation failure, scavenge and allocate in young gen.
   GCCauseSetter gccs(this, GCCause::_allocation_failure);
   PSScavenge::invoke();
-  HeapWord* result = young_gen()->allocate(size, is_tlab);
+  HeapWord* result = young_gen()->allocate(size);
 
   // Second level allocation failure.
   //   Mark sweep and allocate in young generation.
@@ -547,28 +552,28 @@
     // Don't mark sweep twice if so.
     if (mark_sweep_invocation_count == total_invocations()) {
       invoke_full_gc(false);
-      result = young_gen()->allocate(size, is_tlab);
+      result = young_gen()->allocate(size);
     }
   }
 
   // Third level allocation failure.
   //   After mark sweep and young generation allocation failure,
   //   allocate in old generation.
-  if (result == NULL && !is_tlab) {
-    result = old_gen()->allocate(size, is_tlab);
+  if (result == NULL) {
+    result = old_gen()->allocate(size);
   }
 
   // Fourth level allocation failure. We're running out of memory.
   //   More complete mark sweep and allocate in young generation.
   if (result == NULL) {
     invoke_full_gc(true);
-    result = young_gen()->allocate(size, is_tlab);
+    result = young_gen()->allocate(size);
   }
 
   // Fifth level allocation failure.
   //   After more complete mark sweep, allocate in old generation.
-  if (result == NULL && !is_tlab) {
-    result = old_gen()->allocate(size, is_tlab);
+  if (result == NULL) {
+    result = old_gen()->allocate(size);
   }
 
   return result;
@@ -746,7 +751,7 @@
 }
 
 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
-  return young_gen()->allocate(size, true);
+  return young_gen()->allocate(size);
 }
 
 void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
@@ -886,7 +891,7 @@
 }
 
 
-void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
+void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
   // Why do we need the total_collections()-filter below?
   if (total_collections() > 0) {
     if (!silent) {
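failed_mem_allocate() above now walks five fallback levels with the TLAB cases removed. The toy sketch below mirrors that ordering (scavenge, full GC, old gen, more aggressive full GC, old gen again); ToyHeap and its members are hypothetical stand-ins, not the ParallelScavengeHeap API.

```cpp
// alloc_fallback_sketch.cpp -- ToyHeap is a hypothetical stand-in; only the
// ordering of the fallback levels mirrors failed_mem_allocate() above.
#include <cstdio>

struct ToyHeap {
  bool young_has_space = false;
  bool old_has_space   = true;
  char dummy[1];                       // non-null marker for "allocations"

  void scavenge()                { std::puts("  scavenge (young GC)"); }
  void full_gc(bool max_compact) { std::printf("  full GC (max=%d)\n", (int)max_compact); }
  void* young_alloc(size_t)      { return young_has_space ? dummy : nullptr; }
  void* old_alloc(size_t)        { return old_has_space   ? dummy : nullptr; }

  // Each level runs only if the previous one failed. TLAB requests never
  // reach this path, so falling back to the old gen is always permitted.
  void* failed_mem_allocate(size_t size) {
    scavenge();
    void* result = young_alloc(size);                                      // 1
    if (result == nullptr) { full_gc(false); result = young_alloc(size); } // 2
    if (result == nullptr) { result = old_alloc(size); }                   // 3
    if (result == nullptr) { full_gc(true);  result = young_alloc(size); } // 4
    if (result == nullptr) { result = old_alloc(size); }                   // 5
    return result;
  }
};

int main() {
  ToyHeap heap;
  void* p = heap.failed_mem_allocate(64);
  std::printf("allocation %s\n", p != nullptr ? "succeeded" : "failed");
  return 0;
}
```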
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -127,6 +127,12 @@
   // collection.
   virtual bool is_maximal_no_gc() const;
 
+  // Return true if the reference points to an object that
+  // can be moved in a partial collection.  For currently implemented
+  // generational collectors that means during a collection of
+  // the young gen.
+  virtual bool is_scavengable(const void* addr);
+
   // Does this heap support heap inspection? (+PrintClassHistogram)
   bool supports_heap_inspection() const { return true; }
 
@@ -143,6 +149,10 @@
     return perm_gen()->reserved().contains(p);
   }
 
+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void *p);
+#endif
+
   bool is_permanent(const void *p) const {    // committed part
     return perm_gen()->is_in(p);
   }
@@ -155,12 +165,13 @@
   // an excessive amount of time is being spent doing collections
   // and caused a NULL to be returned.  If a NULL is not returned,
   // "gc_time_limit_was_exceeded" has an undefined meaning.
+  HeapWord* mem_allocate(size_t size,
+                         bool* gc_overhead_limit_was_exceeded);
 
-  HeapWord* mem_allocate(size_t size,
-                         bool is_noref,
-                         bool is_tlab,
-                         bool* gc_overhead_limit_was_exceeded);
-  HeapWord* failed_mem_allocate(size_t size, bool is_tlab);
+  // Allocation attempt(s) during a safepoint. It should never be called
+  // to allocate a new TLAB as this allocation might be satisfied out
+  // of the old generation.
+  HeapWord* failed_mem_allocate(size_t size);
 
   HeapWord* permanent_mem_allocate(size_t size);
   HeapWord* failed_permanent_mem_allocate(size_t size);
@@ -184,8 +195,6 @@
   inline void invoke_scavenge();
   inline void invoke_full_gc(bool maximum_compaction);
 
-  size_t large_typearray_limit() { return FastAllocateSizeLimit; }
-
   bool supports_inline_contig_alloc() const { return !UseNUMA; }
 
   HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
@@ -243,7 +252,7 @@
   virtual void gc_threads_do(ThreadClosure* tc) const;
   virtual void print_tracing_info() const;
 
-  void verify(bool allow_dirty, bool silent, bool /* option */);
+  void verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */);
 
   void print_heap_change(size_t prev_used);
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -51,7 +51,12 @@
 }
 
 inline bool ParallelScavengeHeap::is_in_young(oop p) {
-  return young_gen()->is_in_reserved(p);
+  // Assumes the old gen address range is lower than that of the young gen.
+  const void* loc = (void*) p;
+  bool result = ((HeapWord*)p) >= young_gen()->reserved().start();
+  assert(result == young_gen()->is_in_reserved(p),
+        err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
+  return result;
 }
 
 inline bool ParallelScavengeHeap::is_in_old_or_perm(oop p) {
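The new is_in_young() reduces the membership test to a single comparison, which is valid only under the assumption stated in its comment (the old gen is reserved below the young gen). A small self-contained check of that equivalence, using made-up addresses:

```cpp
// young_range_check_sketch.cpp -- toy layout; only the comparison mirrors
// the new is_in_young() above.
#include <cassert>
#include <cstdint>

int main() {
  // Assumed layout (low to high addresses): perm | old | young.
  const uintptr_t old_start   = 0x1000;
  const uintptr_t young_start = 0x9000;
  const uintptr_t young_end   = 0xF000;

  // One comparison suffices because the old gen sits below the young gen.
  auto is_in_young = [&](uintptr_t addr) { return addr >= young_start; };

  // The cheap test agrees with the full range check for in-heap addresses.
  for (uintptr_t a = old_start; a < young_end; a += 0x400) {
    bool full_check = (a >= young_start && a < young_end);
    assert(is_in_young(a) == full_check);
  }
  return 0;
}
```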
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -182,12 +182,12 @@
 
 // Allocation. We report all successful allocations to the size policy
 // Note that the perm gen does not use this method, and should not!
-HeapWord* PSOldGen::allocate(size_t word_size, bool is_tlab) {
+HeapWord* PSOldGen::allocate(size_t word_size) {
   assert_locked_or_safepoint(Heap_lock);
-  HeapWord* res = allocate_noexpand(word_size, is_tlab);
+  HeapWord* res = allocate_noexpand(word_size);
 
   if (res == NULL) {
-    res = expand_and_allocate(word_size, is_tlab);
+    res = expand_and_allocate(word_size);
   }
 
   // Allocations in the old generation need to be reported
@@ -199,13 +199,12 @@
   return res;
 }
 
-HeapWord* PSOldGen::expand_and_allocate(size_t word_size, bool is_tlab) {
-  assert(!is_tlab, "TLAB's are not supported in PSOldGen");
+HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
   expand(word_size*HeapWordSize);
   if (GCExpandToAllocateDelayMillis > 0) {
     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
   }
-  return allocate_noexpand(word_size, is_tlab);
+  return allocate_noexpand(word_size);
 }
 
 HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -60,9 +60,8 @@
   // Used when initializing the _name field.
   static inline const char* select_name();
 
-  HeapWord* allocate_noexpand(size_t word_size, bool is_tlab) {
+  HeapWord* allocate_noexpand(size_t word_size) {
     // We assume the heap lock is held here.
-    assert(!is_tlab, "Does not support TLAB allocation");
     assert_locked_or_safepoint(Heap_lock);
     HeapWord* res = object_space()->allocate(word_size);
     if (res != NULL) {
@@ -89,7 +88,7 @@
     return (res == NULL) ? expand_and_cas_allocate(word_size) : res;
   }
 
-  HeapWord* expand_and_allocate(size_t word_size, bool is_tlab);
+  HeapWord* expand_and_allocate(size_t word_size);
   HeapWord* expand_and_cas_allocate(size_t word_size);
   void expand(size_t bytes);
   bool expand_by(size_t bytes);
@@ -164,7 +163,7 @@
 
   // Allocation. We report all successful allocations to the size policy
   // Note that the perm gen does not use this method, and should not!
-  HeapWord* allocate(size_t word_size, bool is_tlab);
+  HeapWord* allocate(size_t word_size);
 
   // Iteration.
   void oop_iterate(OopClosure* cl) { object_space()->oop_iterate(cl); }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -46,10 +46,10 @@
 
 HeapWord* PSPermGen::allocate_permanent(size_t size) {
   assert_locked_or_safepoint(Heap_lock);
-  HeapWord* obj = allocate_noexpand(size, false);
+  HeapWord* obj = allocate_noexpand(size);
 
   if (obj == NULL) {
-    obj = expand_and_allocate(size, false);
+    obj = expand_and_allocate(size);
   }
 
   return obj;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -157,7 +157,7 @@
   }
 
   // Allocation
-  HeapWord* allocate(size_t word_size, bool is_tlab) {
+  HeapWord* allocate(size_t word_size) {
     HeapWord* result = eden_space()->cas_allocate(word_size);
     return result;
   }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -33,10 +33,9 @@
 
 // The following methods are used by the parallel scavenge collector
 VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size,
-  bool is_tlab, unsigned int gc_count) :
+                                                      unsigned int gc_count) :
   VM_GC_Operation(gc_count, GCCause::_allocation_failure),
   _size(size),
-  _is_tlab(is_tlab),
   _result(NULL)
 {
 }
@@ -48,7 +47,7 @@
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
 
   GCCauseSetter gccs(heap, _gc_cause);
-  _result = heap->failed_mem_allocate(_size, _is_tlab);
+  _result = heap->failed_mem_allocate(_size);
 
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,10 @@
 class VM_ParallelGCFailedAllocation: public VM_GC_Operation {
  private:
   size_t    _size;
-  bool      _is_tlab;
   HeapWord* _result;
 
  public:
-  VM_ParallelGCFailedAllocation(size_t size, bool is_tlab,
-                                unsigned int gc_count);
+  VM_ParallelGCFailedAllocation(size_t size, unsigned int gc_count);
 
   virtual VMOp_Type type() const {
     return VMOp_ParallelGCFailedAllocation;
--- a/hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -99,14 +99,16 @@
     // vulnerable to noisy glitches. In such cases, we
     // ignore the current sample and use currently available
     // historical estimates.
-    // XXX NEEDS TO BE FIXED
-    // assert(prevSweep() + splitBirths() >= splitDeaths() + (ssize_t)count, "Conservation Principle");
-    //     ^^^^^^^^^^^^^^^^^^^^^^^^^^^    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    //     "Total Stock"                  "Not used at this block size"
+    assert(prevSweep() + splitBirths() + coalBirths()        // "Total Production Stock"
+           >= splitDeaths() + coalDeaths() + (ssize_t)count, // "Current stock + depletion"
+           "Conservation Principle");
     if (inter_sweep_current > _threshold) {
-      ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() - splitDeaths();
-      // XXX NEEDS TO BE FIXED
-      // assert(demand >= 0, "Demand should be non-negative");
+      ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() + coalBirths()
+                       - splitDeaths() - coalDeaths();
+      assert(demand >= 0,
+             err_msg("Demand (" SSIZE_FORMAT ") should be non-negative for "
+                     PTR_FORMAT " (size=" SIZE_FORMAT ")",
+                     demand, this, count));
       // Defensive: adjust for imprecision in event counting
       if (demand < 0) {
         demand = 0;
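The re-enabled assertions above rest on the accounting identity demand = prevSweep - count + splitBirths + coalBirths - splitDeaths - coalDeaths, clamped at zero. A worked example with invented numbers (only the arithmetic mirrors the patch):

```cpp
// demand_sketch.cpp -- invented numbers; only the arithmetic mirrors the
// conservation check and demand estimate asserted in the patch.
#include <cstdio>

int main() {
  long prev_sweep   = 120;  // blocks of this size present after the last sweep
  long count        = 40;   // blocks currently on the free list
  long split_births = 15, coal_births = 5;
  long split_deaths = 60, coal_deaths = 10;

  // Conservation Principle: total production stock must cover the current
  // stock plus depletion.
  bool conserved = prev_sweep + split_births + coal_births
                   >= split_deaths + coal_deaths + count;

  long demand = prev_sweep - count + split_births + coal_births
                - split_deaths - coal_deaths;
  if (demand < 0) demand = 0;   // defensive clamp, as in the code

  std::printf("conserved=%d demand=%ld\n", (int)conserved, demand);
  return 0;
}
```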
--- a/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -43,17 +43,6 @@
   _sts.initialize();
 };
 
-void ConcurrentGCThread::stopWorldAndDo(VoidClosure* op) {
-  MutexLockerEx x(Heap_lock,
-                  Mutex::_no_safepoint_check_flag);
-  // warning("CGC: about to try stopping world");
-  SafepointSynchronize::begin();
-  // warning("CGC: successfully stopped world");
-  op->do_void();
-  SafepointSynchronize::end();
-  // warning("CGC: successfully restarted world");
-}
-
 void ConcurrentGCThread::safepoint_synchronize() {
   _sts.suspend_all();
 }
--- a/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -95,8 +95,6 @@
   static int set_CGC_flag(int b)           { return _CGC_flag |= b; }
   static int reset_CGC_flag(int b)         { return _CGC_flag &= ~b; }
 
-  void stopWorldAndDo(VoidClosure* op);
-
   // All instances share this one set.
   static SuspendibleThreadSet _sts;
 
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,7 +93,7 @@
   // pure virtual.
   void pre_initialize();
 
-  // Create a new tlab
+  // Create a new tlab. All TLAB allocations must go through this.
   virtual HeapWord* allocate_new_tlab(size_t size);
 
   // Accumulate statistics on all tlabs.
@@ -109,11 +109,11 @@
 
   // Allocate an uninitialized block of the given size, or returns NULL if
   // this is impossible.
-  inline static HeapWord* common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS);
+  inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS);
 
   // Like allocate_init, but the block returned by a successful allocation
   // is guaranteed initialized to zeros.
-  inline static HeapWord* common_mem_allocate_init(size_t size, bool is_noref, TRAPS);
+  inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS);
 
   // Same as common_mem version, except memory is allocated in the permanent area
   // If there is no permanent area, revert to common_mem_allocate_noinit
@@ -269,6 +269,13 @@
   // space). If you need the more conservative answer use is_permanent().
   virtual bool is_in_permanent(const void *p) const = 0;
 
+
+#ifdef ASSERT
+  // Returns true if "p" is in the part of the
+  // heap being collected.
+  virtual bool is_in_partial_collection(const void *p) = 0;
+#endif
+
   bool is_in_permanent_or_null(const void *p) const {
     return p == NULL || is_in_permanent(p);
   }
@@ -284,11 +291,7 @@
 
   // An object is scavengable if its location may move during a scavenge.
   // (A scavenge is a GC which is not a full GC.)
-  // Currently, this just means it is not perm (and not null).
-  // This could change if we rethink what's in perm-gen.
-  bool is_scavengable(const void *p) const {
-    return !is_in_permanent_or_null(p);
-  }
+  virtual bool is_scavengable(const void *p) = 0;
 
   // Returns "TRUE" if "p" is a method oop in the
   // current heap, with high probability. This predicate
@@ -319,7 +322,6 @@
   // General obj/array allocation facilities.
   inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
   inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
-  inline static oop large_typearray_allocate(KlassHandle klass, int size, int length, TRAPS);
 
   // Special obj/array allocation facilities.
   // Some heaps may want to manage "permanent" data uniquely. These default
@@ -342,16 +344,12 @@
   // Raw memory allocation facilities
   // The obj and array allocate methods are covers for these methods.
   // The permanent allocation method should default to mem_allocate if
-  // permanent memory isn't supported.
+  // permanent memory isn't supported. mem_allocate() should never be
+  // called to allocate TLABs, only individual objects.
   virtual HeapWord* mem_allocate(size_t size,
-                                 bool is_noref,
-                                 bool is_tlab,
                                  bool* gc_overhead_limit_was_exceeded) = 0;
   virtual HeapWord* permanent_mem_allocate(size_t size) = 0;
 
-  // The boundary between a "large" and "small" array of primitives, in words.
-  virtual size_t large_typearray_limit() = 0;
-
   // Utilities for turning raw memory into filler objects.
   //
   // min_fill_size() is the smallest region that can be filled.
@@ -603,7 +601,7 @@
   virtual void print_tracing_info() const = 0;
 
   // Heap verification
-  virtual void verify(bool allow_dirty, bool silent, bool option) = 0;
+  virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;
 
   // Non product verification and debugging.
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -122,7 +122,7 @@
   post_allocation_notify(klass, (oop)obj);
 }
 
-HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS) {
+HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, TRAPS) {
 
   // Clear unhandled oops for memory allocation.  Memory allocation might
   // not take out a lock if from tlab, so clear here.
@@ -133,7 +133,6 @@
     return NULL;  // caller does a CHECK_0 too
   }
 
-  // We may want to update this, is_noref objects might not be allocated in TLABs.
   HeapWord* result = NULL;
   if (UseTLAB) {
     result = CollectedHeap::allocate_from_tlab(THREAD, size);
@@ -145,8 +144,6 @@
   }
   bool gc_overhead_limit_was_exceeded = false;
   result = Universe::heap()->mem_allocate(size,
-                                          is_noref,
-                                          false,
                                           &gc_overhead_limit_was_exceeded);
   if (result != NULL) {
     NOT_PRODUCT(Universe::heap()->
@@ -183,8 +180,8 @@
   }
 }
 
-HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, bool is_noref, TRAPS) {
-  HeapWord* obj = common_mem_allocate_noinit(size, is_noref, CHECK_NULL);
+HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, TRAPS) {
+  HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL);
   init_obj(obj, size);
   return obj;
 }
@@ -255,7 +252,7 @@
   debug_only(check_for_valid_allocation_state());
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
-  HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
+  HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
   post_allocation_setup_obj(klass, obj, size);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
@@ -268,20 +265,7 @@
   debug_only(check_for_valid_allocation_state());
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
-  HeapWord* obj = common_mem_allocate_init(size, false, CHECK_NULL);
-  post_allocation_setup_array(klass, obj, size, length);
-  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
-  return (oop)obj;
-}
-
-oop CollectedHeap::large_typearray_allocate(KlassHandle klass,
-                                            int size,
-                                            int length,
-                                            TRAPS) {
-  debug_only(check_for_valid_allocation_state());
-  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
-  assert(size >= 0, "int won't convert to size_t");
-  HeapWord* obj = common_mem_allocate_init(size, true, CHECK_NULL);
+  HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
   post_allocation_setup_array(klass, obj, size, length);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
--- a/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -203,11 +203,14 @@
   if (value == NULL) {
     st->print_cr(" NULL");
   } else if (java_lang_String::is_instance(value)) {
-    EXCEPTION_MARK;
-    Handle h_value (THREAD, value);
-    Symbol* sym = java_lang_String::as_symbol(h_value, CATCH);
-    print_symbol(sym, st);
-    sym->decrement_refcount();
+    char buf[40];
+    int len = java_lang_String::utf8_length(value);
+    java_lang_String::as_utf8_string(value, buf, sizeof(buf));
+    if (len >= (int)sizeof(buf)) {
+      st->print_cr(" %s...[%d]", buf, len);
+    } else {
+      st->print_cr(" %s", buf);
+    }
   } else {
     st->print_cr(" " PTR_FORMAT, (intptr_t) value);
   }
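The bytecodeTracer change above prints string constants into a fixed 40-byte scratch buffer and appends "...[len]" when the constant is longer. A standalone sketch of the same idea using snprintf (a hypothetical helper, not the HotSpot printing API):

```cpp
// truncated_print_sketch.cpp -- hypothetical helper; mirrors the fixed-size
// buffer plus "...[len]" suffix used by the tracer change above.
#include <cstdio>
#include <cstring>

static void print_string_constant(const char* value) {
  char buf[40];
  int len = (int)std::strlen(value);
  std::snprintf(buf, sizeof(buf), "%s", value);  // copies at most 39 bytes + NUL
  if (len >= (int)sizeof(buf)) {
    std::printf(" %s...[%d]\n", buf, len);       // truncated: show full length
  } else {
    std::printf(" %s\n", buf);
  }
}

int main() {
  print_string_constant("short constant");
  print_string_constant("a string constant considerably longer than the "
                        "forty byte scratch buffer used while tracing bytecodes");
  return 0;
}
```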
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -139,9 +139,15 @@
   ResourceMark rm(thread);
   methodHandle m (thread, method(thread));
   Bytecode_loadconstant ldc(m, bci(thread));
-  oop result = ldc.resolve_constant(THREAD);
-  DEBUG_ONLY(ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc.cache_index()));
-  assert(result == cpce->f1(), "expected result for assembly code");
+  oop result = ldc.resolve_constant(CHECK);
+#ifdef ASSERT
+  {
+    // The bytecode wrappers aren't GC-safe so construct a new one
+    Bytecode_loadconstant ldc2(m, bci(thread));
+    ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc2.cache_index());
+    assert(result == cpce->f1(), "expected result for assembly code");
+  }
+#endif
 }
 IRT_END
 
@@ -356,25 +362,6 @@
   THROW_MSG(vmSymbols::java_lang_ClassCastException(), message);
 IRT_END
 
-// required can be either a MethodType, or a Class (for a single argument)
-// actual (if not null) can be either a MethodHandle, or an arbitrary value (for a single argument)
-IRT_ENTRY(void, InterpreterRuntime::throw_WrongMethodTypeException(JavaThread* thread,
-                                                                   oopDesc* required,
-                                                                   oopDesc* actual)) {
-  ResourceMark rm(thread);
-  char* message = SharedRuntime::generate_wrong_method_type_message(thread, required, actual);
-
-  if (ProfileTraps) {
-    note_trap(thread, Deoptimization::Reason_constraint, CHECK);
-  }
-
-  // create exception
-  THROW_MSG(vmSymbols::java_lang_invoke_WrongMethodTypeException(), message);
-}
-IRT_END
-
-
-
 // exception_handler_for_exception(...) returns the continuation address,
 // the exception oop (via TLS) and sets the bci/bcp for the continuation.
 // The exception oop is returned to make sure it is preserved over GC (it
@@ -857,7 +844,7 @@
   const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
   const int bci = branch_bcp != NULL ? method->bci_from(fr.interpreter_frame_bcp()) : InvocationEntryBci;
 
-  nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, thread);
+  nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, NULL, thread);
 
   if (osr_nm != NULL) {
     // We may need to do on-stack replacement which requires that no
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -98,7 +98,6 @@
   static void    throw_StackOverflowError(JavaThread* thread);
   static void    throw_ArrayIndexOutOfBoundsException(JavaThread* thread, char* name, jint index);
   static void    throw_ClassCastException(JavaThread* thread, oopDesc* obj);
-  static void    throw_WrongMethodTypeException(JavaThread* thread, oopDesc* mtype = NULL, oopDesc* mhandle = NULL);
   static void    create_exception(JavaThread* thread, char* name, char* message);
   static void    create_klass_exception(JavaThread* thread, char* name, oopDesc* obj);
   static address exception_handler_for_exception(JavaThread* thread, oopDesc* exception);
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -294,6 +294,16 @@
   Symbol*  method_signature  = pool->signature_ref_at(index);
   KlassHandle  current_klass(THREAD, pool->pool_holder());
 
+  if (pool->has_preresolution()
+      || (resolved_klass() == SystemDictionary::MethodHandle_klass() &&
+          methodOopDesc::is_method_handle_invoke_name(method_name))) {
+    methodOop result_oop = constantPoolOopDesc::method_at_if_loaded(pool, index);
+    if (result_oop != NULL) {
+      resolved_method = methodHandle(THREAD, result_oop);
+      return;
+    }
+  }
+
   resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
 }
 
@@ -1117,7 +1127,24 @@
   // The extra MH receiver will be inserted into the stack on every call.
   methodHandle resolved_method;
   KlassHandle current_klass(THREAD, pool->pool_holder());
-  lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, CHECK);
+  lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    if (PENDING_EXCEPTION->is_a(SystemDictionary::BootstrapMethodError_klass())) {
+      // throw these guys, since they are already wrapped
+      return;
+    }
+    if (!PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
+      // intercept only LinkageErrors which might have failed to wrap
+      return;
+    }
+    // See the "Linking Exceptions" section for the invokedynamic instruction in the JVMS.
+    Handle ex(THREAD, PENDING_EXCEPTION);
+    CLEAR_PENDING_EXCEPTION;
+    oop bsme = Klass::cast(SystemDictionary::BootstrapMethodError_klass())->java_mirror();
+    MethodHandles::raise_exception(Bytecodes::_athrow, ex(), bsme, CHECK);
+    // Java code should not return, but if it does, throw anyway
+    THROW(vmSymbols::java_lang_InternalError());
+  }
   if (resolved_method.is_null()) {
     THROW(vmSymbols::java_lang_InternalError());
   }
--- a/hotspot/src/share/vm/interpreter/rewriter.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/interpreter/rewriter.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -63,6 +63,15 @@
   _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0);
 }
 
+// Unrewrite the bytecodes if an error occurs.
+void Rewriter::restore_bytecodes() {
+  int len = _methods->length();
+
+  for (int i = len-1; i >= 0; i--) {
+    methodOop method = (methodOop)_methods->obj_at(i);
+    scan_method(method, true);
+  }
+}
 
 // Creates a constant pool cache given a CPC map
 void Rewriter::make_constant_pool_cache(TRAPS) {
@@ -133,57 +142,94 @@
 
 
 // Rewrite a classfile-order CP index into a native-order CPC index.
-void Rewriter::rewrite_member_reference(address bcp, int offset) {
+void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
   address p = bcp + offset;
-  int  cp_index    = Bytes::get_Java_u2(p);
-  int  cache_index = cp_entry_to_cp_cache(cp_index);
-  Bytes::put_native_u2(p, cache_index);
+  if (!reverse) {
+    int  cp_index    = Bytes::get_Java_u2(p);
+    int  cache_index = cp_entry_to_cp_cache(cp_index);
+    Bytes::put_native_u2(p, cache_index);
+  } else {
+    int cache_index = Bytes::get_native_u2(p);
+    int pool_index = cp_cache_entry_pool_index(cache_index);
+    Bytes::put_Java_u2(p, pool_index);
+  }
 }
 
 
-void Rewriter::rewrite_invokedynamic(address bcp, int offset) {
+void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
   address p = bcp + offset;
-  assert(p[-1] == Bytecodes::_invokedynamic, "");
-  int cp_index = Bytes::get_Java_u2(p);
-  int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
-  int cpc2 = add_secondary_cp_cache_entry(cpc);
+  assert(p[-1] == Bytecodes::_invokedynamic, "not invokedynamic bytecode");
+  if (!reverse) {
+    int cp_index = Bytes::get_Java_u2(p);
+    int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
+    int cpc2 = add_secondary_cp_cache_entry(cpc);
 
-  // Replace the trailing four bytes with a CPC index for the dynamic
-  // call site.  Unlike other CPC entries, there is one per bytecode,
-  // not just one per distinct CP entry.  In other words, the
-  // CPC-to-CP relation is many-to-one for invokedynamic entries.
-  // This means we must use a larger index size than u2 to address
-  // all these entries.  That is the main reason invokedynamic
-  // must have a five-byte instruction format.  (Of course, other JVM
-  // implementations can use the bytes for other purposes.)
-  Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
-  // Note: We use native_u4 format exclusively for 4-byte indexes.
+    // Replace the trailing four bytes with a CPC index for the dynamic
+    // call site.  Unlike other CPC entries, there is one per bytecode,
+    // not just one per distinct CP entry.  In other words, the
+    // CPC-to-CP relation is many-to-one for invokedynamic entries.
+    // This means we must use a larger index size than u2 to address
+    // all these entries.  That is the main reason invokedynamic
+    // must have a five-byte instruction format.  (Of course, other JVM
+    // implementations can use the bytes for other purposes.)
+    Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
+    // Note: We use native_u4 format exclusively for 4-byte indexes.
+  } else {
+    int cache_index = constantPoolCacheOopDesc::decode_secondary_index(
+                        Bytes::get_native_u4(p));
+    int secondary_index = cp_cache_secondary_entry_main_index(cache_index);
+    int pool_index = cp_cache_entry_pool_index(secondary_index);
+    assert(_pool->tag_at(pool_index).is_invoke_dynamic(), "wrong index");
+    // zero out 4 bytes
+    Bytes::put_Java_u4(p, 0);
+    Bytes::put_Java_u2(p, pool_index);
+  }
 }
 
 
 // Rewrite some ldc bytecodes to _fast_aldc
-void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide) {
-  assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "");
-  address p = bcp + offset;
-  int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
-  constantTag tag = _pool->tag_at(cp_index).value();
-  if (tag.is_method_handle() || tag.is_method_type()) {
-    int cache_index = cp_entry_to_cp_cache(cp_index);
-    if (is_wide) {
-      (*bcp) = Bytecodes::_fast_aldc_w;
-      assert(cache_index == (u2)cache_index, "");
-      Bytes::put_native_u2(p, cache_index);
-    } else {
-      (*bcp) = Bytecodes::_fast_aldc;
-      assert(cache_index == (u1)cache_index, "");
-      (*p) = (u1)cache_index;
+void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
+                                 bool reverse) {
+  if (!reverse) {
+    assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "not ldc bytecode");
+    address p = bcp + offset;
+    int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
+    constantTag tag = _pool->tag_at(cp_index).value();
+    if (tag.is_method_handle() || tag.is_method_type()) {
+      int cache_index = cp_entry_to_cp_cache(cp_index);
+      if (is_wide) {
+        (*bcp) = Bytecodes::_fast_aldc_w;
+        assert(cache_index == (u2)cache_index, "index overflow");
+        Bytes::put_native_u2(p, cache_index);
+      } else {
+        (*bcp) = Bytecodes::_fast_aldc;
+        assert(cache_index == (u1)cache_index, "index overflow");
+        (*p) = (u1)cache_index;
+      }
+    }
+  } else {
+    Bytecodes::Code rewritten_bc =
+              (is_wide ? Bytecodes::_fast_aldc_w : Bytecodes::_fast_aldc);
+    if ((*bcp) == rewritten_bc) {
+      address p = bcp + offset;
+      int cache_index = is_wide ? Bytes::get_native_u2(p) : (u1)(*p);
+      int pool_index = cp_cache_entry_pool_index(cache_index);
+      if (is_wide) {
+        (*bcp) = Bytecodes::_ldc_w;
+        assert(pool_index == (u2)pool_index, "index overflow");
+        Bytes::put_Java_u2(p, pool_index);
+      } else {
+        (*bcp) = Bytecodes::_ldc;
+        assert(pool_index == (u1)pool_index, "index overflow");
+        (*p) = (u1)pool_index;
+      }
     }
   }
 }
 
 
 // Rewrites a method given the index_map information
-void Rewriter::scan_method(methodOop method) {
+void Rewriter::scan_method(methodOop method, bool reverse) {
 
   int nof_jsrs = 0;
   bool has_monitor_bytecodes = false;
@@ -236,6 +282,13 @@
 #endif
           break;
         }
+        case Bytecodes::_fast_linearswitch:
+        case Bytecodes::_fast_binaryswitch: {
+#ifndef CC_INTERP
+          (*bcp) = Bytecodes::_lookupswitch;
+#endif
+          break;
+        }
         case Bytecodes::_getstatic      : // fall through
         case Bytecodes::_putstatic      : // fall through
         case Bytecodes::_getfield       : // fall through
@@ -244,16 +297,18 @@
         case Bytecodes::_invokespecial  : // fall through
         case Bytecodes::_invokestatic   :
         case Bytecodes::_invokeinterface:
-          rewrite_member_reference(bcp, prefix_length+1);
+          rewrite_member_reference(bcp, prefix_length+1, reverse);
           break;
         case Bytecodes::_invokedynamic:
-          rewrite_invokedynamic(bcp, prefix_length+1);
+          rewrite_invokedynamic(bcp, prefix_length+1, reverse);
           break;
         case Bytecodes::_ldc:
-          maybe_rewrite_ldc(bcp, prefix_length+1, false);
+        case Bytecodes::_fast_aldc:
+          maybe_rewrite_ldc(bcp, prefix_length+1, false, reverse);
           break;
         case Bytecodes::_ldc_w:
-          maybe_rewrite_ldc(bcp, prefix_length+1, true);
+        case Bytecodes::_fast_aldc_w:
+          maybe_rewrite_ldc(bcp, prefix_length+1, true, reverse);
           break;
         case Bytecodes::_jsr            : // fall through
         case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
@@ -273,12 +328,13 @@
   if (nof_jsrs > 0) {
     method->set_has_jsrs();
     // Second pass will revisit this method.
-    assert(method->has_jsrs(), "");
+    assert(method->has_jsrs(), "didn't we just set this?");
   }
 }
 
 // After constant pool is created, revisit methods containing jsrs.
 methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
+  ResourceMark rm(THREAD);
   ResolveOopMapConflicts romc(method);
   methodHandle original_method = method;
   method = romc.do_potential_rewrite(CHECK_(methodHandle()));
@@ -300,7 +356,6 @@
   return method;
 }
 
-
 void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
   ResourceMark rm(THREAD);
   Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
@@ -343,34 +398,57 @@
   }
 
   // rewrite methods, in two passes
-  int i, len = _methods->length();
+  int len = _methods->length();
 
-  for (i = len; --i >= 0; ) {
+  for (int i = len-1; i >= 0; i--) {
     methodOop method = (methodOop)_methods->obj_at(i);
     scan_method(method);
   }
 
   // allocate constant pool cache, now that we've seen all the bytecodes
-  make_constant_pool_cache(CHECK);
+  make_constant_pool_cache(THREAD);
+
+  // Restore bytecodes to their unrewritten state if there are exceptions
+  // rewriting bytecodes or allocating the cpCache
+  if (HAS_PENDING_EXCEPTION) {
+    restore_bytecodes();
+    return;
+  }
+}
 
-  for (i = len; --i >= 0; ) {
-    methodHandle m(THREAD, (methodOop)_methods->obj_at(i));
+// Relocate jsr/rets in a method.  This can't be done with the rewriter
+// stage because it can throw other exceptions, leaving the bytecodes
+// pointing at constant pool cache entries.
+// Link and check jvmti dependencies while we're iterating over the methods.
+// JSR292 code calls with a different set of methods, so two entry points.
+void Rewriter::relocate_and_link(instanceKlassHandle this_oop, TRAPS) {
+  objArrayHandle methods(THREAD, this_oop->methods());
+  relocate_and_link(this_oop, methods, THREAD);
+}
+
+void Rewriter::relocate_and_link(instanceKlassHandle this_oop,
+                                 objArrayHandle methods, TRAPS) {
+  int len = methods->length();
+  for (int i = len-1; i >= 0; i--) {
+    methodHandle m(THREAD, (methodOop)methods->obj_at(i));
 
     if (m->has_jsrs()) {
       m = rewrite_jsrs(m, CHECK);
       // Method might have gotten rewritten.
-      _methods->obj_at_put(i, m());
+      methods->obj_at_put(i, m());
     }
 
-    // Set up method entry points for compiler and interpreter.
+    // Set up method entry points for compiler and interpreter.
     m->link_method(m, CHECK);
 
+    // This is for JVMTI and unrelated to relocator but the last thing we do
 #ifdef ASSERT
     if (StressMethodComparator) {
       static int nmc = 0;
       for (int j = i; j >= 0 && j >= i-4; j--) {
         if ((++nmc % 1000) == 0)  tty->print_cr("Have run MethodComparator %d times...", nmc);
-        bool z = MethodComparator::methods_EMCP(m(), (methodOop)_methods->obj_at(j));
+        bool z = MethodComparator::methods_EMCP(m(),
+                   (methodOop)methods->obj_at(j));
         if (j == i && !z) {
           tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
           assert(z, "method must compare equal to itself");
--- a/hotspot/src/share/vm/interpreter/rewriter.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/interpreter/rewriter.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -85,13 +85,15 @@
 
   void compute_index_maps();
   void make_constant_pool_cache(TRAPS);
-  void scan_method(methodOop m);
-  methodHandle rewrite_jsrs(methodHandle m, TRAPS);
+  void scan_method(methodOop m, bool reverse = false);
   void rewrite_Object_init(methodHandle m, TRAPS);
-  void rewrite_member_reference(address bcp, int offset);
-  void rewrite_invokedynamic(address bcp, int offset);
-  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide);
+  void rewrite_member_reference(address bcp, int offset, bool reverse = false);
+  void rewrite_invokedynamic(address bcp, int offset, bool reverse = false);
+  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false);
+  // Revert bytecodes in case of an exception.
+  void restore_bytecodes();
 
+  static methodHandle rewrite_jsrs(methodHandle m, TRAPS);
  public:
   // Driver routine:
   static void rewrite(instanceKlassHandle klass, TRAPS);
@@ -100,6 +102,13 @@
   enum {
     _secondary_entry_tag = nth_bit(30)
   };
+
+  // Second pass, not gated by is_rewritten flag
+  static void relocate_and_link(instanceKlassHandle klass, TRAPS);
+  // JSR292 version to call with its own methods.
+  static void relocate_and_link(instanceKlassHandle klass,
+                                objArrayHandle methods, TRAPS);
+
 };
 
 #endif // SHARE_VM_INTERPRETER_REWRITER_HPP
--- a/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -171,7 +171,6 @@
 address    TemplateInterpreter::_throw_ArrayStoreException_entry            = NULL;
 address    TemplateInterpreter::_throw_ArithmeticException_entry            = NULL;
 address    TemplateInterpreter::_throw_ClassCastException_entry             = NULL;
-address    TemplateInterpreter::_throw_WrongMethodType_entry                = NULL;
 address    TemplateInterpreter::_throw_NullPointerException_entry           = NULL;
 address    TemplateInterpreter::_throw_StackOverflowError_entry             = NULL;
 address    TemplateInterpreter::_throw_exception_entry                      = NULL;
@@ -346,7 +345,6 @@
     Interpreter::_throw_ArrayStoreException_entry            = generate_klass_exception_handler("java/lang/ArrayStoreException"                 );
     Interpreter::_throw_ArithmeticException_entry            = generate_exception_handler("java/lang/ArithmeticException"           , "/ by zero");
     Interpreter::_throw_ClassCastException_entry             = generate_ClassCastException_handler();
-    Interpreter::_throw_WrongMethodType_entry                = generate_WrongMethodType_handler();
     Interpreter::_throw_NullPointerException_entry           = generate_exception_handler("java/lang/NullPointerException"          , NULL       );
     Interpreter::_throw_StackOverflowError_entry             = generate_StackOverflowError_handler();
   }
--- a/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -51,7 +51,6 @@
   }
   address generate_exception_handler_common(const char* name, const char* message, bool pass_oop);
   address generate_ClassCastException_handler();
-  address generate_WrongMethodType_handler();
   address generate_ArrayIndexOutOfBounds_handler(const char* name);
   address generate_continuation_for(TosState state);
   address generate_return_entry_for(TosState state, int step);
--- a/hotspot/src/share/vm/memory/blockOffsetTable.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/memory/blockOffsetTable.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -566,11 +566,17 @@
     q = n;
     n += _sp->block_size(n);
     assert(n > q,
-           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT " _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
-                   n, last, _sp->bottom(), _sp->end()));
+           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT","
+                   " while querying blk_start(" PTR_FORMAT ")"
+                   " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
+                   n, last, addr, _sp->bottom(), _sp->end()));
   }
-  assert(q <= addr, err_msg("wrong order for current (" INTPTR_FORMAT ") <= arg (" INTPTR_FORMAT ")", q, addr));
-  assert(addr <= n, err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")", addr, n));
+  assert(q <= addr,
+         err_msg("wrong order for current (" INTPTR_FORMAT ")" " <= arg (" INTPTR_FORMAT ")",
+                 q, addr));
+  assert(addr <= n,
+         err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")",
+                 addr, n));
   return q;
 }
 
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -750,10 +750,6 @@
   return NULL;
 }
 
-size_t GenCollectorPolicy::large_typearray_limit() {
-  return FastAllocateSizeLimit;
-}
-
 // Return true if any of the following is true:
 // . the allocation won't fit into the current young gen heap
 // . gc locker is occupied (jni critical section)
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -280,9 +280,6 @@
 
   HeapWord *satisfy_failed_allocation(size_t size, bool is_tlab);
 
-  // The size that defines a "large array".
-  virtual size_t large_typearray_limit();
-
   // Adaptive size policy
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -905,6 +905,10 @@
     to()->check_mangled_unused_area_complete();
   }
 
+  if (!CleanChunkPoolAsync) {
+    Chunk::clean_chunk_pool();
+  }
+
   // update the generation and space performance counters
   update_counters();
   gch->collector_policy()->counters()->update_counters();
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -434,11 +434,9 @@
 }
 
 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
-                                         bool is_large_noref,
-                                         bool is_tlab,
                                          bool* gc_overhead_limit_was_exceeded) {
   return collector_policy()->mem_allocate_work(size,
-                                               is_tlab,
+                                               false /* is_tlab */,
                                                gc_overhead_limit_was_exceeded);
 }
 
@@ -711,15 +709,6 @@
   _gen_process_strong_tasks->set_n_threads(t);
 }
 
-class AssertIsPermClosure: public OopClosure {
-public:
-  void do_oop(oop* p) {
-    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
-  }
-  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-};
-static AssertIsPermClosure assert_is_perm_closure;
-
 void GenCollectedHeap::
 gen_process_strong_roots(int level,
                          bool younger_gens_as_roots,
@@ -962,6 +951,13 @@
   }
 }
 
+bool GenCollectedHeap::is_in_young(oop p) {
+  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
+  assert(result == _gens[0]->is_in_reserved(p),
+         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
+  return result;
+}
+
 // Returns "TRUE" iff "p" points into the allocated area of the heap.
 bool GenCollectedHeap::is_in(const void* p) const {
   #ifndef ASSERT
@@ -984,10 +980,16 @@
   return false;
 }
 
-// Returns "TRUE" iff "p" points into the allocated area of the heap.
-bool GenCollectedHeap::is_in_youngest(void* p) {
-  return _gens[0]->is_in(p);
+#ifdef ASSERT
+// Don't implement this by using is_in_young().  This method is used
+// in some cases to check that is_in_young() is correct.
+bool GenCollectedHeap::is_in_partial_collection(const void* p) {
+  assert(is_in_reserved(p) || p == NULL,
+    "Does not work if address is non-null and outside of the heap");
+  // The order of the generations is young (low addr), old, perm (high addr)
+  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
 }
+#endif
 
 void GenCollectedHeap::oop_iterate(OopClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
@@ -1116,11 +1118,9 @@
 
 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
   bool gc_overhead_limit_was_exceeded;
-  HeapWord* result = mem_allocate(size   /* size */,
-                                  false  /* is_large_noref */,
-                                  true   /* is_tlab */,
-                                  &gc_overhead_limit_was_exceeded);
-  return result;
+  return collector_policy()->mem_allocate_work(size /* size */,
+                                               true /* is_tlab */,
+                                               &gc_overhead_limit_was_exceeded);
 }
 
 // Requires "*prev_ptr" to be non-NULL.  Deletes and a block of minimal size
@@ -1175,10 +1175,6 @@
   }
 }
 
-size_t GenCollectedHeap::large_typearray_limit() {
-  return gen_policy()->large_typearray_limit();
-}
-
 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
   void do_generation(Generation* gen) {
     gen->prepare_for_verify();
@@ -1256,7 +1252,7 @@
   return _gens[level]->gc_stats();
 }
 
-void GenCollectedHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
+void GenCollectedHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
   if (!silent) {
     gclog_or_tty->print("permgen ");
   }
@@ -1388,6 +1384,10 @@
   generation_iterate(&blk, false);  // not old-to-young.
   perm_gen()->gc_epilogue(full);
 
+  if (!CleanChunkPoolAsync) {
+    Chunk::clean_chunk_pool();
+  }
+
   always_do_update_barrier = UseConcMarkSweepGC;
 };
 
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -161,8 +161,6 @@
   size_t max_capacity() const;
 
   HeapWord* mem_allocate(size_t size,
-                         bool   is_large_noref,
-                         bool   is_tlab,
                          bool*  gc_overhead_limit_was_exceeded);
 
   // We may support a shared contiguous allocation area, if the youngest
@@ -216,8 +214,18 @@
     }
   }
 
-  // Returns "TRUE" iff "p" points into the youngest generation.
-  bool is_in_youngest(void* p);
+  // Returns true if the reference is to an object in the reserved space
+  // for the young generation.
+  // Assumes the young gen address range is less than that of the old gen.
+  bool is_in_young(oop p);
+
+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void* p);
+#endif
+
+  virtual bool is_scavengable(const void* addr) {
+    return is_in_young((oop)addr);
+  }
 
   // Iteration functions.
   void oop_iterate(OopClosure* cl);
@@ -283,7 +291,7 @@
     //       "Check can_elide_initializing_store_barrier() for this collector");
     // but unfortunately the flag UseSerialGC need not necessarily always
     // be set when DefNew+Tenured are being used.
-    return is_in_youngest((void*)new_obj);
+    return is_in_young(new_obj);
   }
 
   // Can a compiler elide a store barrier when it writes
@@ -305,8 +313,6 @@
   // contributed as it needs.
   void release_scratch();
 
-  size_t large_typearray_limit();
-
   // Ensure parsability: override
   virtual void ensure_parsability(bool retire_tlabs);
 
@@ -351,7 +357,7 @@
   void prepare_for_verify();
 
   // Override.
-  void verify(bool allow_dirty, bool silent, bool /* option */);
+  void verify(bool allow_dirty, bool silent, VerifyOption option);
 
   // Override.
   void print() const;
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -102,6 +102,17 @@
 };
 static AssertIsPermClosure assert_is_perm_closure;
 
+#ifdef ASSERT
+class AssertNonScavengableClosure: public OopClosure {
+public:
+  virtual void do_oop(oop* p) {
+    assert(!Universe::heap()->is_in_partial_collection(*p),
+      "Referent should not be scavengable.");  }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+static AssertNonScavengableClosure assert_is_non_scavengable_closure;
+#endif
+
 void SharedHeap::change_strong_roots_parity() {
   // Also set the new collection parity.
   assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
@@ -196,9 +207,10 @@
         CodeCache::scavenge_root_nmethods_do(code_roots);
       }
     }
-    // Verify if the code cache contents are in the perm gen
-    NOT_PRODUCT(CodeBlobToOopClosure assert_code_is_perm(&assert_is_perm_closure, /*do_marking=*/ false));
-    NOT_PRODUCT(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_perm));
+    // Verify that the code cache contents are not subject to
+    // movement by a scavenging collection.
+    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
+    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
   }
 
   if (!collecting_perm_gen) {
--- a/hotspot/src/share/vm/memory/universe.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/memory/universe.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1278,7 +1278,7 @@
   st->print_cr("}");
 }
 
-void Universe::verify(bool allow_dirty, bool silent, bool option) {
+void Universe::verify(bool allow_dirty, bool silent, VerifyOption option) {
   if (SharedSkipVerify) {
     return;
   }
--- a/hotspot/src/share/vm/memory/universe.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/memory/universe.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -109,6 +109,14 @@
   bool    _use_implicit_null_checks;
 };
 
+enum VerifyOption {
+      VerifyOption_Default = 0,
+
+      // G1
+      VerifyOption_G1UsePrevMarking = VerifyOption_Default,
+      VerifyOption_G1UseNextMarking = VerifyOption_G1UsePrevMarking + 1,
+      VerifyOption_G1UseMarkWord    = VerifyOption_G1UseNextMarking + 1
+};
 
 class Universe: AllStatic {
   // Ugh.  Universe is much too friendly.
@@ -404,7 +412,8 @@
 
   // Debugging
   static bool verify_in_progress() { return _verify_in_progress; }
-  static void verify(bool allow_dirty = true, bool silent = false, bool option = true);
+  static void verify(bool allow_dirty = true, bool silent = false,
+                     VerifyOption option = VerifyOption_Default );
   static int  verify_count()                  { return _verify_count; }
   static void print();
   static void print_on(outputStream* st);
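Aside: the verify() signature change replaces an opaque bool with the new VerifyOption enum. A small sketch of what that buys at a call site; the verify() function below is invented for illustration and only the enum values mirror the patch:

// verify_option_sketch.cpp -- why the bool parameter became an enum.
#include <iostream>

enum VerifyOption {
  VerifyOption_Default          = 0,
  VerifyOption_G1UsePrevMarking = VerifyOption_Default,
  VerifyOption_G1UseNextMarking = VerifyOption_G1UsePrevMarking + 1,
  VerifyOption_G1UseMarkWord    = VerifyOption_G1UseNextMarking + 1
};

// A call site now states which marking information the verifier should use,
// instead of passing an unexplained true/false.
static void verify(bool silent, VerifyOption option = VerifyOption_Default) {
  if (!silent) std::cout << "verifying with option " << option << "\n";
}

int main() {
  verify(false);                                  // default
  verify(false, VerifyOption_G1UseNextMarking);   // self-documenting call
  return 0;
}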
--- a/hotspot/src/share/vm/oops/constantPoolKlass.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -310,10 +310,14 @@
     st->print(" - flags: 0x%x", cp->flags());
     if (cp->has_pseudo_string()) st->print(" has_pseudo_string");
     if (cp->has_invokedynamic()) st->print(" has_invokedynamic");
+    if (cp->has_preresolution()) st->print(" has_preresolution");
     st->cr();
   }
+  if (cp->pool_holder() != NULL) {
+    bool extra = (instanceKlass::cast(cp->pool_holder())->constants() != cp);
+    st->print_cr(" - holder: " INTPTR_FORMAT "%s", cp->pool_holder(), (extra? " (extra)" : ""));
+  }
   st->print_cr(" - cache: " INTPTR_FORMAT, cp->cache());
-
   for (int index = 1; index < cp->length(); index++) {      // Index 0 is unused
     st->print(" - %3d : ", index);
     cp->tag_at(index).print_on(st);
@@ -414,10 +418,15 @@
   st->print("constant pool [%d]", cp->length());
   if (cp->has_pseudo_string()) st->print("/pseudo_string");
   if (cp->has_invokedynamic()) st->print("/invokedynamic");
+  if (cp->has_preresolution()) st->print("/preresolution");
   if (cp->operands() != NULL)  st->print("/operands[%d]", cp->operands()->length());
   cp->print_address_on(st);
   st->print(" for ");
   cp->pool_holder()->print_value_on(st);
+  if (cp->pool_holder() != NULL) {
+    bool extra = (instanceKlass::cast(cp->pool_holder())->constants() != cp);
+    if (extra)  st->print(" (extra)");
+  }
   if (cp->cache() != NULL) {
     st->print(" cache=" PTR_FORMAT, cp->cache());
   }
--- a/hotspot/src/share/vm/oops/constantPoolOop.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/oops/constantPoolOop.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -266,6 +266,29 @@
 }
 
 
+methodOop constantPoolOopDesc::method_at_if_loaded(constantPoolHandle cpool,
+                                                   int which, Bytecodes::Code invoke_code) {
+  assert(!constantPoolCacheOopDesc::is_secondary_index(which), "no indy instruction here");
+  if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
+  int cache_index = which - CPCACHE_INDEX_TAG;
+  if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) {
+    if (PrintMiscellaneous && (Verbose||WizardMode)) {
+      tty->print_cr("bad operand %d for %d in:", which, invoke_code); cpool->print();
+    }
+    return NULL;
+  }
+  ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
+  if (invoke_code != Bytecodes::_illegal)
+    return e->get_method_if_resolved(invoke_code, cpool);
+  Bytecodes::Code bc;
+  if ((bc = e->bytecode_1()) != (Bytecodes::Code)0)
+    return e->get_method_if_resolved(bc, cpool);
+  if ((bc = e->bytecode_2()) != (Bytecodes::Code)0)
+    return e->get_method_if_resolved(bc, cpool);
+  return NULL;
+}
+
+
 Symbol* constantPoolOopDesc::impl_name_ref_at(int which, bool uncached) {
   int name_index = name_ref_index_at(impl_name_and_type_ref_index_at(which, uncached));
   return symbol_at(name_index);
--- a/hotspot/src/share/vm/oops/constantPoolOop.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/oops/constantPoolOop.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -103,7 +103,8 @@
 
   enum FlagBit {
     FB_has_invokedynamic = 1,
-    FB_has_pseudo_string = 2
+    FB_has_pseudo_string = 2,
+    FB_has_preresolution = 3
   };
 
   int flags() const                         { return _flags; }
@@ -179,8 +180,10 @@
 
   bool has_pseudo_string() const            { return flag_at(FB_has_pseudo_string); }
   bool has_invokedynamic() const            { return flag_at(FB_has_invokedynamic); }
+  bool has_preresolution() const            { return flag_at(FB_has_preresolution); }
   void set_pseudo_string()                  {    set_flag_at(FB_has_pseudo_string); }
   void set_invokedynamic()                  {    set_flag_at(FB_has_invokedynamic); }
+  void set_preresolution()                  {    set_flag_at(FB_has_preresolution); }
 
   // Klass holding pool
   klassOop pool_holder() const              { return _pool_holder; }
@@ -663,6 +666,8 @@
   friend class SystemDictionary;
 
   // Used by compiler to prevent classloading.
+  static methodOop method_at_if_loaded        (constantPoolHandle this_oop, int which,
+                                               Bytecodes::Code bc = Bytecodes::_illegal);
   static klassOop klass_at_if_loaded          (constantPoolHandle this_oop, int which);
   static klassOop klass_ref_at_if_loaded      (constantPoolHandle this_oop, int which);
   // Same as above - but does LinkResolving.
--- a/hotspot/src/share/vm/oops/cpCacheOop.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/oops/cpCacheOop.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -295,6 +295,50 @@
 }
 
 
+methodOop ConstantPoolCacheEntry::get_method_if_resolved(Bytecodes::Code invoke_code, constantPoolHandle cpool) {
+  assert(invoke_code > (Bytecodes::Code)0, "bad query");
+  if (is_secondary_entry()) {
+    return cpool->cache()->entry_at(main_entry_index())->get_method_if_resolved(invoke_code, cpool);
+  }
+  // Decode the action of set_method and set_interface_call
+  if (bytecode_1() == invoke_code) {
+    oop f1 = _f1;
+    if (f1 != NULL) {
+      switch (invoke_code) {
+      case Bytecodes::_invokeinterface:
+        assert(f1->is_klass(), "");
+        return klassItable::method_for_itable_index(klassOop(f1), (int) f2());
+      case Bytecodes::_invokestatic:
+      case Bytecodes::_invokespecial:
+        assert(f1->is_method(), "");
+        return methodOop(f1);
+      }
+    }
+  }
+  if (bytecode_2() == invoke_code) {
+    switch (invoke_code) {
+    case Bytecodes::_invokevirtual:
+      if (is_vfinal()) {
+        // invokevirtual
+        methodOop m = methodOop((intptr_t) f2());
+        assert(m->is_method(), "");
+        return m;
+      } else {
+        int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
+        if (cpool->tag_at(holder_index).is_klass()) {
+          klassOop klass = cpool->resolved_klass_at(holder_index);
+          if (!Klass::cast(klass)->oop_is_instance())
+            klass = SystemDictionary::Object_klass();
+          return instanceKlass::cast(klass)->method_at_vtable((int) f2());
+        }
+      }
+    }
+  }
+  return NULL;
+}
+
+
+
 class LocalOopClosure: public OopClosure {
  private:
   void (*_f)(oop*);
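Aside: method_at_if_loaded() and get_method_if_resolved() exist so the compiler can ask "is this call target already resolved?" without ever triggering class loading. A minimal sketch of that contract, using a toy cache type rather than the ConstantPoolCacheEntry layout; names below are invented:

// resolved_cache_sketch.cpp -- "answer only if already resolved", in miniature.
#include <cassert>
#include <string>
#include <unordered_map>

struct Method { std::string name; };

class ResolvedCache {
  std::unordered_map<int, Method*> resolved_;
 public:
  void resolve(int index, Method* m) { resolved_[index] = m; }

  // The compiler-facing query never resolves anything itself: it returns the
  // method only if an earlier execution already filled in the entry.
  Method* method_if_resolved(int index) const {
    auto it = resolved_.find(index);
    return it == resolved_.end() ? nullptr : it->second;
  }
};

int main() {
  ResolvedCache cache;
  Method m{"foo"};
  assert(cache.method_if_resolved(3) == nullptr);   // nothing loaded yet
  cache.resolve(3, &m);
  assert(cache.method_if_resolved(3) == &m);
  return 0;
}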
--- a/hotspot/src/share/vm/oops/cpCacheOop.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/oops/cpCacheOop.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -194,6 +194,8 @@
     methodHandle signature_invoker               // determines signature information
   );
 
+  methodOop get_method_if_resolved(Bytecodes::Code invoke_code, constantPoolHandle cpool);
+
   // For JVM_CONSTANT_InvokeDynamic cache entries:
   void initialize_bootstrap_method_index_in_cache(int bsm_cache_index);
   int  bootstrap_method_index_in_cache();
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -335,6 +335,9 @@
         this_oop->rewrite_class(CHECK_false);
       }
 
+      // relocate jsrs and link methods after they are all rewritten
+      this_oop->relocate_and_link_methods(CHECK_false);
+
       // Initialize the vtable and interface table after
       // methods have been rewritten since rewrite may
       // fabricate new methodOops.
@@ -365,17 +368,8 @@
 
 
 // Rewrite the byte codes of all of the methods of a class.
-// Three cases:
-//    During the link of a newly loaded class.
-//    During the preloading of classes to be written to the shared spaces.
-//      - Rewrite the methods and update the method entry points.
-//
-//    During the link of a class in the shared spaces.
-//      - The methods were already rewritten, update the metho entry points.
-//
 // The rewriter must be called exactly once. Rewriting must happen after
 // verification but before the first method of the class is executed.
-
 void instanceKlass::rewrite_class(TRAPS) {
   assert(is_loaded(), "must be loaded");
   instanceKlassHandle this_oop(THREAD, this->as_klassOop());
@@ -383,10 +377,19 @@
     assert(this_oop()->is_shared(), "rewriting an unshared class?");
     return;
   }
-  Rewriter::rewrite(this_oop, CHECK); // No exception can happen here
+  Rewriter::rewrite(this_oop, CHECK);
   this_oop->set_rewritten();
 }
 
+// Now relocate and link method entry points after class is rewritten.
+// This is not gated by the is_rewritten flag, so in case of an exception it
+// can be executed more than once.
+void instanceKlass::relocate_and_link_methods(TRAPS) {
+  assert(is_loaded(), "must be loaded");
+  instanceKlassHandle this_oop(THREAD, this->as_klassOop());
+  Rewriter::relocate_and_link(this_oop, CHECK);
+}
+
 
 void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
   // Make sure klass is linked (verified) before initialization
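Aside: link_class now has two distinct phases: rewrite_class() runs exactly once, while relocate_and_link_methods() may be retried if it throws (for example when the code cache is full). A minimal sketch of that retry-safe split, assuming a hypothetical Method type; this is not HotSpot code:

// link_once_sketch.cpp -- rewrite once, link idempotently.
#include <cassert>
#include <vector>

struct Method {
  bool  rewritten = false;     // may only be set once
  void* i2i_entry = nullptr;   // non-null once linked
};

static void rewrite_class(std::vector<Method>& methods) {
  for (Method& m : methods) {
    assert(!m.rewritten && "rewriting must happen exactly once");
    m.rewritten = true;
  }
}

// Linking may fail partway through and be retried; methods that already have
// an entry point are simply skipped on the next attempt.
static void relocate_and_link(std::vector<Method>& methods) {
  for (Method& m : methods) {
    if (m.i2i_entry != nullptr) continue;   // already linked on an earlier try
    m.i2i_entry = &m;                       // stand-in for generating an entry
  }
}

int main() {
  std::vector<Method> methods(3);
  rewrite_class(methods);
  relocate_and_link(methods);   // safe to call again after a failure
  relocate_and_link(methods);
  return 0;
}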
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -392,6 +392,7 @@
   bool link_class_or_fail(TRAPS); // returns false on failure
   void unlink_class();
   void rewrite_class(TRAPS);
+  void relocate_and_link_methods(TRAPS);
   methodOop class_initializer();
 
   // set the class to initialized if no static initializer is present
--- a/hotspot/src/share/vm/oops/instanceRefKlass.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -397,7 +397,7 @@
 
   if (referent != NULL) {
     guarantee(referent->is_oop(), "referent field heap failed");
-    if (gch != NULL && !gch->is_in_youngest(obj)) {
+    if (gch != NULL && !gch->is_in_young(obj)) {
       // We do a specific remembered set check here since the referent
       // field is not part of the oop mask and therefore skipped by the
       // regular verify code.
@@ -415,7 +415,7 @@
   if (next != NULL) {
     guarantee(next->is_oop(), "next field verify failed");
     guarantee(next->is_instanceRef(), "next field verify failed");
-    if (gch != NULL && !gch->is_in_youngest(obj)) {
+    if (gch != NULL && !gch->is_in_young(obj)) {
       // We do a specific remembered set check here since the next field is
       // not part of the oop mask and therefore skipped by the regular
       // verify code.
--- a/hotspot/src/share/vm/oops/methodOop.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/oops/methodOop.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -49,6 +49,7 @@
 #include "runtime/relocator.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
+#include "utilities/quickSort.hpp"
 #include "utilities/xmlstream.hpp"
 
 
@@ -693,7 +694,10 @@
 // Called when the method_holder is getting linked. Setup entrypoints so the method
 // is ready to be called from interpreter, compiler, and vtables.
 void methodOopDesc::link_method(methodHandle h_method, TRAPS) {
-  assert(_i2i_entry == NULL, "should only be called once");
+  // If the code cache is full, we may reenter this function for the
+  // leftover methods that weren't linked.
+  if (_i2i_entry != NULL) return;
+
   assert(_adapter == NULL, "init'd to NULL" );
   assert( _code == NULL, "nothing compiled yet" );
 
@@ -925,14 +929,40 @@
   name->increment_refcount();
   signature->increment_refcount();
 
+  // record non-BCP method types in the constant pool
+  GrowableArray<KlassHandle>* extra_klasses = NULL;
+  for (int i = -1, len = java_lang_invoke_MethodType::ptype_count(method_type()); i < len; i++) {
+    oop ptype = (i == -1
+                 ? java_lang_invoke_MethodType::rtype(method_type())
+                 : java_lang_invoke_MethodType::ptype(method_type(), i));
+    klassOop klass = check_non_bcp_klass(java_lang_Class::as_klassOop(ptype));
+    if (klass != NULL) {
+      if (extra_klasses == NULL)
+        extra_klasses = new GrowableArray<KlassHandle>(len+1);
+      bool dup = false;
+      for (int j = 0; j < extra_klasses->length(); j++) {
+        if (extra_klasses->at(j) == klass) { dup = true; break; }
+      }
+      if (!dup)
+        extra_klasses->append(KlassHandle(THREAD, klass));
+    }
+  }
+
+  int extra_klass_count = (extra_klasses == NULL ? 0 : extra_klasses->length());
+  int cp_length = _imcp_limit + extra_klass_count;
   constantPoolHandle cp;
   {
-    constantPoolOop cp_oop = oopFactory::new_constantPool(_imcp_limit, IsSafeConc, CHECK_(empty));
+    constantPoolOop cp_oop = oopFactory::new_constantPool(cp_length, IsSafeConc, CHECK_(empty));
     cp = constantPoolHandle(THREAD, cp_oop);
   }
   cp->symbol_at_put(_imcp_invoke_name,       name);
   cp->symbol_at_put(_imcp_invoke_signature,  signature);
   cp->string_at_put(_imcp_method_type_value, Universe::the_null_string());
+  for (int j = 0; j < extra_klass_count; j++) {
+    KlassHandle klass = extra_klasses->at(j);
+    cp->klass_at_put(_imcp_limit + j, klass());
+  }
+  cp->set_preresolution();
   cp->set_pool_holder(holder());
 
   // set up the fancy stuff:
@@ -981,6 +1011,14 @@
   return m;
 }
 
+klassOop methodOopDesc::check_non_bcp_klass(klassOop klass) {
+  if (klass != NULL && Klass::cast(klass)->class_loader() != NULL) {
+    if (Klass::cast(klass)->oop_is_objArray())
+      klass = objArrayKlass::cast(klass)->bottom_klass();
+    return klass;
+  }
+  return NULL;
+}
 
 
 methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
@@ -1204,41 +1242,6 @@
   if (WizardMode) signature()->print_symbol_on(st);
 }
 
-
-extern "C" {
-  static int method_compare(methodOop* a, methodOop* b) {
-    return (*a)->name()->fast_compare((*b)->name());
-  }
-
-  // Prevent qsort from reordering a previous valid sort by
-  // considering the address of the methodOops if two methods
-  // would otherwise compare as equal.  Required to preserve
-  // optimal access order in the shared archive.  Slower than
-  // method_compare, only used for shared archive creation.
-  static int method_compare_idempotent(methodOop* a, methodOop* b) {
-    int i = method_compare(a, b);
-    if (i != 0) return i;
-    return ( a < b ? -1 : (a == b ? 0 : 1));
-  }
-
-  // We implement special compare versions for narrow oops to avoid
-  // testing for UseCompressedOops on every comparison.
-  static int method_compare_narrow(narrowOop* a, narrowOop* b) {
-    methodOop m = (methodOop)oopDesc::load_decode_heap_oop(a);
-    methodOop n = (methodOop)oopDesc::load_decode_heap_oop(b);
-    return m->name()->fast_compare(n->name());
-  }
-
-  static int method_compare_narrow_idempotent(narrowOop* a, narrowOop* b) {
-    int i = method_compare_narrow(a, b);
-    if (i != 0) return i;
-    return ( a < b ? -1 : (a == b ? 0 : 1));
-  }
-
-  typedef int (*compareFn)(const void*, const void*);
-}
-
-
 // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
 static void reorder_based_on_method_index(objArrayOop methods,
                                           objArrayOop annotations,
@@ -1262,6 +1265,14 @@
   }
 }
 
+// Comparator for sorting an object array containing
+// methodOops.
+template <class T>
+static int method_comparator(T a, T b) {
+  methodOop m = (methodOop)oopDesc::decode_heap_oop_not_null(a);
+  methodOop n = (methodOop)oopDesc::decode_heap_oop_not_null(b);
+  return m->name()->fast_compare(n->name());
+}
 
 // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
 void methodOopDesc::sort_methods(objArrayOop methods,
@@ -1284,30 +1295,19 @@
         m->set_method_idnum(i);
       }
     }
-
-    // Use a simple bubble sort for small number of methods since
-    // qsort requires a functional pointer call for each comparison.
-    if (length < 8) {
-      bool sorted = true;
-      for (int i=length-1; i>0; i--) {
-        for (int j=0; j<i; j++) {
-          methodOop m1 = (methodOop)methods->obj_at(j);
-          methodOop m2 = (methodOop)methods->obj_at(j+1);
-          if ((uintptr_t)m1->name() > (uintptr_t)m2->name()) {
-            methods->obj_at_put(j, m2);
-            methods->obj_at_put(j+1, m1);
-            sorted = false;
-          }
-        }
-        if (sorted) break;
-          sorted = true;
+    {
+      No_Safepoint_Verifier nsv;
+      if (UseCompressedOops) {
+        QuickSort::sort<narrowOop>((narrowOop*)(methods->base()), length, method_comparator<narrowOop>, idempotent);
+      } else {
+        QuickSort::sort<oop>((oop*)(methods->base()), length, method_comparator<oop>, idempotent);
       }
-    } else {
-      compareFn compare =
-        (UseCompressedOops ?
-         (compareFn) (idempotent ? method_compare_narrow_idempotent : method_compare_narrow):
-         (compareFn) (idempotent ? method_compare_idempotent : method_compare));
-      qsort(methods->base(), length, heapOopSize, compare);
+      if (UseConcMarkSweepGC) {
+        // For CMS we need to dirty the cards for the array
+        BarrierSet* bs = Universe::heap()->barrier_set();
+        assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+        bs->write_ref_array(methods->base(), length);
+      }
     }
 
     // Sort annotations if necessary
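Aside: sort_methods() drops both the bubble-sort path and the extern "C" qsort comparators in favour of a single templated comparator driven by QuickSort::sort. A minimal sketch of the comparator-template idea, using std::sort and a toy Method type so it stands alone; in the VM the template parameter distinguishes narrowOop from oop elements:

// sort_methods_sketch.cpp -- one comparator template for both element widths.
#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

struct Method { std::string name; };

// In the patch, T is narrowOop or oop and the body decodes the heap oop;
// here T is simply the element type being sorted.
template <class T>
static bool method_comparator(const T& a, const T& b) {
  return a->name < b->name;
}

int main() {
  Method m1{"b"}, m2{"a"}, m3{"c"};
  std::vector<Method*> methods = { &m1, &m2, &m3 };
  std::sort(methods.begin(), methods.end(), method_comparator<Method*>);
  assert(methods.front() == &m2 && methods.back() == &m3);
  return 0;
}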
--- a/hotspot/src/share/vm/oops/methodOop.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/oops/methodOop.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -600,6 +600,7 @@
                                          Symbol* signature, //anything at all
                                          Handle method_type,
                                          TRAPS);
+  static klassOop check_non_bcp_klass(klassOop klass);
   // these operate only on invoke methods:
   oop method_handle_type() const;
   static jint* method_type_offsets_chain();  // series of pointer-offsets, terminated by -1
--- a/hotspot/src/share/vm/oops/typeArrayKlass.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/oops/typeArrayKlass.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -84,11 +84,7 @@
       KlassHandle h_k(THREAD, as_klassOop());
       typeArrayOop t;
       CollectedHeap* ch = Universe::heap();
-      if (size < ch->large_typearray_limit()) {
-        t = (typeArrayOop)CollectedHeap::array_allocate(h_k, (int)size, length, CHECK_NULL);
-      } else {
-        t = (typeArrayOop)CollectedHeap::large_typearray_allocate(h_k, (int)size, length, CHECK_NULL);
-      }
+      t = (typeArrayOop)CollectedHeap::array_allocate(h_k, (int)size, length, CHECK_NULL);
       assert(t->is_parsable(), "Don't publish unless parsable");
       return t;
     } else {
--- a/hotspot/src/share/vm/opto/bytecodeInfo.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/bytecodeInfo.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -35,14 +35,16 @@
 
 //=============================================================================
 //------------------------------InlineTree-------------------------------------
-InlineTree::InlineTree( Compile* c,
-                        const InlineTree *caller_tree, ciMethod* callee,
-                        JVMState* caller_jvms, int caller_bci,
-                        float site_invoke_ratio, int site_depth_adjust)
-: C(c), _caller_jvms(caller_jvms),
-  _caller_tree((InlineTree*)caller_tree),
-  _method(callee), _site_invoke_ratio(site_invoke_ratio),
-  _site_depth_adjust(site_depth_adjust),
+InlineTree::InlineTree(Compile* c,
+                       const InlineTree *caller_tree, ciMethod* callee,
+                       JVMState* caller_jvms, int caller_bci,
+                       float site_invoke_ratio, int max_inline_level) :
+  C(c),
+  _caller_jvms(caller_jvms),
+  _caller_tree((InlineTree*) caller_tree),
+  _method(callee),
+  _site_invoke_ratio(site_invoke_ratio),
+  _max_inline_level(max_inline_level),
   _count_inline_bcs(method()->code_size())
 {
   NOT_PRODUCT(_count_inlines = 0;)
@@ -66,10 +68,13 @@
 }
 
 InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms,
-                       float site_invoke_ratio, int site_depth_adjust)
-: C(c), _caller_jvms(caller_jvms), _caller_tree(NULL),
-  _method(callee_method), _site_invoke_ratio(site_invoke_ratio),
-  _site_depth_adjust(site_depth_adjust),
+                       float site_invoke_ratio, int max_inline_level) :
+  C(c),
+  _caller_jvms(caller_jvms),
+  _caller_tree(NULL),
+  _method(callee_method),
+  _site_invoke_ratio(site_invoke_ratio),
+  _max_inline_level(max_inline_level),
   _count_inline_bcs(method()->code_size())
 {
   NOT_PRODUCT(_count_inlines = 0;)
@@ -94,7 +99,7 @@
   if(callee_method->should_inline()) {
     *wci_result = *(WarmCallInfo::always_hot());
     if (PrintInlining && Verbose) {
-      CompileTask::print_inline_indent(inline_depth());
+      CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method is hot: ");
     }
     return NULL;
@@ -109,7 +114,7 @@
      size < InlineThrowMaxSize ) {
     wci_result->set_profit(wci_result->profit() * 100);
     if (PrintInlining && Verbose) {
-      CompileTask::print_inline_indent(inline_depth());
+      CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
     }
     return NULL;
@@ -149,9 +154,9 @@
 
     max_inline_size = C->freq_inline_size();
     if (size <= max_inline_size && TraceFrequencyInlining) {
-      CompileTask::print_inline_indent(inline_depth());
+      CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined frequent method (freq=%d count=%d):", freq, call_site_count);
-      CompileTask::print_inline_indent(inline_depth());
+      CompileTask::print_inline_indent(inline_level());
       callee_method->print();
       tty->cr();
     }
@@ -322,7 +327,7 @@
   if (!C->do_inlining() && InlineAccessors) {
     return "not an accessor";
   }
-  if( inline_depth() > MaxInlineLevel ) {
+  if (inline_level() > _max_inline_level) {
     return "inlining too deep";
   }
 
@@ -392,7 +397,7 @@
 //------------------------------print_inlining---------------------------------
 // Really, the failure_msg can be a success message also.
 void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const {
-  CompileTask::print_inlining(callee_method, inline_depth(), caller_bci, failure_msg ? failure_msg : "inline");
+  CompileTask::print_inlining(callee_method, inline_level(), caller_bci, failure_msg ? failure_msg : "inline");
   if (callee_method == NULL)  tty->print(" callee not monotonic or profiled");
   if (Verbose && callee_method) {
     const InlineTree *top = this;
@@ -500,25 +505,25 @@
   if (old_ilt != NULL) {
     return old_ilt;
   }
-  int new_depth_adjust = 0;
+  int max_inline_level_adjust = 0;
   if (caller_jvms->method() != NULL) {
     if (caller_jvms->method()->is_method_handle_adapter())
-      new_depth_adjust -= 1;  // don't count actions in MH or indy adapter frames
+      max_inline_level_adjust += 1;  // don't count actions in MH or indy adapter frames
     else if (callee_method->is_method_handle_invoke()) {
-      new_depth_adjust -= 1;  // don't count method handle calls from java.lang.invoke implem
+      max_inline_level_adjust += 1;  // don't count method handle calls from java.lang.invoke implem
     }
-    if (new_depth_adjust != 0 && PrintInlining) {
-      CompileTask::print_inline_indent(inline_depth());
+    if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) {
+      CompileTask::print_inline_indent(inline_level());
       tty->print_cr(" \\-> discounting inline depth");
     }
-    if (new_depth_adjust != 0 && C->log()) {
+    if (max_inline_level_adjust != 0 && C->log()) {
       int id1 = C->log()->identify(caller_jvms->method());
       int id2 = C->log()->identify(callee_method);
-      C->log()->elem("inline_depth_discount caller='%d' callee='%d'", id1, id2);
+      C->log()->elem("inline_level_discount caller='%d' callee='%d'", id1, id2);
     }
   }
-  InlineTree *ilt = new InlineTree(C, this, callee_method, caller_jvms, caller_bci, recur_frequency, _site_depth_adjust + new_depth_adjust);
-  _subtrees.append( ilt );
+  InlineTree* ilt = new InlineTree(C, this, callee_method, caller_jvms, caller_bci, recur_frequency, _max_inline_level + max_inline_level_adjust);
+  _subtrees.append(ilt);
 
   NOT_PRODUCT( _count_inlines += 1; )
 
@@ -543,7 +548,7 @@
   Compile* C = Compile::current();
 
   // Root of inline tree
-  InlineTree *ilt = new InlineTree(C, NULL, C->method(), NULL, -1, 1.0F, 0);
+  InlineTree* ilt = new InlineTree(C, NULL, C->method(), NULL, -1, 1.0F, MaxInlineLevel);
 
   return ilt;
 }
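Aside: the InlineTree change replaces the negative _site_depth_adjust with a per-subtree _max_inline_level: instead of pretending the depth is smaller, adapter frames raise the budget their callees may use. A minimal sketch of that accounting, with an invented InlineTree struct:

// inline_level_sketch.cpp -- per-subtree inline budget instead of depth adjust.
#include <cassert>

struct InlineTree {
  int level;              // depth of this node in the inline tree
  int max_inline_level;   // budget for this subtree
  bool too_deep() const { return level > max_inline_level; }
};

int main() {
  const int MaxInlineLevel = 9;                      // global default budget
  InlineTree root = { 0, MaxInlineLevel };
  // A method-handle adapter frame grants its callee one extra level rather
  // than discounting the measured depth.
  InlineTree callee = { root.level + 1, root.max_inline_level + 1 };
  assert(!root.too_deep() && !callee.too_deep());
  return 0;
}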
--- a/hotspot/src/share/vm/opto/callGenerator.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -698,6 +698,46 @@
 }
 
 
+CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMState* jvms,
+                                                       ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
+  if (method_handle->Opcode() == Op_ConP) {
+    const TypeOopPtr* oop_ptr = method_handle->bottom_type()->is_oopptr();
+    ciObject* const_oop = oop_ptr->const_oop();
+    ciMethodHandle* method_handle = const_oop->as_method_handle();
+
+    // Set the callee to have access to the class and signature in
+    // the MethodHandleCompiler.
+    method_handle->set_callee(callee);
+    method_handle->set_caller(caller);
+    method_handle->set_call_profile(profile);
+
+    // Get an adapter for the MethodHandle.
+    ciMethod* target_method = method_handle->get_method_handle_adapter();
+    if (target_method != NULL) {
+      CallGenerator* hit_cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, 1);
+      if (hit_cg != NULL && hit_cg->is_inline())
+        return hit_cg;
+    }
+  } else if (method_handle->Opcode() == Op_Phi && method_handle->req() == 3 &&
+             method_handle->in(1)->Opcode() == Op_ConP && method_handle->in(2)->Opcode() == Op_ConP) {
+    // selectAlternative idiom merging two constant MethodHandles.
+    // Generate a guard so that each can be inlined.  We might want to
+    // do more inputs at a later point, but this gets the most common
+    // case.
+    const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr();
+    ciObject* const_oop = oop_ptr->const_oop();
+    ciMethodHandle* mh = const_oop->as_method_handle();
+
+    CallGenerator* cg1 = for_method_handle_inline(method_handle->in(1), jvms, caller, callee, profile);
+    CallGenerator* cg2 = for_method_handle_inline(method_handle->in(2), jvms, caller, callee, profile);
+    if (cg1 != NULL && cg2 != NULL) {
+      return new PredictedDynamicCallGenerator(mh, cg2, cg1, PROB_FAIR);
+    }
+  }
+  return NULL;
+}
+
+
 JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
   GraphKit kit(jvms);
   PhaseGVN& gvn = kit.gvn();
@@ -707,33 +747,45 @@
     log->elem("predicted_dynamic_call bci='%d'", jvms->bci());
   }
 
-  // Get the constant pool cache from the caller class.
-  ciMethod* caller_method = jvms->method();
-  ciBytecodeStream str(caller_method);
-  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
-  ciCPCache* cpcache = str.get_cpcache();
-
-  // Get the offset of the CallSite from the constant pool cache
-  // pointer.
-  int index = str.get_method_index();
-  size_t call_site_offset = cpcache->get_f1_offset(index);
-
-  // Load the CallSite object from the constant pool cache.
-  const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
-  Node* cpcache_adr   = kit.makecon(cpcache_ptr);
-  Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
-  Node* call_site     = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);
-
-  // Load the target MethodHandle from the CallSite object.
-  Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
-  Node* target_mh  = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);
-
-  // Check if the MethodHandle is still the same.
   const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true);
   Node* predicted_mh = kit.makecon(predicted_mh_ptr);
 
-  Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(target_mh, predicted_mh));
-  Node* bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
+  Node* bol = NULL;
+  int bc = jvms->method()->java_code_at_bci(jvms->bci());
+  if (bc == Bytecodes::_invokespecial) {
+    // This is the selectAlternative idiom for guardWithTest
+    Node* receiver = kit.argument(0);
+
+    // Check if the MethodHandle is the expected one
+    Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(receiver, predicted_mh));
+    bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
+  } else {
+    assert(bc == Bytecodes::_invokedynamic, "must be");
+    // Get the constant pool cache from the caller class.
+    ciMethod* caller_method = jvms->method();
+    ciBytecodeStream str(caller_method);
+    str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
+    ciCPCache* cpcache = str.get_cpcache();
+
+    // Get the offset of the CallSite from the constant pool cache
+    // pointer.
+    int index = str.get_method_index();
+    size_t call_site_offset = cpcache->get_f1_offset(index);
+
+    // Load the CallSite object from the constant pool cache.
+    const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
+    Node* cpcache_adr   = kit.makecon(cpcache_ptr);
+    Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
+    Node* call_site     = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);
+
+    // Load the target MethodHandle from the CallSite object.
+    Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
+    Node* target_mh  = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);
+
+    // Check if the MethodHandle is still the same.
+    Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(target_mh, predicted_mh));
+    bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
+  }
   IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
   kit.set_control( gvn.transform(new(kit.C, 1) IfTrueNode (iff)));
   Node* slow_ctl = gvn.transform(new(kit.C, 1) IfFalseNode(iff));
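Aside: for_method_handle_inline() handles two shapes of receiver: a constant MethodHandle, which is inlined directly, and a Phi merging two constant handles (the selectAlternative idiom), which becomes a guard around both inlined branches. A minimal sketch of that dispatch; Node and CallGenerator below are invented stand-ins, not the C2 types:

// mh_inline_sketch.cpp -- shape of the selectAlternative handling.
struct Node { bool is_constant; Node* in1; Node* in2; };

struct CallGenerator {
  static CallGenerator* for_constant(Node*) { return new CallGenerator(); }
  static CallGenerator* guarded(CallGenerator* a, CallGenerator* b) {
    return (a && b) ? new CallGenerator() : nullptr;   // guard picks a branch
  }
};

static CallGenerator* for_method_handle_inline(Node* mh) {
  if (mh->is_constant)
    return CallGenerator::for_constant(mh);            // plain constant handle
  if (mh->in1 && mh->in2 &&
      mh->in1->is_constant && mh->in2->is_constant) {  // Phi of two constants
    CallGenerator* cg1 = for_method_handle_inline(mh->in1);
    CallGenerator* cg2 = for_method_handle_inline(mh->in2);
    return CallGenerator::guarded(cg1, cg2);
  }
  return nullptr;                                      // give up: direct call
}

int main() {
  Node a{true, nullptr, nullptr}, b{true, nullptr, nullptr};
  Node phi{false, &a, &b};
  return for_method_handle_inline(&phi) != nullptr ? 0 : 1;
}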
--- a/hotspot/src/share/vm/opto/callGenerator.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/callGenerator.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -111,6 +111,8 @@
   static CallGenerator* for_dynamic_call(ciMethod* m);   // invokedynamic
   static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);  // virtual, interface
 
+  static CallGenerator* for_method_handle_inline(Node* method_handle, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
+
   // How to generate a replace a direct call with an inline version
   static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
 
--- a/hotspot/src/share/vm/opto/cfgnode.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1556,7 +1556,9 @@
 
   Node *top = phase->C->top();
   bool new_phi = (outcnt() == 0); // transforming new Phi
-  assert(!can_reshape || !new_phi, "for igvn new phi should be hooked");
+  // No change for igvn if new phi is not hooked
+  if (new_phi && can_reshape)
+    return NULL;
 
  // There are 2 situations when only one valid phi input is left
   // (in addition to Region input).
--- a/hotspot/src/share/vm/opto/compile.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1206,11 +1206,7 @@
     // Make sure the Bottom and NotNull variants alias the same.
     // Also, make sure exact and non-exact variants alias the same.
     if( ptr == TypePtr::NotNull || ta->klass_is_exact() ) {
-      if (ta->const_oop()) {
-        tj = ta = TypeAryPtr::make(TypePtr::Constant,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
-      } else {
-        tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
-      }
+      tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
     }
   }
 
--- a/hotspot/src/share/vm/opto/doCall.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/doCall.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -123,24 +123,9 @@
       GraphKit kit(jvms);
       Node* n = kit.argument(0);
 
-      if (n->Opcode() == Op_ConP) {
-        const TypeOopPtr* oop_ptr = n->bottom_type()->is_oopptr();
-        ciObject* const_oop = oop_ptr->const_oop();
-        ciMethodHandle* method_handle = const_oop->as_method_handle();
-
-        // Set the callee to have access to the class and signature in
-        // the MethodHandleCompiler.
-        method_handle->set_callee(call_method);
-        method_handle->set_caller(caller);
-        method_handle->set_call_profile(&profile);
-
-        // Get an adapter for the MethodHandle.
-        ciMethod* target_method = method_handle->get_method_handle_adapter();
-        if (target_method != NULL) {
-          CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
-          if (hit_cg != NULL && hit_cg->is_inline())
-            return hit_cg;
-        }
+      CallGenerator* cg = CallGenerator::for_method_handle_inline(n, jvms, caller, call_method, profile);
+      if (cg != NULL) {
+        return cg;
       }
 
       return CallGenerator::for_direct_call(call_method);
@@ -157,7 +142,7 @@
       // the MethodHandleCompiler.
       method_handle->set_callee(call_method);
       method_handle->set_caller(caller);
-      method_handle->set_call_profile(&profile);
+      method_handle->set_call_profile(profile);
 
       // Get an adapter for the MethodHandle.
       ciMethod* target_method = method_handle->get_invokedynamic_adapter();
@@ -198,7 +183,7 @@
         // TO DO:  When UseOldInlining is removed, copy the ILT code elsewhere.
         float site_invoke_ratio = prof_factor;
         // Note:  ilt is for the root of this parse, not the present call site.
-        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, 0);
+        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, MaxInlineLevel);
       }
       WarmCallInfo scratch_ci;
       if (!UseOldInlining)
--- a/hotspot/src/share/vm/opto/escape.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/escape.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1747,6 +1747,25 @@
   _collecting = false;
   assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
 
+  if (EliminateLocks) {
+    // Mark locks before changing ideal graph.
+    int cnt = C->macro_count();
+    for( int i=0; i < cnt; i++ ) {
+      Node *n = C->macro_node(i);
+      if (n->is_AbstractLock()) { // Lock and Unlock nodes
+        AbstractLockNode* alock = n->as_AbstractLock();
+        if (!alock->is_eliminated()) {
+          PointsToNode::EscapeState es = escape_state(alock->obj_node());
+          assert(es != PointsToNode::UnknownEscape, "should know");
+          if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
+            // Mark it eliminated
+            alock->set_eliminated();
+          }
+        }
+      }
+    }
+  }
+
 #ifndef PRODUCT
   if (PrintEscapeAnalysis) {
     dump(); // Dump ConnectionGraph
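Aside: the escape-analysis hunk marks Lock/Unlock nodes as eliminated when the locked object provably does not escape. A minimal sketch of that decision, assuming a toy LockNode and escape-state enum rather than the ConnectionGraph API:

// lock_elision_sketch.cpp -- mark locks on non-escaping objects as eliminated.
#include <cassert>

enum EscapeState { NoEscape, ArgEscape, GlobalEscape, UnknownEscape };

struct LockNode {
  EscapeState obj_escape_state;
  bool        eliminated;
};

static void mark_eliminated_locks(LockNode* locks, int count) {
  for (int i = 0; i < count; i++) {
    EscapeState es = locks[i].obj_escape_state;
    if (es != UnknownEscape && es != GlobalEscape)
      locks[i].eliminated = true;   // no other thread can observe the object
  }
}

int main() {
  LockNode locks[2] = { { NoEscape, false }, { GlobalEscape, false } };
  mark_eliminated_locks(locks, 2);
  assert(locks[0].eliminated && !locks[1].eliminated);
  return 0;
}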
--- a/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -615,6 +615,7 @@
       }
     }
 
+#ifdef ASSERT
     if (node->debug_orig() != NULL) {
       stringStream dorigStream;
       Node* dorig = node->debug_orig();
@@ -629,6 +630,7 @@
       }
       print_prop("debug_orig", dorigStream.as_string());
     }
+#endif
 
     if (_chaitin && _chaitin != (PhaseChaitin *)0xdeadbeef) {
       buffer[0] = 0;
--- a/hotspot/src/share/vm/opto/lcm.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -45,6 +45,9 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
+#ifdef TARGET_ARCH_MODEL_ppc
+# include "adfiles/ad_ppc.hpp"
+#endif
 
 // Optimization - Graph Style
 
--- a/hotspot/src/share/vm/opto/library_call.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -5225,15 +5225,16 @@
 
   // Look at the alignment of the starting offsets.
   int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
-  const intptr_t BIG_NEG = -128;
-  assert(BIG_NEG + 2*abase < 0, "neg enough");
-
-  intptr_t src_off  = abase + ((intptr_t) find_int_con(src_offset, -1)  << scale);
-  intptr_t dest_off = abase + ((intptr_t) find_int_con(dest_offset, -1) << scale);
-  if (src_off < 0 || dest_off < 0)
+
+  intptr_t src_off_con  = (intptr_t) find_int_con(src_offset, -1);
+  intptr_t dest_off_con = (intptr_t) find_int_con(dest_offset, -1);
+  if (src_off_con < 0 || dest_off_con < 0)
     // At present, we can only understand constants.
     return false;
 
+  intptr_t src_off  = abase + (src_off_con  << scale);
+  intptr_t dest_off = abase + (dest_off_con << scale);
+
   if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
     // Non-aligned; too bad.
     // One more chance:  Pick off an initial 32-bit word.
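Aside: the library_call.cpp hunk reorders the arraycopy offset math so the code first refuses when either element offset is not a known constant, and only then scales and tests 8-byte alignment with a combined mask. A small self-contained sketch of that check; the example offsets and the helper name are invented:

// arraycopy_alignment_sketch.cpp -- constant-offset check, then alignment test.
#include <cassert>
#include <cstdint>

static bool offsets_are_long_aligned(intptr_t abase, int scale_log2,
                                     intptr_t src_off_con, intptr_t dest_off_con) {
  if (src_off_con < 0 || dest_off_con < 0)
    return false;                                   // not constants: give up early
  intptr_t src_off  = abase + (src_off_con  << scale_log2);
  intptr_t dest_off = abase + (dest_off_con << scale_log2);
  const intptr_t BytesPerLong = 8;
  return ((src_off | dest_off) & (BytesPerLong - 1)) == 0;
}

int main() {
  // e.g. an int[] copy: 16-byte header, 4-byte elements (scale_log2 = 2).
  assert(offsets_are_long_aligned(16, 2, 0, 2));    // byte offsets 16 and 24
  assert(!offsets_are_long_aligned(16, 2, 1, 2));   // byte offset 20 is misaligned
  assert(!offsets_are_long_aligned(16, 2, -1, 2));  // unknown source offset
  return 0;
}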
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -83,7 +83,7 @@
 #ifdef ASSERT
   BoolTest::mask bt = cl->loopexit()->test_trip();
   assert(bt == BoolTest::lt || bt == BoolTest::gt ||
-         (bt == BoolTest::ne && !LoopLimitCheck), "canonical test is expected");
+         bt == BoolTest::ne, "canonical test is expected");
 #endif
 
   Node* init_n = cl->init_trip();
@@ -824,13 +824,23 @@
 //------------------------------clone_up_backedge_goo--------------------------
 // If Node n lives in the back_ctrl block and cannot float, we clone a private
 // version of n in preheader_ctrl block and return that, otherwise return n.
-Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n ) {
+Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones ) {
   if( get_ctrl(n) != back_ctrl ) return n;
 
+  // Only visit once
+  if (visited.test_set(n->_idx)) {
+    Node *x = clones.find(n->_idx);
+    if (x != NULL)
+      return x;
+    return n;
+  }
+
   Node *x = NULL;               // If required, a clone of 'n'
   // Check for 'n' being pinned in the backedge.
   if( n->in(0) && n->in(0) == back_ctrl ) {
+    assert(clones.find(n->_idx) == NULL, "dead loop");
     x = n->clone();             // Clone a copy of 'n' to preheader
+    clones.push(x, n->_idx);
     x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
   }
 
@@ -838,10 +848,13 @@
   // If there are no changes we can just return 'n', otherwise
   // we need to clone a private copy and change it.
   for( uint i = 1; i < n->req(); i++ ) {
-    Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i) );
+    Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i), visited, clones );
     if( g != n->in(i) ) {
-      if( !x )
+      if( !x ) {
+        assert(clones.find(n->_idx) == NULL, "dead loop");
         x = n->clone();
+        clones.push(x, n->_idx);
+      }
       x->set_req(i, g);
     }
   }
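
The new visited/clones pair memoizes the recursion: a node reachable from several phis on the backedge is cloned at most once, and every later visit reuses the same clone. A minimal sketch of that pattern with standard containers standing in for VectorSet and Node_Stack (illustrative types, not the HotSpot classes):

#include <unordered_map>
#include <unordered_set>
#include <vector>

struct MyNode {                       // stand-in for Node
  int id;
  std::vector<MyNode*> in;
};

// Clone a DAG rooted at n, reusing an existing clone whenever a node id
// has been seen before, so shared sub-expressions are duplicated only once.
MyNode* clone_once(MyNode* n,
                   std::unordered_set<int>& visited,
                   std::unordered_map<int, MyNode*>& clones) {
  if (visited.count(n->id)) {
    auto it = clones.find(n->id);
    return (it != clones.end()) ? it->second : n;   // reuse the clone, or the original
  }
  visited.insert(n->id);
  MyNode* x = new MyNode(*n);                       // clone this node
  clones[n->id] = x;
  for (size_t i = 0; i < n->in.size(); i++)
    x->in[i] = clone_once(n->in[i], visited, clones);
  return x;
}
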
@@ -960,6 +973,9 @@
   post_head->set_req(LoopNode::EntryControl, zer_taken);
   set_idom(post_head, zer_taken, dd_main_exit);
 
+  Arena *a = Thread::current()->resource_area();
+  VectorSet visited(a);
+  Node_Stack clones(a, main_head->back_control()->outcnt());
   // Step A3: Make the fall-in values to the post-loop come from the
   // fall-out values of the main-loop.
   for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
@@ -968,7 +984,8 @@
       Node *post_phi = old_new[main_phi->_idx];
       Node *fallmain  = clone_up_backedge_goo(main_head->back_control(),
                                               post_head->init_control(),
-                                              main_phi->in(LoopNode::LoopBackControl));
+                                              main_phi->in(LoopNode::LoopBackControl),
+                                              visited, clones);
       _igvn.hash_delete(post_phi);
       post_phi->set_req( LoopNode::EntryControl, fallmain );
     }
@@ -1032,6 +1049,8 @@
   main_head->set_req(LoopNode::EntryControl, min_taken);
   set_idom(main_head, min_taken, dd_main_head);
 
+  visited.Clear();
+  clones.clear();
   // Step B3: Make the fall-in values to the main-loop come from the
   // fall-out values of the pre-loop.
   for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
@@ -1040,7 +1059,8 @@
       Node *pre_phi = old_new[main_phi->_idx];
       Node *fallpre  = clone_up_backedge_goo(pre_head->back_control(),
                                              main_head->init_control(),
-                                             pre_phi->in(LoopNode::LoopBackControl));
+                                             pre_phi->in(LoopNode::LoopBackControl),
+                                             visited, clones);
       _igvn.hash_delete(main_phi);
       main_phi->set_req( LoopNode::EntryControl, fallpre );
     }
@@ -1070,9 +1090,11 @@
   // direction:
   // positive stride use <
   // negative stride use >
+  //
+  // The not-equal test is kept for the post loop to handle the case
+  // when init > limit with stride > 0 (and the reverse).
 
   if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {
-    assert(!LoopLimitCheck, "only canonical tests (lt or gt) are expected");
 
     BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt;
     // Modify pre loop end condition
@@ -1292,9 +1314,23 @@
       }
       assert(new_limit != NULL, "");
       // Replace in loop test.
-      _igvn.hash_delete(cmp);
-      cmp->set_req(2, new_limit);
-
+      assert(loop_end->in(1)->in(1) == cmp, "sanity");
+      if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) {
+        // Don't need to create new test since only one user.
+        _igvn.hash_delete(cmp);
+        cmp->set_req(2, new_limit);
+      } else {
+        // Create new test since it is shared.
+        Node* ctrl2 = loop_end->in(0);
+        Node* cmp2  = cmp->clone();
+        cmp2->set_req(2, new_limit);
+        register_new_node(cmp2, ctrl2);
+        Node* bol2 = loop_end->in(1)->clone();
+        bol2->set_req(1, cmp2);
+        register_new_node(bol2, ctrl2);
+        _igvn.hash_delete(loop_end);
+        loop_end->set_req(1, bol2);
+      }
       // Step 3: Find the min-trip test guaranteed before a 'main' loop.
       // Make it a 1-trip test (means at least 2 trips).
 
@@ -1453,6 +1489,23 @@
   return _phase->dom_lca_internal(ctrl, backedge) == ctrl;
 }
 
+//------------------------------adjust_limit-----------------------------------
+// Helper function for add_constraint().
+Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl) {
+  // Compute "I :: (limit-offset)/scale"
+  Node *con = new (C, 3) SubINode(rc_limit, offset);
+  register_new_node(con, pre_ctrl);
+  Node *X = new (C, 3) DivINode(0, con, scale);
+  register_new_node(X, pre_ctrl);
+
+  // Adjust loop limit
+  loop_limit = (stride_con > 0)
+               ? (Node*)(new (C, 3) MinINode(loop_limit, X))
+               : (Node*)(new (C, 3) MaxINode(loop_limit, X));
+  register_new_node(loop_limit, pre_ctrl);
+  return loop_limit;
+}
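
adjust_limit() builds Ideal nodes, but the underlying arithmetic is simple: intersect the current loop limit with X = (rc_limit - offset) / scale, taking the minimum for an up-counting loop and the maximum for a down-counting one. A plain-integer sketch with made-up example values:

#include <algorithm>
#include <cstdio>

// Plain-integer analogue of the node-building code above (a sketch only).
long adjust_limit_sketch(int stride_con, long scale, long offset,
                         long rc_limit, long loop_limit) {
  long X = (rc_limit - offset) / scale;               // "I :: (limit-offset)/scale"
  return (stride_con > 0) ? std::min(loop_limit, X)   // counting up: tighten from above
                          : std::max(loop_limit, X);  // counting down: tighten from below
}

int main() {
  // Range check a[2*i + 3] with array length 100 inside an up-counting loop
  // whose current limit is 64: the main-loop limit is tightened to (100-3)/2 = 48.
  printf("%ld\n", adjust_limit_sketch(1, 2, 3, 100, 64));   // prints 48
  return 0;
}
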
+
 //------------------------------add_constraint---------------------------------
 // Constrain the main loop iterations so the conditions:
 //    low_limit <= scale_con * I + offset  <  upper_limit
@@ -1469,7 +1522,11 @@
   // pre-loop must check for underflow and the post-loop for overflow.
   // Negative stride*scale reverses this; pre-loop checks for overflow and
   // post-loop for underflow.
-  if (stride_con*scale_con > 0) {
+
+  Node *scale = _igvn.intcon(scale_con);
+  set_ctrl(scale, C->root());
+
+  if ((stride_con^scale_con) >= 0) { // Use XOR to avoid overflow
     // The overflow limit: scale*I+offset < upper_limit
     // For main-loop compute
     //   ( if (scale > 0) /* and stride > 0 */
@@ -1478,23 +1535,10 @@
     //       I > (upper_limit-offset)/scale
     //   )
     //
-    // (upper_limit-offset) may overflow when offset < 0.
+    // (upper_limit-offset) may overflow or underflow.
     // But it is fine since main loop will either have
     // less iterations or will be skipped in such case.
-    Node *con = new (C, 3) SubINode(upper_limit, offset);
-    register_new_node(con, pre_ctrl);
-    Node *scale = _igvn.intcon(scale_con);
-    set_ctrl(scale, C->root());
-    Node *X = new (C, 3) DivINode(0, con, scale);
-    register_new_node(X, pre_ctrl);
-
-    // Adjust main-loop last iteration
-    Node *loop_limit = *main_limit;
-    loop_limit = (stride_con > 0) // scale > 0
-      ? (Node*)(new (C, 3) MinINode(loop_limit, X))
-      : (Node*)(new (C, 3) MaxINode(loop_limit, X));
-    register_new_node(loop_limit, pre_ctrl);
-    *main_limit = loop_limit;
+    *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl);
 
     // The underflow limit: low_limit <= scale*I+offset.
     // For pre-loop compute
@@ -1509,76 +1553,33 @@
     if (low_limit->get_int() == -max_jint) {
       if (!RangeLimitCheck) return;
       // We need this guard when scale*pre_limit+offset >= limit
-      // due to underflow so we need execute pre-loop until
-      // scale*I+offset >= min_int. But (low_limit-offset) will
-      // underflow when offset > 0 and X will be > original_limit.
-      // To avoid it we replace offset = offset > 0 ? 0 : offset
-      // and add min(pre_limit, original_limit).
+      // due to underflow. So we need to execute the pre-loop until
+      // scale*I+offset >= min_int. But (min_int-offset) will
+      // underflow when offset > 0 and X will be > original_limit
+      // when stride > 0. To avoid it we replace a positive offset with 0.
+      //
+      // Also (min_int+1 == -max_int) is used instead of min_int here
+      // to avoid a problem with scale == -1 (min_int/(-1) == min_int).
       Node* shift = _igvn.intcon(31);
       set_ctrl(shift, C->root());
-      Node *neg_off = new (C, 3) RShiftINode(offset, shift);
-      register_new_node(neg_off, pre_ctrl);
-      offset = new (C, 3) AndINode(offset, neg_off);
+      Node* sign = new (C, 3) RShiftINode(offset, shift);
+      register_new_node(sign, pre_ctrl);
+      offset = new (C, 3) AndINode(offset, sign);
       register_new_node(offset, pre_ctrl);
     } else {
       assert(low_limit->get_int() == 0, "wrong low limit for range check");
       // The only problem we have here when offset == min_int
-      // since (0-min_int) == min_int. It may be fine for scale > 0
-      // but for scale < 0 X will be < original_limit.
+      // since (0-min_int) == min_int. It may be fine for stride > 0,
+      // but for stride < 0 X will be < original_limit. To avoid it,
+      // max(pre_limit, original_limit) is used in do_range_check().
     }
-    con = new (C, 3) SubINode(low_limit, offset);
-    register_new_node(con, pre_ctrl);
-    scale = _igvn.intcon(scale_con);
-    set_ctrl(scale, C->root());
-    X = new (C, 3) DivINode(0, con, scale);
-    register_new_node(X, pre_ctrl);
-
-    // Adjust pre-loop last iteration
-    loop_limit = *pre_limit;
-    loop_limit = (stride_con > 0) // scale > 0
-      ? (Node*)(new (C, 3) MaxINode(loop_limit, X))
-      : (Node*)(new (C, 3) MinINode(loop_limit, X));
-    register_new_node( loop_limit, pre_ctrl );
-    *pre_limit = loop_limit;
+    // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
+    *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl);
 
   } else { // stride_con*scale_con < 0
     // For negative stride*scale pre-loop checks for overflow and
     // post-loop for underflow.
     //
-    // The underflow limit: low_limit <= scale*I+offset.
-    // For main-loop compute
-    //   scale*I+offset+1 > low_limit
-    //   ( if (scale < 0) /* and stride > 0 */
-    //       I < (low_limit-(offset+1))/scale
-    //     else /* scale < 0 and stride < 0 */
-    //       I > (low_limit-(offset+1))/scale
-    //   )
-
-    if (low_limit->get_int() == -max_jint) {
-      if (!RangeLimitCheck) return;
-    } else {
-      assert(low_limit->get_int() == 0, "wrong low limit for range check");
-    }
-
-    Node *one  = _igvn.intcon(1);
-    set_ctrl(one, C->root());
-    Node *plus_one = new (C, 3) AddINode(offset, one);
-    register_new_node( plus_one, pre_ctrl );
-    Node *con = new (C, 3) SubINode(low_limit, plus_one);
-    register_new_node(con, pre_ctrl);
-    Node *scale = _igvn.intcon(scale_con);
-    set_ctrl(scale, C->root());
-    Node *X = new (C, 3) DivINode(0, con, scale);
-    register_new_node(X, pre_ctrl);
-
-    // Adjust main-loop last iteration
-    Node *loop_limit = *main_limit;
-    loop_limit = (stride_con > 0) // scale < 0
-      ? (Node*)(new (C, 3) MinINode(loop_limit, X))
-      : (Node*)(new (C, 3) MaxINode(loop_limit, X));
-    register_new_node(loop_limit, pre_ctrl);
-    *main_limit = loop_limit;
-
     // The overflow limit: scale*I+offset < upper_limit
     // For pre-loop compute
     //   NOT(scale*I+offset < upper_limit)
@@ -1586,26 +1587,55 @@
     //   scale*I+offset+1 > upper_limit
     //   ( if (scale < 0) /* and stride > 0 */
     //       I < (upper_limit-(offset+1))/scale
-    //     else /* scale < 0 and stride < 0 */
+    //     else /* scale > 0 and stride < 0 */
     //       I > (upper_limit-(offset+1))/scale
     //   )
-    plus_one = new (C, 3) AddINode(offset, one);
+    //
+    // (upper_limit-offset-1) may underflow or overflow.
+    // To avoid it, min(pre_limit, original_limit) is used
+    // in do_range_check() for stride > 0 and max() for stride < 0.
+    Node *one  = _igvn.intcon(1);
+    set_ctrl(one, C->root());
+
+    Node *plus_one = new (C, 3) AddINode(offset, one);
     register_new_node( plus_one, pre_ctrl );
-    con = new (C, 3) SubINode(upper_limit, plus_one);
-    register_new_node(con, pre_ctrl);
-    scale = _igvn.intcon(scale_con);
-    set_ctrl(scale, C->root());
-    X = new (C, 3) DivINode(0, con, scale);
-    register_new_node(X, pre_ctrl);
+    // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
+    *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl);
 
-    // Adjust pre-loop last iteration
-    loop_limit = *pre_limit;
-    loop_limit = (stride_con > 0) // scale < 0
-      ? (Node*)(new (C, 3) MaxINode(loop_limit, X))
-      : (Node*)(new (C, 3) MinINode(loop_limit, X));
-    register_new_node( loop_limit, pre_ctrl );
-    *pre_limit = loop_limit;
+    if (low_limit->get_int() == -max_jint) {
+      if (!RangeLimitCheck) return;
+      // We need this guard when scale*main_limit+offset >= limit
+      // due to underflow. So we need to execute the main-loop while
+      // scale*I+offset+1 > min_int. But (min_int-offset-1) will
+      // underflow when (offset+1) > 0 and X will be < main_limit
+      // when scale < 0 (and stride > 0). To avoid it we replace
+      // a positive (offset+1) with 0.
+      //
+      // Also (min_int+1 == -max_int) is used instead of min_int here
+      // to avoid a problem with scale == -1 (min_int/(-1) == min_int).
+      Node* shift = _igvn.intcon(31);
+      set_ctrl(shift, C->root());
+      Node* sign = new (C, 3) RShiftINode(plus_one, shift);
+      register_new_node(sign, pre_ctrl);
+      plus_one = new (C, 3) AndINode(plus_one, sign);
+      register_new_node(plus_one, pre_ctrl);
+    } else {
+      assert(low_limit->get_int() == 0, "wrong low limit for range check");
+      // The only problem we have here is when offset == max_int,
+      // since (max_int+1) == min_int and (0-min_int) == min_int.
+      // But it is fine since the main loop will either have
+      // fewer iterations or will be skipped in such a case.
+    }
+    // The underflow limit: low_limit <= scale*I+offset.
+    // For main-loop compute
+    //   scale*I+offset+1 > low_limit
+    //   ( if (scale < 0) /* and stride > 0 */
+    //       I < (low_limit-(offset+1))/scale
+    //     else /* scale > 0 and stride < 0 */
+    //       I > (low_limit-(offset+1))/scale
+    //   )
 
+    *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl);
   }
 }
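
A concrete instance of the constraint this routine establishes, low_limit <= scale_con*I + offset < upper_limit, worked out directly from the inequality rather than traced through the compiler (hypothetical numbers):

#include <cassert>

int main() {
  // scale 2, offset -5, bounds [0, 100): 2*I - 5 < 0 holds for I <= 2, so the
  // pre-loop covers I = 0..2; 2*I - 5 < 100 holds for I <= 52, so the main
  // loop may run I = 3..52 (main_limit = 53) with no range check needed.
  const int scale = 2, offset = -5, low = 0, upper = 100;
  const int pre_limit = 3, main_limit = 53;
  for (int I = pre_limit; I < main_limit; I++) {
    int idx = scale * I + offset;
    assert(low <= idx && idx < upper);   // the check provably passes in the main loop
  }
  return 0;
}
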
 
@@ -1869,13 +1899,8 @@
           // The underflow and overflow limits: 0 <= scale*I+offset < limit
           add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit );
           if (!conditional_rc) {
-            conditional_rc = !loop->dominates_backedge(iff);
-            // It is also needed if offset->_lo == min_int since
-            // (0-min_int) == min_int. It may be fine for stride > 0
-            // but for stride < 0 pre_limit will be < original_limit.
-            const TypeInt* offset_t = _igvn.type(offset)->is_int();
-            conditional_rc |= RangeLimitCheck && (offset_t->_lo == min_jint) &&
-                              (scale_con<0) && (stride_con<0);
+            // (0-offset)/scale could be outside of the loop iteration range.
+            conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck;
           }
         } else {
 #ifndef PRODUCT
@@ -1905,16 +1930,14 @@
           // Fall into LT case
         case BoolTest::lt:
           // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit
+          // Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here
+          // to avoid a problem with scale == -1: MIN_INT/(-1) == MIN_INT.
           add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit );
           if (!conditional_rc) {
-            conditional_rc = !loop->dominates_backedge(iff);
-            // It is also needed if scale*pre_limit+offset >= limit
-            // due to underflow so we need execute pre-loop until
-            // scale*I+offset >= min_int. But (low_limit-offset) will
-            // underflow when offset > 0 and X will be > original_limit.
-            const TypeInt* offset_t = _igvn.type(offset)->is_int();
-            conditional_rc |= RangeLimitCheck && (offset_t->_hi > 0) &&
-                              (scale_con>0) && (stride_con>0);
+            // ((MIN_INT+1)-offset)/scale could be outside of the loop iteration range.
+            // Note: a positive offset is replaced with 0, but (MIN_INT+1)/scale could
+            // still be outside of the loop range.
+            conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck;
           }
           break;
         default:
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -453,7 +453,12 @@
   // Now we need to canonicalize loop condition.
   if (bt == BoolTest::ne) {
     assert(stride_con == 1 || stride_con == -1, "simple increment only");
-    bt = (stride_con > 0) ? BoolTest::lt : BoolTest::gt;
+    // 'ne' can be replaced with 'lt' only when init < limit.
+    if (stride_con > 0 && init_t->_hi < limit_t->_lo)
+      bt = BoolTest::lt;
+    // 'ne' can be replaced with 'gt' only when init > limit.
+    if (stride_con < 0 && init_t->_lo > limit_t->_hi)
+      bt = BoolTest::gt;
   }
 
   if (incl_limit) {
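
A small illustration of why the stronger init/limit checks are needed before rewriting a '!=' exit test as '<' or '>': when init is already past limit the two forms disagree (made-up values; the '!=' loop is capped so the example terminates):

#include <cstdio>

int main() {
  int init = 10, limit = 5, a = 0, b = 0;
  for (int i = init; i != limit; i++) {    // would run until the counter wraps around
    if (++a > 20) break;                   // cap it for the demonstration
  }
  for (int i = init; i < limit; i++) {     // exits immediately
    b++;
  }
  printf("ne-loop ran (capped) %d times, lt-loop ran %d times\n", a, b);
  return 0;
}
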
--- a/hotspot/src/share/vm/opto/loopnode.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/loopnode.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -843,7 +843,7 @@
   void insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only );
   // If Node n lives in the back_ctrl block, we clone a private version of n
   // in preheader_ctrl block and return that, otherwise return n.
-  Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n );
+  Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones );
 
   // Take steps to maximally unroll the loop.  Peel any odd iterations, then
   // unroll to do double iterations.  The next round of major loop transforms
@@ -932,6 +932,8 @@
   // the pre-loop or the post-loop until the condition holds true in the main
   // loop.  Scale_con, offset and limit are all loop invariant.
   void add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
+  // Helper function for add_constraint().
+  Node* adjust_limit( int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl );
 
   // Partially peel loop up through last_peel node.
   bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
--- a/hotspot/src/share/vm/opto/macro.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/macro.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -391,13 +391,9 @@
     }
   }
   // Check if an appropriate new value phi already exists.
-  Node* new_phi = NULL;
-  uint size = value_phis->size();
-  for (uint i=0; i < size; i++) {
-    if ( mem->_idx == value_phis->index_at(i) ) {
-      return value_phis->node_at(i);
-    }
-  }
+  Node* new_phi = value_phis->find(mem->_idx);
+  if (new_phi != NULL)
+    return new_phi;
 
   if (level <= 0) {
     return NULL; // Give up: phi tree too deep
@@ -1693,25 +1689,31 @@
                          OptoRuntime::new_array_Java());
 }
 
-
-// we have determined that this lock/unlock can be eliminated, we simply
-// eliminate the node without expanding it.
-//
-// Note:  The membar's associated with the lock/unlock are currently not
-//        eliminated.  This should be investigated as a future enhancement.
-//
-bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
-
+//-----------------------mark_eliminated_locking_nodes-----------------------
+// During EA, obj may point to several objects, but after a few ideal graph
+// transformations (CCP) it may point to only one non-escaping object
+// (though still through a phi), and the corresponding locks and unlocks
+// will be marked for elimination. Later obj could be replaced with a new
+// node (a new phi) which does not have escape information. And later,
+// after some graph reshaping, other locks and unlocks (which were not
+// marked for elimination before) are connected to this new obj (phi), but
+// they still will not be marked for elimination since the new obj has no
+// escape information. Mark all associated (same box and obj) lock and
+// unlock nodes for elimination if some of them are marked already.
+void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode *alock) {
   if (!alock->is_eliminated()) {
-    return false;
+    return;
   }
-  if (alock->is_Lock() && !alock->is_coarsened()) {
+  if (!alock->is_coarsened()) { // Eliminated by EA
       // Create new "eliminated" BoxLock node and use it
       // in monitor debug info for the same object.
       BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
       Node* obj = alock->obj_node();
       if (!oldbox->is_eliminated()) {
         BoxLockNode* newbox = oldbox->clone()->as_BoxLock();
+        // Note: the BoxLock node is marked eliminated only here,
+        // and that is used to indicate that all associated lock
+        // and unlock nodes are marked for elimination.
         newbox->set_eliminated();
         transform_later(newbox);
         // Replace old box node with new box for all users
@@ -1720,22 +1722,14 @@
 
           bool next_edge = true;
           Node* u = oldbox->raw_out(i);
-          if (u == alock) {
-            i++;
-            continue; // It will be removed below
-          }
-          if (u->is_Lock() &&
-              u->as_Lock()->obj_node() == obj &&
-              // oldbox could be referenced in debug info also
-              u->as_Lock()->box_node() == oldbox) {
-            assert(u->as_Lock()->is_eliminated(), "sanity");
+          if (u->is_AbstractLock() &&
+              u->as_AbstractLock()->obj_node() == obj &&
+              u->as_AbstractLock()->box_node() == oldbox) {
+            // Mark all associated locks and unlocks.
+            u->as_AbstractLock()->set_eliminated();
             _igvn.hash_delete(u);
             u->set_req(TypeFunc::Parms + 1, newbox);
             next_edge = false;
-#ifdef ASSERT
-          } else if (u->is_Unlock() && u->as_Unlock()->obj_node() == obj) {
-            assert(u->as_Unlock()->is_eliminated(), "sanity");
-#endif
           }
           // Replace old box in monitor debug info.
           if (u->is_SafePoint() && u->as_SafePoint()->jvms()) {
@@ -1761,8 +1755,27 @@
           if (next_edge) i++;
         } // for (uint i = 0; i < oldbox->outcnt();)
       } // if (!oldbox->is_eliminated())
-  } // if (alock->is_Lock() && !lock->is_coarsened())
+  } // if (!alock->is_coarsened())
+}
+
+// we have determined that this lock/unlock can be eliminated, we simply
+// eliminate the node without expanding it.
+//
+// Note:  The membar's associated with the lock/unlock are currently not
+//        eliminated.  This should be investigated as a future enhancement.
+//
+bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
 
+  if (!alock->is_eliminated()) {
+    return false;
+  }
+#ifdef ASSERT
+  if (alock->is_Lock() && !alock->is_coarsened()) {
+    // Check that new "eliminated" BoxLock node is created.
+    BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
+    assert(oldbox->is_eliminated(), "should be done already");
+  }
+#endif
   CompileLog* log = C->log();
   if (log != NULL) {
     log->head("eliminate_lock lock='%d'",
@@ -2145,6 +2158,15 @@
   if (C->macro_count() == 0)
     return false;
   // First, attempt to eliminate locks
+  int cnt = C->macro_count();
+  for (int i=0; i < cnt; i++) {
+    Node *n = C->macro_node(i);
+    if (n->is_AbstractLock()) { // Lock and Unlock nodes
+      // Before elimination mark all associated (same box and obj)
+      // lock and unlock nodes.
+      mark_eliminated_locking_nodes(n->as_AbstractLock());
+    }
+  }
   bool progress = true;
   while (progress) {
     progress = false;
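
The marking pass added here can be pictured as grouping locks by their (object, BoxLock) pair and propagating the eliminated flag across each group. A simplified stand-alone sketch ('LockSite' is an illustrative stand-in for AbstractLockNode):

#include <vector>

struct LockSite {        // stand-in for a Lock or Unlock node
  int  obj_id;           // the locked object
  int  box_id;           // the BoxLock it uses
  bool eliminated;       // set by escape analysis for some members of a group
};

// If any lock/unlock on a given (obj, box) pair was proven eliminable,
// mark every other lock/unlock on the same pair as well.
void mark_group(std::vector<LockSite>& sites, const LockSite& seed) {
  if (!seed.eliminated) return;
  for (LockSite& s : sites) {
    if (s.obj_id == seed.obj_id && s.box_id == seed.box_id)
      s.eliminated = true;
  }
}
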
--- a/hotspot/src/share/vm/opto/macro.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/macro.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -92,6 +92,7 @@
   void process_users_of_allocation(AllocateNode *alloc);
 
   void eliminate_card_mark(Node *cm);
+  void mark_eliminated_locking_nodes(AbstractLockNode *alock);
   bool eliminate_locking_node(AbstractLockNode *alock);
   void expand_lock_node(LockNode *lock);
   void expand_unlock_node(UnlockNode *unlock);
--- a/hotspot/src/share/vm/opto/matcher.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -52,6 +52,9 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
+#ifdef TARGET_ARCH_MODEL_ppc
+# include "adfiles/ad_ppc.hpp"
+#endif
 
 OptoReg::Name OptoReg::c_frame_pointer;
 
--- a/hotspot/src/share/vm/opto/node.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/node.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -2012,6 +2012,16 @@
   _inode_top = _inodes + old_top;        // restore _top
 }
 
+// Node_Stack is used as a map from node indexes to nodes.
+Node* Node_Stack::find(uint idx) const {
+  uint sz = size();
+  for (uint i=0; i < sz; i++) {
+    if (idx == index_at(i) )
+      return node_at(i);
+  }
+  return NULL;
+}
+
 //=============================================================================
 uint TypeNode::size_of() const { return sizeof(*this); }
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/opto/node.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/node.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1463,6 +1463,9 @@
   bool is_nonempty() const { return (_inode_top >= _inodes); }
   bool is_empty() const { return (_inode_top < _inodes); }
   void clear() { _inode_top = _inodes - 1; } // retain storage
+
+  // Node_Stack is used as a map from node indexes to nodes.
+  Node* find(uint idx) const;
 };
 
 
--- a/hotspot/src/share/vm/opto/output.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/output.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -911,7 +911,7 @@
         }
       } else {
         const TypePtr *tp = obj_node->bottom_type()->make_ptr();
-        scval = new ConstantOopWriteValue(tp->is_instptr()->const_oop()->constant_encoding());
+        scval = new ConstantOopWriteValue(tp->is_oopptr()->const_oop()->constant_encoding());
       }
 
       OptoReg::Name box_reg = BoxLockNode::stack_slot(box_node);
--- a/hotspot/src/share/vm/opto/parse.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/parse.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -50,7 +50,7 @@
   // Always between 0.0 and 1.0.  Represents the percentage of the method's
   // total execution time used at this call site.
   const float _site_invoke_ratio;
-  const int   _site_depth_adjust;
+  const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)
   float compute_callee_frequency( int caller_bci ) const;
 
   GrowableArray<InlineTree*> _subtrees;
@@ -63,7 +63,7 @@
              JVMState* caller_jvms,
              int caller_bci,
              float site_invoke_ratio,
-             int site_depth_adjust);
+             int max_inline_level);
   InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
                                            JVMState* caller_jvms,
                                            int caller_bci);
@@ -74,7 +74,7 @@
 
   InlineTree *caller_tree()       const { return _caller_tree;  }
   InlineTree* callee_at(int bci, ciMethod* m) const;
-  int         inline_depth()      const { return stack_depth() + _site_depth_adjust; }
+  int         inline_level()      const { return stack_depth(); }
   int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
 
 public:
@@ -82,7 +82,7 @@
   static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false);
 
   // For temporary (stack-allocated, stateless) ilts:
-  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int site_depth_adjust);
+  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level);
 
   // InlineTree enum
   enum InlineStyle {
--- a/hotspot/src/share/vm/opto/parse3.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/parse3.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -417,17 +417,10 @@
 
   // Note:  Array classes are always initialized; no is_initialized check.
 
-  enum { MAX_DIMENSION = 5 };
-  if (ndimensions > MAX_DIMENSION || ndimensions <= 0) {
-    uncommon_trap(Deoptimization::Reason_unhandled,
-                  Deoptimization::Action_none);
-    return;
-  }
-
   kill_dead_locals();
 
   // get the lengths from the stack (first dimension is on top)
-  Node* length[MAX_DIMENSION+1];
+  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
   length[ndimensions] = NULL;  // terminating null for make_runtime_call
   int j;
   for (j = ndimensions-1; j >= 0 ; j--) length[j] = pop();
@@ -470,20 +463,43 @@
 
   address fun = NULL;
   switch (ndimensions) {
-  //case 1: Actually, there is no case 1.  It's handled by new_array.
+  case 1: ShouldNotReachHere(); break;
   case 2: fun = OptoRuntime::multianewarray2_Java(); break;
   case 3: fun = OptoRuntime::multianewarray3_Java(); break;
   case 4: fun = OptoRuntime::multianewarray4_Java(); break;
   case 5: fun = OptoRuntime::multianewarray5_Java(); break;
-  default: ShouldNotReachHere();
   };
+  Node* c = NULL;
 
-  Node* c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
-                              OptoRuntime::multianewarray_Type(ndimensions),
-                              fun, NULL, TypeRawPtr::BOTTOM,
-                              makecon(TypeKlassPtr::make(array_klass)),
-                              length[0], length[1], length[2],
-                              length[3], length[4]);
+  if (fun != NULL) {
+    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
+                          OptoRuntime::multianewarray_Type(ndimensions),
+                          fun, NULL, TypeRawPtr::BOTTOM,
+                          makecon(TypeKlassPtr::make(array_klass)),
+                          length[0], length[1], length[2],
+                          length[3], length[4]);
+  } else {
+    // Create a java array for dimension sizes
+    Node* dims = NULL;
+    { PreserveReexecuteState preexecs(this);
+      _sp += ndimensions;
+      Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
+      dims = new_array(dims_array_klass, intcon(ndimensions), 0);
+
+      // Fill it in with the dimension values
+      for (j = 0; j < ndimensions; j++) {
+        Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
+        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS);
+      }
+    }
+
+    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
+                          OptoRuntime::multianewarrayN_Type(),
+                          OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
+                          makecon(TypeKlassPtr::make(array_klass)),
+                          dims);
+  }
+
   Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms));
 
   const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);
@@ -496,7 +512,7 @@
   if (ltype != NULL)
     type = type->is_aryptr()->cast_to_size(ltype);
 
-  // We cannot sharpen the nested sub-arrays, since the top level is mutable.
+    // We cannot sharpen the nested sub-arrays, since the top level is mutable.
 
   Node* cast = _gvn.transform( new (C, 2) CheckCastPPNode(control(), res, type) );
   push(cast);
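
For more than five dimensions the parser now stores the dimension sizes into a small int[] and makes a single generic runtime call instead of trapping. A conceptual sketch of that shape with an illustrative stand-in for the runtime entry (not the actual OptoRuntime interface):

#include <cstdio>
#include <vector>

// Illustrative stand-in for the generic allocation entry: it receives the
// array klass plus one int[] of dimension sizes, whatever their count.
static void* multianewarrayN_runtime(const char* klass_name, const int* dims, int ndims) {
  printf("allocate %s with %d dimensions:", klass_name, ndims);
  for (int i = 0; i < ndims; i++) printf(" %d", dims[i]);
  printf("\n");
  return nullptr;   // a real entry would return the new array
}

int main() {
  std::vector<int> dims = {2, 3, 4, 5, 6, 7};   // six dimensions, so the generic path
  multianewarrayN_runtime("[[[[[[I", dims.data(), (int)dims.size());
  return 0;
}
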
--- a/hotspot/src/share/vm/opto/runtime.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -106,6 +106,7 @@
 address OptoRuntime::_multianewarray3_Java                        = NULL;
 address OptoRuntime::_multianewarray4_Java                        = NULL;
 address OptoRuntime::_multianewarray5_Java                        = NULL;
+address OptoRuntime::_multianewarrayN_Java                        = NULL;
 address OptoRuntime::_g1_wb_pre_Java                              = NULL;
 address OptoRuntime::_g1_wb_post_Java                             = NULL;
 address OptoRuntime::_vtable_must_compile_Java                    = NULL;
@@ -120,6 +121,7 @@
 address OptoRuntime::_zap_dead_native_locals_Java                 = NULL;
 # endif
 
+ExceptionBlob* OptoRuntime::_exception_blob;
 
 // This should be called in an assertion at the start of OptoRuntime routines
 // which are entered from compiled code (all of them)
@@ -153,6 +155,7 @@
   gen(env, _multianewarray3_Java           , multianewarray3_Type         , multianewarray3_C               ,    0 , true , false, false);
   gen(env, _multianewarray4_Java           , multianewarray4_Type         , multianewarray4_C               ,    0 , true , false, false);
   gen(env, _multianewarray5_Java           , multianewarray5_Type         , multianewarray5_C               ,    0 , true , false, false);
+  gen(env, _multianewarrayN_Java           , multianewarrayN_Type         , multianewarrayN_C               ,    0 , true , false, false);
   gen(env, _g1_wb_pre_Java                 , g1_wb_pre_Type               , SharedRuntime::g1_wb_pre        ,    0 , false, false, false);
   gen(env, _g1_wb_post_Java                , g1_wb_post_Type              , SharedRuntime::g1_wb_post       ,    0 , false, false, false);
   gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C      ,    0 , false, false, false);
@@ -373,6 +376,24 @@
   thread->set_vm_result(obj);
 JRT_END
 
+JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(klassOopDesc* elem_type, arrayOopDesc* dims, JavaThread *thread))
+  assert(check_compiled_frame(thread), "incorrect caller");
+  assert(oop(elem_type)->is_klass(), "not a class");
+  assert(oop(dims)->is_typeArray(), "not an array");
+
+  ResourceMark rm;
+  jint len = dims->length();
+  assert(len > 0, "Dimensions array should contain data");
+  jint *j_dims = typeArrayOop(dims)->int_at_addr(0);
+  jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
+  Copy::conjoint_jints_atomic(j_dims, c_dims, len);
+
+  oop obj = arrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
+  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
+  thread->set_vm_result(obj);
+JRT_END
+
+
 const TypeFunc *OptoRuntime::new_instance_Type() {
   // create input type (domain)
   const Type **fields = TypeTuple::fields(1);
@@ -453,6 +474,21 @@
   return multianewarray_Type(5);
 }
 
+const TypeFunc *OptoRuntime::multianewarrayN_Type() {
+  // create input type (domain)
+  const Type **fields = TypeTuple::fields(2);
+  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;   // element klass
+  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;   // array of dim sizes
+  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);
+
+  // create result type (range)
+  fields = TypeTuple::fields(1);
+  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
+  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
+
+  return TypeFunc::make(domain, range);
+}
+
 const TypeFunc *OptoRuntime::g1_wb_pre_Type() {
   const Type **fields = TypeTuple::fields(2);
   fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
--- a/hotspot/src/share/vm/opto/runtime.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/runtime.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -118,6 +118,7 @@
   static address _multianewarray3_Java;
   static address _multianewarray4_Java;
   static address _multianewarray5_Java;
+  static address _multianewarrayN_Java;
   static address _g1_wb_pre_Java;
   static address _g1_wb_post_Java;
   static address _vtable_must_compile_Java;
@@ -153,6 +154,7 @@
   static void multianewarray3_C(klassOopDesc* klass, int len1, int len2, int len3, JavaThread *thread);
   static void multianewarray4_C(klassOopDesc* klass, int len1, int len2, int len3, int len4, JavaThread *thread);
   static void multianewarray5_C(klassOopDesc* klass, int len1, int len2, int len3, int len4, int len5, JavaThread *thread);
+  static void multianewarrayN_C(klassOopDesc* klass, arrayOopDesc* dims, JavaThread *thread);
   static void g1_wb_pre_C(oopDesc* orig, JavaThread* thread);
   static void g1_wb_post_C(void* card_addr, JavaThread* thread);
 
@@ -210,6 +212,7 @@
   static address multianewarray3_Java()                  { return _multianewarray3_Java; }
   static address multianewarray4_Java()                  { return _multianewarray4_Java; }
   static address multianewarray5_Java()                  { return _multianewarray5_Java; }
+  static address multianewarrayN_Java()                  { return _multianewarrayN_Java; }
   static address g1_wb_pre_Java()                        { return _g1_wb_pre_Java; }
   static address g1_wb_post_Java()                       { return _g1_wb_post_Java; }
   static address vtable_must_compile_stub()              { return _vtable_must_compile_Java; }
@@ -249,6 +252,7 @@
   static const TypeFunc* multianewarray3_Type(); // multianewarray
   static const TypeFunc* multianewarray4_Type(); // multianewarray
   static const TypeFunc* multianewarray5_Type(); // multianewarray
+  static const TypeFunc* multianewarrayN_Type(); // multianewarray
   static const TypeFunc* g1_wb_pre_Type();
   static const TypeFunc* g1_wb_post_Type();
   static const TypeFunc* complete_monitor_enter_Type();
--- a/hotspot/src/share/vm/opto/stringopts.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/stringopts.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -768,6 +768,7 @@
         tty->cr();
       }
 #endif
+      fail = true;
       break;
     } else if (ptr->is_Proj() && ptr->in(0)->is_Initialize()) {
       ptr = ptr->in(0)->in(0);
--- a/hotspot/src/share/vm/opto/subnode.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/opto/subnode.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1101,6 +1101,7 @@
   if( cmp2_type == TypeInt::ZERO &&
       cmp1_op == Op_XorI &&
       j_xor->in(1) != j_xor &&          // An xor of itself is dead
+      phase->type( j_xor->in(1) ) == TypeInt::BOOL &&
       phase->type( j_xor->in(2) ) == TypeInt::ONE &&
       (_test._test == BoolTest::eq ||
        _test._test == BoolTest::ne) ) {
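
The extra TypeInt::BOOL requirement matters because the xor-with-1 idiom only flips a value that is known to be 0 or 1; for a general int the rewritten test is not equivalent. A small demonstration (assuming the fold turns a test of (x ^ 1) against 0 into the opposite test of x against 0):

#include <cstdio>

int main() {
  for (int x = 0; x <= 3; x++) {
    bool original = ((x ^ 1) == 0);   // true only for x == 1
    bool folded   = (x != 0);         // true for any non-zero x
    printf("x=%d  (x^1)==0 -> %d   x!=0 -> %d%s\n",
           x, (int)original, (int)folded,
           (original != folded) ? "   <-- differs when x is not a bool" : "");
  }
  return 0;
}
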
--- a/hotspot/src/share/vm/prims/jni.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/prims/jni.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -70,15 +70,6 @@
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
 #include "utilities/histogram.hpp"
-#ifdef TARGET_ARCH_x86
-# include "jniTypes_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "jniTypes_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "jniTypes_zero.hpp"
-#endif
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
 # include "thread_linux.inline.hpp"
@@ -3296,6 +3287,19 @@
   return ret;
 }
 
+#ifndef PRODUCT
+
+#include "utilities/quickSort.hpp"
+
+void execute_internal_vm_tests() {
+  if (ExecuteInternalVMTests) {
+    assert(QuickSort::test_quick_sort(), "test_quick_sort failed");
+    tty->print_cr("All tests passed");
+  }
+}
+
+#endif
+
 HS_DTRACE_PROBE_DECL3(hotspot_jni, CreateJavaVM__entry, vm, penv, args);
 DT_RETURN_MARK_DECL(CreateJavaVM, jint);
 
@@ -3386,6 +3390,7 @@
   }
 
   NOT_PRODUCT(test_error_handler(ErrorHandlerTest));
+  NOT_PRODUCT(execute_internal_vm_tests());
   return result;
 }
 
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -992,6 +992,9 @@
     }
 
     Rewriter::rewrite(scratch_class, THREAD);
+    if (!HAS_PENDING_EXCEPTION) {
+      Rewriter::relocate_and_link(scratch_class, THREAD);
+    }
     if (HAS_PENDING_EXCEPTION) {
       Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name();
       CLEAR_PENDING_EXCEPTION;
--- a/hotspot/src/share/vm/prims/methodHandleWalk.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/prims/methodHandleWalk.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -141,6 +141,12 @@
 
 void MethodHandleChain::lose(const char* msg, TRAPS) {
   _lose_message = msg;
+#ifdef ASSERT
+  if (Verbose) {
+    tty->print_cr(INTPTR_FORMAT " lose: %s", _method_handle(), msg);
+    print();
+  }
+#endif
   if (!THREAD->is_Java_thread() || ((JavaThread*)THREAD)->thread_state() != _thread_in_vm) {
     // throw a preallocated exception
     THROW_OOP(Universe::virtual_machine_error_instance());
@@ -149,6 +155,155 @@
 }
 
 
+#ifdef ASSERT
+static const char* adapter_ops[] = {
+  "retype_only"  ,
+  "retype_raw"   ,
+  "check_cast"   ,
+  "prim_to_prim" ,
+  "ref_to_prim"  ,
+  "prim_to_ref"  ,
+  "swap_args"    ,
+  "rot_args"     ,
+  "dup_args"     ,
+  "drop_args"    ,
+  "collect_args" ,
+  "spread_args"  ,
+  "fold_args"
+};
+
+static const char* adapter_op_to_string(int op) {
+  if (op >= 0 && op < (int)ARRAY_SIZE(adapter_ops))
+    return adapter_ops[op];
+  return "unknown_op";
+}
+
+void MethodHandleChain::print(oopDesc* m) {
+  HandleMark hm;
+  ResourceMark rm;
+  Handle mh(m);
+  print(mh);
+}
+
+void MethodHandleChain::print(Handle mh) {
+  EXCEPTION_MARK;
+  MethodHandleChain mhc(mh, THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    oop ex = THREAD->pending_exception();
+    CLEAR_PENDING_EXCEPTION;
+    ex->print();
+    return;
+  }
+  mhc.print();
+}
+
+
+void MethodHandleChain::print() {
+  EXCEPTION_MARK;
+  print_impl(THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    oop ex = THREAD->pending_exception();
+    CLEAR_PENDING_EXCEPTION;
+    ex->print();
+  }
+}
+
+void MethodHandleChain::print_impl(TRAPS) {
+  ResourceMark rm;
+
+  MethodHandleChain chain(_root, CHECK);
+  for (;;) {
+    tty->print(INTPTR_FORMAT ": ", chain.method_handle()());
+    if (chain.is_bound()) {
+      tty->print("bound: arg_type %s arg_slot %d",
+                 type2name(chain.bound_arg_type()),
+                 chain.bound_arg_slot());
+      oop o = chain.bound_arg_oop();
+      if (o != NULL) {
+        if (o->is_instance()) {
+          tty->print(" instance %s", o->klass()->klass_part()->internal_name());
+        } else {
+          o->print();
+        }
+      }
+    } else if (chain.is_adapter()) {
+      tty->print("adapter: arg_slot %d conversion op %s",
+                 chain.adapter_arg_slot(),
+                 adapter_op_to_string(chain.adapter_conversion_op()));
+      switch (chain.adapter_conversion_op()) {
+        case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY:
+        case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW:
+        case java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST:
+        case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM:
+        case java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM:
+          break;
+
+        case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF: {
+          tty->print(" src_type = %s", type2name(chain.adapter_conversion_src_type()));
+          break;
+        }
+
+        case java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS:
+        case java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS: {
+          int dest_arg_slot = chain.adapter_conversion_vminfo();
+          tty->print(" dest_arg_slot %d type %s", dest_arg_slot, type2name(chain.adapter_conversion_src_type()));
+          break;
+        }
+
+        case java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS:
+        case java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS: {
+          int dup_slots = chain.adapter_conversion_stack_pushes();
+          tty->print(" pushes %d", dup_slots);
+          break;
+        }
+
+        case java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS:
+        case java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS: {
+          int coll_slots = chain.MethodHandle_vmslots();
+          tty->print(" coll_slots %d", coll_slots);
+          break;
+        }
+
+        case java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS: {
+          // Check the required length.
+          int spread_slots = 1 + chain.adapter_conversion_stack_pushes();
+          tty->print(" spread_slots %d", spread_slots);
+          break;
+        }
+
+        default:
+          tty->print_cr("bad adapter conversion");
+          break;
+      }
+    } else {
+      // DMH
+      tty->print("direct: ");
+      chain.last_method_oop()->print_short_name(tty);
+    }
+
+    tty->print(" (");
+    objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(chain.method_type_oop());
+    for (int i = ptypes->length() - 1; i >= 0; i--) {
+      BasicType t = java_lang_Class::as_BasicType(ptypes->obj_at(i));
+      if (t == T_ARRAY) t = T_OBJECT;
+      tty->print("%c", type2char(t));
+      if (t == T_LONG || t == T_DOUBLE) tty->print("_");
+    }
+    tty->print(")");
+    BasicType rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(chain.method_type_oop()));
+    if (rtype == T_ARRAY) rtype = T_OBJECT;
+    tty->print("%c", type2char(rtype));
+    tty->cr();
+    if (!chain.is_last()) {
+      chain.next(CHECK);
+    } else {
+      break;
+    }
+  }
+}
+#endif
+
+
 // -----------------------------------------------------------------------------
 // MethodHandleWalker
 
@@ -205,10 +360,16 @@
     if (chain().is_adapter()) {
       int conv_op = chain().adapter_conversion_op();
       int arg_slot = chain().adapter_arg_slot();
-      SlotState* arg_state = slot_state(arg_slot);
-      if (arg_state == NULL
-          && conv_op > java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW) {
-        lose("bad argument index", CHECK_(empty));
+
+      // Check that the arg_slot is valid.  In most cases it must be
+      // within range of the current arguments but there are some
+      // exceptions.  Those are sanity checked in their implementation
+      // below.
+      if ((arg_slot < 0 || arg_slot >= _outgoing.length()) &&
+          conv_op > java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW &&
+          conv_op != java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS &&
+          conv_op != java_lang_invoke_AdapterMethodHandle::OP_FOLD_ARGS) {
+        lose(err_msg("bad argument index %d", arg_slot), CHECK_(empty));
       }
 
       bool retain_original_args = false;  // used by fold/collect logic
@@ -237,8 +398,7 @@
 
         // Argument types.
         for (int i = 0, slot = _outgoing.length() - 1; slot >= 0; slot--) {
-          SlotState* arg_state = slot_state(slot);
-          if (arg_state->_type == T_VOID)  continue;
+          if (arg_type(slot) == T_VOID)  continue;
 
           klassOop  src_klass = NULL;
           klassOop  dst_klass = NULL;
@@ -262,10 +422,11 @@
         klassOop dest_klass = NULL;
         BasicType dest = java_lang_Class::as_BasicType(chain().adapter_arg_oop(), &dest_klass);
         assert(dest == T_OBJECT, "");
-        assert(dest == arg_state->_type, "");
-        ArgToken arg = arg_state->_arg;
-        ArgToken new_arg = make_conversion(T_OBJECT, dest_klass, Bytecodes::_checkcast, arg, CHECK_(empty));
-        assert(arg.token_type() >= tt_symbolic || arg.index() == new_arg.index(), "should be the same index");
+        ArgToken arg = _outgoing.at(arg_slot);
+        assert(dest == arg.basic_type(), "");
+        arg = make_conversion(T_OBJECT, dest_klass, Bytecodes::_checkcast, arg, CHECK_(empty));
+        // replace the object by the result of the cast, to make the compiler happy:
+        change_argument(T_OBJECT, arg_slot, T_OBJECT, arg);
         debug_only(dest_klass = (klassOop)badOop);
         break;
       }
@@ -274,8 +435,8 @@
         // i2l, etc., on the Nth outgoing argument in place
         BasicType src = chain().adapter_conversion_src_type(),
                   dest = chain().adapter_conversion_dest_type();
+        ArgToken arg = _outgoing.at(arg_slot);
         Bytecodes::Code bc = conversion_code(src, dest);
-        ArgToken arg = arg_state->_arg;
         if (bc == Bytecodes::_nop) {
           break;
         } else if (bc != Bytecodes::_illegal) {
@@ -289,7 +450,7 @@
           }
         }
         if (bc == Bytecodes::_illegal) {
-          lose("bad primitive conversion", CHECK_(empty));
+          lose(err_msg("bad primitive conversion for %s -> %s", type2name(src), type2name(dest)), CHECK_(empty));
         }
         change_argument(src, arg_slot, dest, arg);
         break;
@@ -298,7 +459,7 @@
       case java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM: {
         // checkcast to wrapper type & call intValue, etc.
         BasicType dest = chain().adapter_conversion_dest_type();
-        ArgToken arg = arg_state->_arg;
+        ArgToken arg = _outgoing.at(arg_slot);
         arg = make_conversion(T_OBJECT, SystemDictionary::box_klass(dest),
                               Bytecodes::_checkcast, arg, CHECK_(empty));
         vmIntrinsics::ID unboxer = vmIntrinsics::for_unboxing(dest);
@@ -308,7 +469,7 @@
         ArgToken arglist[2];
         arglist[0] = arg;         // outgoing 'this'
         arglist[1] = ArgToken();  // sentinel
-        arg = make_invoke(NULL, unboxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty));
+        arg = make_invoke(methodHandle(), unboxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty));
         change_argument(T_OBJECT, arg_slot, dest, arg);
         break;
       }
@@ -316,55 +477,63 @@
       case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF: {
         // call wrapper type.valueOf
         BasicType src = chain().adapter_conversion_src_type();
-        ArgToken arg = arg_state->_arg;
         vmIntrinsics::ID boxer = vmIntrinsics::for_boxing(src);
         if (boxer == vmIntrinsics::_none) {
           lose("no boxing method", CHECK_(empty));
         }
+        ArgToken arg = _outgoing.at(arg_slot);
         ArgToken arglist[2];
         arglist[0] = arg;         // outgoing value
         arglist[1] = ArgToken();  // sentinel
-        arg = make_invoke(NULL, boxer, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(empty));
+        arg = make_invoke(methodHandle(), boxer, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(empty));
         change_argument(src, arg_slot, T_OBJECT, arg);
         break;
       }
 
       case java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS: {
         int dest_arg_slot = chain().adapter_conversion_vminfo();
-        if (!slot_has_argument(dest_arg_slot)) {
+        if (!has_argument(dest_arg_slot)) {
           lose("bad swap index", CHECK_(empty));
         }
         // a simple swap between two arguments
-        SlotState* dest_arg_state = slot_state(dest_arg_slot);
-        SlotState temp = (*dest_arg_state);
-        (*dest_arg_state) = (*arg_state);
-        (*arg_state) = temp;
+        if (arg_slot > dest_arg_slot) {
+          int tmp = arg_slot;
+          arg_slot = dest_arg_slot;
+          dest_arg_slot = tmp;
+        }
+        ArgToken a1 = _outgoing.at(arg_slot);
+        ArgToken a2 = _outgoing.at(dest_arg_slot);
+        change_argument(a2.basic_type(), dest_arg_slot, a1);
+        change_argument(a1.basic_type(), arg_slot, a2);
         break;
       }
 
       case java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS: {
-        int dest_arg_slot = chain().adapter_conversion_vminfo();
-        if (!slot_has_argument(dest_arg_slot) || arg_slot == dest_arg_slot) {
+        int limit_raw  = chain().adapter_conversion_vminfo();
+        bool rot_down  = (arg_slot < limit_raw);
+        int limit_bias = (rot_down ? MethodHandles::OP_ROT_ARGS_DOWN_LIMIT_BIAS : 0);
+        int limit_slot = limit_raw - limit_bias;
+        if ((uint)limit_slot > (uint)_outgoing.length()) {
           lose("bad rotate index", CHECK_(empty));
         }
-        SlotState* dest_arg_state = slot_state(dest_arg_slot);
         // Rotate the source argument (plus following N slots) into the
         // position occupied by the dest argument (plus following N slots).
-        int rotate_count = type2size[dest_arg_state->_type];
+        int rotate_count = type2size[chain().adapter_conversion_src_type()];
         // (no other rotate counts are currently supported)
-        if (arg_slot < dest_arg_slot) {
+        if (rot_down) {
           for (int i = 0; i < rotate_count; i++) {
-            SlotState temp = _outgoing.at(arg_slot);
+            ArgToken temp = _outgoing.at(arg_slot);
             _outgoing.remove_at(arg_slot);
-            _outgoing.insert_before(dest_arg_slot + rotate_count - 1, temp);
+            _outgoing.insert_before(limit_slot - 1, temp);
           }
-        } else { // arg_slot > dest_arg_slot
+        } else { // arg_slot > limit_slot => rotate_up
           for (int i = 0; i < rotate_count; i++) {
-            SlotState temp = _outgoing.at(arg_slot + rotate_count - 1);
+            ArgToken temp = _outgoing.at(arg_slot + rotate_count - 1);
             _outgoing.remove_at(arg_slot + rotate_count - 1);
-            _outgoing.insert_before(dest_arg_slot, temp);
+            _outgoing.insert_before(limit_slot, temp);
           }
         }
+        assert(_outgoing_argc == argument_count_slow(), "empty slots under control");
         break;
       }
 
@@ -374,11 +543,11 @@
           lose("bad dup count", CHECK_(empty));
         }
         for (int i = 0; i < dup_slots; i++) {
-          SlotState* dup = slot_state(arg_slot + 2*i);
-          if (dup == NULL)              break;  // safety net
-          if (dup->_type != T_VOID)     _outgoing_argc += 1;
-          _outgoing.insert_before(i, (*dup));
+          ArgToken dup = _outgoing.at(arg_slot + 2*i);
+          if (dup.basic_type() != T_VOID)     _outgoing_argc += 1;
+          _outgoing.insert_before(i, dup);
         }
+        assert(_outgoing_argc == argument_count_slow(), "empty slots under control");
         break;
       }
 
@@ -388,11 +557,11 @@
           lose("bad drop count", CHECK_(empty));
         }
         for (int i = 0; i < drop_slots; i++) {
-          SlotState* drop = slot_state(arg_slot);
-          if (drop == NULL)             break;  // safety net
-          if (drop->_type != T_VOID)    _outgoing_argc -= 1;
+          ArgToken drop = _outgoing.at(arg_slot);
+          if (drop.basic_type() != T_VOID)    _outgoing_argc -= 1;
           _outgoing.remove_at(arg_slot);
         }
+        assert(_outgoing_argc == argument_count_slow(), "empty slots under control");
         break;
       }
 
@@ -415,14 +584,15 @@
           lose("bad fold/collect arg slot", CHECK_(empty));
         }
         for (int i = 0, slot = arg_slot + coll_slots - 1; slot >= arg_slot; slot--) {
-          SlotState* arg_state = slot_state(slot);
-          BasicType  arg_type  = arg_state->_type;
+          ArgToken arg_state = _outgoing.at(slot);
+          BasicType  arg_type  = arg_state.basic_type();
           if (arg_type == T_VOID)  continue;
-          ArgToken arg = _outgoing.at(slot)._arg;
+          ArgToken arg = _outgoing.at(slot);
           if (i >= argc) { lose("bad fold/collect arg", CHECK_(empty)); }
           arglist[1+i] = arg;
           if (!retain_original_args)
             change_argument(arg_type, slot, T_VOID, ArgToken(tt_void));
+          i++;
         }
         arglist[1+argc] = ArgToken();  // sentinel
         oop invoker = java_lang_invoke_MethodTypeForm::vmlayout(
@@ -431,8 +601,9 @@
           lose("bad vmlayout slot", CHECK_(empty));
         }
         // FIXME: consider inlining the invokee at the bytecode level
-        ArgToken ret = make_invoke(methodOop(invoker), vmIntrinsics::_none,
+        ArgToken ret = make_invoke(methodHandle(THREAD, methodOop(invoker)), vmIntrinsics::_invokeGeneric,
                                    Bytecodes::_invokevirtual, false, 1+argc, &arglist[0], CHECK_(empty));
+        // The iid = _invokeGeneric really means to adjust reference types as needed.
         DEBUG_ONLY(invoker = NULL);
         if (rtype == T_OBJECT) {
           klassOop rklass = java_lang_Class::as_klassOop( java_lang_invoke_MethodType::rtype(recursive_mtype()) );
@@ -442,8 +613,10 @@
             ret = make_conversion(T_OBJECT, rklass, Bytecodes::_checkcast, ret, CHECK_(empty));
           }
         }
-        int ret_slot = arg_slot + (retain_original_args ? coll_slots : 0);
-        change_argument(T_VOID, ret_slot, rtype, ret);
+        if (rtype != T_VOID) {
+          int ret_slot = arg_slot + (retain_original_args ? coll_slots : 0);
+          change_argument(T_VOID, ret_slot, rtype, ret);
+        }
         break;
       }
 
@@ -463,8 +636,9 @@
         debug_only(element_klass_oop = (klassOop)badOop);
 
         // Fetch the argument, which we will cast to the required array type.
-        assert(arg_state->_type == T_OBJECT, "");
-        ArgToken array_arg = arg_state->_arg;
+        ArgToken arg = _outgoing.at(arg_slot);
+        assert(arg.basic_type() == T_OBJECT, "");
+        ArgToken array_arg = arg;
         array_arg = make_conversion(T_OBJECT, array_klass(), Bytecodes::_checkcast, array_arg, CHECK_(empty));
         change_argument(T_OBJECT, arg_slot, T_VOID, ArgToken(tt_void));
 
@@ -486,8 +660,8 @@
         arglist[0] = array_arg;   // value to check
         arglist[1] = length_arg;  // length to check
         arglist[2] = ArgToken();  // sentinel
-        make_invoke(NULL, vmIntrinsics::_checkSpreadArgument,
-                    Bytecodes::_invokestatic, false, 3, &arglist[0], CHECK_(empty));
+        make_invoke(methodHandle(), vmIntrinsics::_checkSpreadArgument,
+                    Bytecodes::_invokestatic, false, 2, &arglist[0], CHECK_(empty));
 
         // Spread out the array elements.
         Bytecodes::Code aload_op = Bytecodes::_nop;
@@ -509,7 +683,7 @@
           ArgToken offset_arg = make_prim_constant(T_INT, &offset_jvalue, CHECK_(empty));
           ArgToken element_arg = make_fetch(element_type, element_klass(), aload_op, array_arg, offset_arg, CHECK_(empty));
           change_argument(T_VOID, ap, element_type, element_arg);
-          ap += type2size[element_type];
+          //ap += type2size[element_type];  // don't do this; insert next arg to *right* of previous
         }
         break;
       }
@@ -531,10 +705,10 @@
       } else {
         jvalue arg_value;
         BasicType bt = java_lang_boxing_object::get_value(arg_oop, &arg_value);
-        if (bt == arg_type) {
+        if (bt == arg_type || (bt == T_INT && is_subword_type(arg_type))) {
           arg = make_prim_constant(arg_type, &arg_value, CHECK_(empty));
         } else {
-          lose("bad bound value", CHECK_(empty));
+          lose(err_msg("bad bound value: arg_type %s boxing %s", type2name(arg_type), type2name(bt)), CHECK_(empty));
         }
       }
       DEBUG_ONLY(arg_oop = badOop);
@@ -554,13 +728,13 @@
   ArgToken* arglist = NEW_RESOURCE_ARRAY(ArgToken, _outgoing.length() + 1);
   int ap = 0;
   for (int i = _outgoing.length() - 1; i >= 0; i--) {
-    SlotState* arg_state = slot_state(i);
-    if (arg_state->_type == T_VOID)  continue;
-    arglist[ap++] = _outgoing.at(i)._arg;
+    ArgToken arg_state = _outgoing.at(i);
+    if (arg_state.basic_type() == T_VOID)  continue;
+    arglist[ap++] = _outgoing.at(i);
   }
   assert(ap == _outgoing_argc, "");
   arglist[ap] = ArgToken();  // add a sentinel, for the sake of asserts
-  return make_invoke(chain().last_method_oop(),
+  return make_invoke(chain().last_method(),
                      vmIntrinsics::_none,
                      chain().last_invoke_code(), true,
                      ap, arglist, THREAD);
@@ -576,7 +750,7 @@
   _outgoing_argc = nptypes;
   int argp = nptypes - 1;
   if (argp >= 0) {
-    _outgoing.at_grow(argp, make_state(T_VOID, ArgToken(tt_void))); // presize
+    _outgoing.at_grow(argp, ArgToken(tt_void)); // presize
   }
   for (int i = 0; i < nptypes; i++) {
     klassOop  arg_type_klass = NULL;
@@ -584,10 +758,10 @@
     int index = new_local_index(arg_type);
     ArgToken arg = make_parameter(arg_type, arg_type_klass, index, CHECK);
     DEBUG_ONLY(arg_type_klass = (klassOop) NULL);
-    _outgoing.at_put(argp, make_state(arg_type, arg));
+    _outgoing.at_put(argp, arg);
     if (type2size[arg_type] == 2) {
       // add the extra slot, so we can model the JVM stack
-      _outgoing.insert_before(argp+1, make_state(T_VOID, ArgToken(tt_void)));
+      _outgoing.insert_before(argp+1, ArgToken(tt_void));
     }
     --argp;
   }
@@ -596,38 +770,61 @@
   BasicType ret_type = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(mtype()), &ret_type_klass);
   ArgToken  ret = make_parameter(ret_type, ret_type_klass, -1, CHECK);
   // ignore ret; client can catch it if needed
+
+  assert(_outgoing_argc == argument_count_slow(), "empty slots under control");
+
+  verify_args_and_signature(CHECK);
 }
 
 
+#ifdef ASSERT
+void MethodHandleWalker::verify_args_and_signature(TRAPS) {
+  int index = _outgoing.length() - 1;
+  objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(chain().method_type_oop());
+  for (int i = 0, limit = ptypes->length(); i < limit; i++) {
+    BasicType t = java_lang_Class::as_BasicType(ptypes->obj_at(i));
+    if (t == T_ARRAY) t = T_OBJECT;
+    if (t == T_LONG || t == T_DOUBLE) {
+      assert(T_VOID == _outgoing.at(index).basic_type(), "types must match");
+      index--;
+    }
+    assert(t == _outgoing.at(index).basic_type(), "types must match");
+    index--;
+  }
+}
+#endif
+
+
 // -----------------------------------------------------------------------------
 // MethodHandleWalker::change_argument
 //
 // This is messy because some kinds of arguments are paired with
 // companion slots containing an empty value.
-void MethodHandleWalker::change_argument(BasicType old_type, int slot, BasicType new_type,
-                                         const ArgToken& new_arg) {
+void MethodHandleWalker::change_argument(BasicType old_type, int slot, const ArgToken& new_arg) {
+  BasicType new_type = new_arg.basic_type();
   int old_size = type2size[old_type];
   int new_size = type2size[new_type];
   if (old_size == new_size) {
     // simple case first
-    _outgoing.at_put(slot, make_state(new_type, new_arg));
+    _outgoing.at_put(slot, new_arg);
   } else if (old_size > new_size) {
     for (int i = old_size - 1; i >= new_size; i--) {
-      assert((i != 0) == (_outgoing.at(slot + i)._type == T_VOID), "");
+      assert((i != 0) == (_outgoing.at(slot + i).basic_type() == T_VOID), "");
       _outgoing.remove_at(slot + i);
     }
     if (new_size > 0)
-      _outgoing.at_put(slot, make_state(new_type, new_arg));
+      _outgoing.at_put(slot, new_arg);
     else
       _outgoing_argc -= 1;      // deleted a real argument
   } else {
     for (int i = old_size; i < new_size; i++) {
-      _outgoing.insert_before(slot + i, make_state(T_VOID, ArgToken(tt_void)));
+      _outgoing.insert_before(slot + i, ArgToken(tt_void));
     }
-    _outgoing.at_put(slot, make_state(new_type, new_arg));
+    _outgoing.at_put(slot, new_arg);
     if (old_size == 0)
       _outgoing_argc += 1;      // inserted a real argument
   }
+  assert(_outgoing_argc == argument_count_slow(), "empty slots under control");
 }
 
 
@@ -635,8 +832,15 @@
 int MethodHandleWalker::argument_count_slow() {
   int args_seen = 0;
   for (int i = _outgoing.length() - 1; i >= 0; i--) {
-    if (_outgoing.at(i)._type != T_VOID) {
+    if (_outgoing.at(i).basic_type() != T_VOID) {
       ++args_seen;
+      if (_outgoing.at(i).basic_type() == T_LONG ||
+          _outgoing.at(i).basic_type() == T_DOUBLE) {
+        assert(_outgoing.at(i + 1).basic_type() == T_VOID, "should only follow two word");
+      }
+    } else {
+      assert(_outgoing.at(i - 1).basic_type() == T_LONG ||
+             _outgoing.at(i - 1).basic_type() == T_DOUBLE, "should only follow two word");
     }
   }
   return args_seen;
@@ -652,7 +856,6 @@
   if (src != dst) {
     if (MethodHandles::same_basic_type_for_returns(src, dst, /*raw*/ true)) {
       if (MethodHandles::is_float_fixed_reinterpretation_cast(src, dst)) {
-        if (for_return)  Untested("MHW return raw conversion");  // still untested
         vmIntrinsics::ID iid = vmIntrinsics::for_raw_conversion(src, dst);
         if (iid == vmIntrinsics::_none) {
           lose("no raw conversion method", CHECK);
@@ -660,27 +863,33 @@
         ArgToken arglist[2];
         if (!for_return) {
           // argument type conversion
-          ArgToken arg = _outgoing.at(slot)._arg;
+          ArgToken arg = _outgoing.at(slot);
           assert(arg.token_type() >= tt_symbolic || src == arg.basic_type(), "sanity");
           arglist[0] = arg;         // outgoing 'this'
           arglist[1] = ArgToken();  // sentinel
-          arg = make_invoke(NULL, iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK);
+          arg = make_invoke(methodHandle(), iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK);
           change_argument(src, slot, dst, arg);
         } else {
           // return type conversion
-          klassOop arg_klass = NULL;
-          arglist[0] = make_parameter(src, arg_klass, -1, CHECK);  // return value
-          arglist[1] = ArgToken();                                 // sentinel
-          (void) make_invoke(NULL, iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK);
+          if (_return_conv == vmIntrinsics::_none) {
+            _return_conv = iid;
+          } else if (_return_conv == vmIntrinsics::for_raw_conversion(dst, src)) {
+            _return_conv = vmIntrinsics::_none;
+          } else if (_return_conv != zero_return_conv()) {
+            lose(err_msg("requested raw return conversion not allowed: %s -> %s (before %s)", type2name(src), type2name(dst), vmIntrinsics::name_at(_return_conv)), CHECK);
+          }
         }
       } else {
         // Nothing to do.
       }
+    } else if (for_return && (!is_subword_type(src) || !is_subword_type(dst))) {
+      // This can occur in exception-throwing MHs, which have a fictitious return value encoded as Void or Empty.
+      _return_conv = zero_return_conv();
     } else if (src == T_OBJECT && is_java_primitive(dst)) {
       // ref-to-prim: discard ref, push zero
       lose("requested ref-to-prim conversion not expected", CHECK);
     } else {
-      lose("requested raw conversion not allowed", CHECK);
+      lose(err_msg("requested raw conversion not allowed: %s -> %s", type2name(src), type2name(dst)), CHECK);
     }
   }
 }
@@ -689,13 +898,13 @@
 // -----------------------------------------------------------------------------
 // MethodHandleCompiler
 
-MethodHandleCompiler::MethodHandleCompiler(Handle root, methodHandle callee, int invoke_count, bool is_invokedynamic, TRAPS)
+MethodHandleCompiler::MethodHandleCompiler(Handle root, Symbol* name, Symbol* signature, int invoke_count, bool is_invokedynamic, TRAPS)
   : MethodHandleWalker(root, is_invokedynamic, THREAD),
-    _callee(callee),
     _invoke_count(invoke_count),
     _thread(THREAD),
     _bytecode(THREAD, 50),
     _constants(THREAD, 10),
+    _non_bcp_klasses(THREAD, 5),
     _cur_stack(0),
     _max_stack(0),
     _rtype(T_ILLEGAL)
@@ -705,8 +914,17 @@
   (void) _constants.append(NULL);
 
   // Set name and signature index.
-  _name_index      = cpool_symbol_put(_callee->name());
-  _signature_index = cpool_symbol_put(_callee->signature());
+  _name_index      = cpool_symbol_put(name);
+  _signature_index = cpool_symbol_put(signature);
+
+  // To make the resulting methods more recognizable by
+  // stack walkers and compiler heuristics,
+  // we put them in holder class MethodHandle.
+  // See klass_is_method_handle_adapter_holder
+  // and methodOopDesc::is_method_handle_adapter.
+  _target_klass = SystemDictionaryHandles::MethodHandle_klass();
+
+  check_non_bcp_klasses(java_lang_invoke_MethodHandle::type(root()), CHECK);
 
   // Get return type klass.
   Handle first_mtype(THREAD, chain().method_type_oop());
@@ -714,7 +932,8 @@
   _rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(first_mtype()), &_rklass);
   if (_rtype == T_ARRAY)  _rtype = T_OBJECT;
 
-  int params = _callee->size_of_parameters();  // Incoming arguments plus receiver.
+  ArgumentSizeComputer args(signature);
+  int params = args.size() + 1;  // Incoming arguments plus receiver.
   _num_params = for_invokedynamic() ? params - 1 : params;  // XXX Check if callee is static?
 }
 
@@ -728,11 +947,12 @@
   assert(_thread == THREAD, "must be same thread");
   methodHandle nullHandle;
   (void) walk(CHECK_(nullHandle));
+  record_non_bcp_klasses();
   return get_method_oop(CHECK_(nullHandle));
 }
 
 
-void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index) {
+void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index, int args_size) {
   Bytecodes::check(op);  // Are we legal?
 
   switch (op) {
@@ -808,6 +1028,14 @@
   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
   case Bytecodes::_d2f:
+  case Bytecodes::_iaload:
+  case Bytecodes::_laload:
+  case Bytecodes::_faload:
+  case Bytecodes::_daload:
+  case Bytecodes::_aaload:
+  case Bytecodes::_baload:
+  case Bytecodes::_caload:
+  case Bytecodes::_saload:
   case Bytecodes::_ireturn:
   case Bytecodes::_lreturn:
   case Bytecodes::_freturn:
@@ -821,9 +1049,14 @@
   // bi
   case Bytecodes::_ldc:
     assert(Bytecodes::format_bits(op, false) == (Bytecodes::_fmt_b|Bytecodes::_fmt_has_k), "wrong bytecode format");
-    assert((char) index == index, "index does not fit in 8-bit");
-    _bytecode.push(op);
-    _bytecode.push(index);
+    if (index == (index & 0xff)) {
+      _bytecode.push(op);
+      _bytecode.push(index);
+    } else {
+      _bytecode.push(Bytecodes::_ldc_w);
+      _bytecode.push(index >> 8);
+      _bytecode.push(index);
+    }
     break;
 
   case Bytecodes::_iload:
@@ -837,9 +1070,16 @@
   case Bytecodes::_dstore:
   case Bytecodes::_astore:
     assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bi, "wrong bytecode format");
-    assert((char) index == index, "index does not fit in 8-bit");
-    _bytecode.push(op);
-    _bytecode.push(index);
+    if (index == (index & 0xff)) {
+      _bytecode.push(op);
+      _bytecode.push(index);
+    } else {
+      // doesn't fit in a u1; use the wide form (two-byte index)
+      _bytecode.push(Bytecodes::_wide);
+      _bytecode.push(op);
+      _bytecode.push(index >> 8);
+      _bytecode.push(index);
+    }
     break;
 
   // bkk
@@ -847,7 +1087,7 @@
   case Bytecodes::_ldc2_w:
   case Bytecodes::_checkcast:
     assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bkk, "wrong bytecode format");
-    assert((short) index == index, "index does not fit in 16-bit");
+    assert((unsigned short) index == index, "index does not fit in 16-bit");
     _bytecode.push(op);
     _bytecode.push(index >> 8);
     _bytecode.push(index);
@@ -858,12 +1098,23 @@
   case Bytecodes::_invokespecial:
   case Bytecodes::_invokevirtual:
     assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format");
-    assert((short) index == index, "index does not fit in 16-bit");
+    assert((unsigned short) index == index, "index does not fit in 16-bit");
     _bytecode.push(op);
     _bytecode.push(index >> 8);
     _bytecode.push(index);
     break;
 
+  case Bytecodes::_invokeinterface:
+    assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format");
+    assert((unsigned short) index == index, "index does not fit in 16-bit");
+    assert(args_size > 0, "valid args_size");
+    _bytecode.push(op);
+    _bytecode.push(index >> 8);
+    _bytecode.push(index);
+    _bytecode.push(args_size);
+    _bytecode.push(0);
+    break;
+
   default:
     ShouldNotReachHere();
   }
@@ -929,6 +1180,7 @@
 
 void MethodHandleCompiler::emit_load_constant(ArgToken arg) {
   BasicType bt = arg.basic_type();
+  if (is_subword_type(bt)) bt = T_INT;
   switch (bt) {
   case T_INT: {
     jint value = arg.get_jint();
@@ -964,10 +1216,18 @@
   }
   case T_OBJECT: {
     Handle value = arg.object();
-    if (value.is_null())
+    if (value.is_null()) {
       emit_bc(Bytecodes::_aconst_null);
-    else
-      emit_bc(Bytecodes::_ldc, cpool_object_put(value));
+      break;
+    }
+    if (java_lang_Class::is_instance(value())) {
+      klassOop k = java_lang_Class::as_klassOop(value());
+      if (k != NULL) {
+        emit_bc(Bytecodes::_ldc, cpool_klass_put(k));
+        break;
+      }
+    }
+    emit_bc(Bytecodes::_ldc, cpool_object_put(value));
     break;
   }
   default:
@@ -982,7 +1242,8 @@
                                       const ArgToken& src, TRAPS) {
 
   BasicType srctype = src.basic_type();
-  int index = src.index();
+  TokenType tt = src.token_type();
+  int index = -1;
 
   switch (op) {
   case Bytecodes::_i2l:
@@ -1003,26 +1264,45 @@
   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
   case Bytecodes::_d2f:
-    emit_load(srctype, index);
+    if (tt == tt_constant) {
+      emit_load_constant(src);
+    } else {
+      emit_load(srctype, src.index());
+    }
     stack_pop(srctype);  // pop the src type
     emit_bc(op);
     stack_push(type);    // push the dest value
-    if (srctype != type)
+    if (tt != tt_constant)
+      index = src.index();
+    if (srctype != type || index == -1)
       index = new_local_index(type);
     emit_store(type, index);
     break;
 
   case Bytecodes::_checkcast:
-    emit_load(srctype, index);
+    if (tt == tt_constant) {
+      emit_load_constant(src);
+    } else {
+      emit_load(srctype, src.index());
+      index = src.index();
+    }
     emit_bc(op, cpool_klass_put(tk));
+    check_non_bcp_klass(tk, CHECK_(src));
+    // Allocate a new local for the type so that we don't hide the
+    // previous type from the verifier.
+    index = new_local_index(type);
     emit_store(srctype, index);
     break;
 
+  case Bytecodes::_nop:
+    // nothing to do
+    return src;
+
   default:
     if (op == Bytecodes::_illegal)
-      lose("no such primitive conversion", THREAD);
+      lose(err_msg("no such primitive conversion: %s -> %s", type2name(src.basic_type()), type2name(type)), THREAD);
     else
-      lose("bad primitive conversion op", THREAD);
+      lose(err_msg("bad primitive conversion op: %s", Bytecodes::name(op)), THREAD);
     return make_prim_constant(type, &zero_jvalue, THREAD);
   }
 
@@ -1040,15 +1320,15 @@
 
 // Emit bytecodes for the given invoke instruction.
 MethodHandleWalker::ArgToken
-MethodHandleCompiler::make_invoke(methodOop m, vmIntrinsics::ID iid,
+MethodHandleCompiler::make_invoke(methodHandle m, vmIntrinsics::ID iid,
                                   Bytecodes::Code op, bool tailcall,
                                   int argc, MethodHandleWalker::ArgToken* argv,
                                   TRAPS) {
   ArgToken zero;
-  if (m == NULL) {
+  if (m.is_null()) {
     // Get the intrinsic methodOop.
-    m = vmIntrinsics::method_for(iid);
-    if (m == NULL) {
+    m = methodHandle(THREAD, vmIntrinsics::method_for(iid));
+    if (m.is_null()) {
       lose(vmIntrinsics::name_at(iid), CHECK_(zero));
     }
   }
@@ -1057,13 +1337,46 @@
   Symbol*  name      = m->name();
   Symbol*  signature = m->signature();
 
-  if (tailcall) {
-    // Actually, in order to make these methods more recognizable,
-    // let's put them in holder class MethodHandle.  That way stack
-    // walkers and compiler heuristics can recognize them.
-    _target_klass = SystemDictionary::MethodHandle_klass();
+  if (iid == vmIntrinsics::_invokeGeneric &&
+      argc >= 1 && argv[0].token_type() == tt_constant) {
+    assert(m->intrinsic_id() == vmIntrinsics::_invokeExact, "");
+    Handle receiver = argv[0].object();
+    Handle rtype(THREAD, java_lang_invoke_MethodHandle::type(receiver()));
+    Handle mtype(THREAD, m->method_handle_type());
+    if (rtype() != mtype()) {
+      assert(java_lang_invoke_MethodType::form(rtype()) ==
+             java_lang_invoke_MethodType::form(mtype()),
+             "must be the same shape");
+      // customize m to the exact required rtype
+      bool has_non_bcp_klass = check_non_bcp_klasses(rtype(), CHECK_(zero));
+      TempNewSymbol sig2 = java_lang_invoke_MethodType::as_signature(rtype(), true, CHECK_(zero));
+      methodHandle m2;
+      if (!has_non_bcp_klass) {
+        methodOop m2_oop = SystemDictionary::find_method_handle_invoke(m->name(), sig2,
+                                                                       KlassHandle(), CHECK_(zero));
+        m2 = methodHandle(THREAD, m2_oop);
+      }
+      if (m2.is_null()) {
+        // just build it fresh
+        m2 = methodOopDesc::make_invoke_method(klass, m->name(), sig2, rtype, CHECK_(zero));
+        if (m2.is_null())
+          lose(err_msg("no customized invoker %s", sig2->as_utf8()), CHECK_(zero));
+      }
+      m = m2;
+      signature = m->signature();
+    }
   }
 
+  check_non_bcp_klass(klass, CHECK_(zero));
+  if (m->is_method_handle_invoke()) {
+    check_non_bcp_klasses(m->method_handle_type(), CHECK_(zero));
+  }
+
+  // Count the number of arguments, not the size
+  ArgumentCount asc(signature);
+  assert(argc == asc.size() + ((op == Bytecodes::_invokestatic || op == Bytecodes::_invokedynamic) ? 0 : 1),
+         "argc mismatch");
+
   // Inline the method.
   InvocationCounter* ic = m->invocation_counter();
   ic->set_carry_flag();
@@ -1096,7 +1409,7 @@
   int signature_index     = cpool_symbol_put(signature);
   int name_and_type_index = cpool_name_and_type_put(name_index, signature_index);
   int klass_index         = cpool_klass_put(klass);
-  int methodref_index     = cpool_methodref_put(klass_index, name_and_type_index);
+  int methodref_index     = cpool_methodref_put(op, klass_index, name_and_type_index, m);
 
   // Generate invoke.
   switch (op) {
@@ -1105,9 +1418,13 @@
   case Bytecodes::_invokevirtual:
     emit_bc(op, methodref_index);
     break;
-  case Bytecodes::_invokeinterface:
-    Unimplemented();
+
+  case Bytecodes::_invokeinterface: {
+    ArgumentSizeComputer asc(signature);
+    emit_bc(op, methodref_index, asc.size() + 1);
     break;
+  }
+
   default:
     ShouldNotReachHere();
   }
@@ -1116,8 +1433,23 @@
   // Otherwise, make a recursive call to some helper routine.
   BasicType rbt = m->result_type();
   if (rbt == T_ARRAY)  rbt = T_OBJECT;
+  stack_push(rbt);  // The return value is already pushed onto the stack.
   ArgToken ret;
   if (tailcall) {
+    if (return_conv() == zero_return_conv()) {
+      rbt = T_VOID;  // discard value
+    } else if (return_conv() != vmIntrinsics::_none) {
+      // return value conversion
+      int index = new_local_index(rbt);
+      emit_store(rbt, index);
+      ArgToken arglist[2];
+      arglist[0] = ArgToken(tt_temporary, rbt, index);
+      arglist[1] = ArgToken();  // sentinel
+      ret = make_invoke(methodHandle(), return_conv(), Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(zero));
+      set_return_conv(vmIntrinsics::_none);
+      rbt = ret.basic_type();
+      emit_load(rbt, ret.index());
+    }
     if (rbt != _rtype) {
       if (rbt == T_VOID) {
         // push a zero of the right sort
@@ -1161,8 +1493,10 @@
     case T_DOUBLE: emit_bc(Bytecodes::_dreturn); break;
     case T_VOID:   emit_bc(Bytecodes::_return);  break;
     case T_OBJECT:
-      if (_rklass.not_null() && _rklass() != SystemDictionary::Object_klass())
+      if (_rklass.not_null() && _rklass() != SystemDictionary::Object_klass() && !Klass::cast(_rklass())->is_interface()) {
         emit_bc(Bytecodes::_checkcast, cpool_klass_put(_rklass()));
+        check_non_bcp_klass(_rklass(), CHECK_(zero));
+      }
       emit_bc(Bytecodes::_areturn);
       break;
     default: ShouldNotReachHere();
@@ -1170,7 +1504,6 @@
     ret = ArgToken();  // Dummy return value.
   }
   else {
-    stack_push(rbt);  // The return value is already pushed onto the stack.
     int index = new_local_index(rbt);
     switch (rbt) {
     case T_BOOLEAN: case T_BYTE: case T_CHAR:  case T_SHORT:
@@ -1195,8 +1528,32 @@
                                  const MethodHandleWalker::ArgToken& base,
                                  const MethodHandleWalker::ArgToken& offset,
                                  TRAPS) {
-  Unimplemented();
-  return ArgToken();
+  switch (base.token_type()) {
+    case tt_parameter:
+    case tt_temporary:
+      emit_load(base.basic_type(), base.index());
+      break;
+    case tt_constant:
+      emit_load_constant(base);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+  switch (offset.token_type()) {
+    case tt_parameter:
+    case tt_temporary:
+      emit_load(offset.basic_type(), offset.index());
+      break;
+    case tt_constant:
+      emit_load_constant(offset);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+  emit_bc(op);
+  int index = new_local_index(type);
+  emit_store(type, index);
+  return ArgToken(tt_temporary, type, index);
 }
 
 
@@ -1219,7 +1576,7 @@
 
 //   for (int i = 1, imax = _constants.length(); i < imax; i++) {
 //     ConstantValue* con = _constants.at(i);
-//     if (con != NULL && con->is_primitive() && con->_type == bt) {
+//     if (con != NULL && con->is_primitive() && con.basic_type() == bt) {
 //       bool match = false;
 //       switch (type2size[bt]) {
 //       case 1:  if (pcon->_value.i == con->i)  match = true;  break;
@@ -1239,6 +1596,52 @@
   return index;
 }
 
+bool MethodHandleCompiler::check_non_bcp_klasses(Handle method_type, TRAPS) {
+  bool res = false;
+  for (int i = -1, len = java_lang_invoke_MethodType::ptype_count(method_type()); i < len; i++) {
+    oop ptype = (i == -1
+                 ? java_lang_invoke_MethodType::rtype(method_type())
+                 : java_lang_invoke_MethodType::ptype(method_type(), i));
+    res |= check_non_bcp_klass(java_lang_Class::as_klassOop(ptype), CHECK_(false));
+  }
+  return res;
+}
+
+bool MethodHandleCompiler::check_non_bcp_klass(klassOop klass, TRAPS) {
+  klass = methodOopDesc::check_non_bcp_klass(klass);
+  if (klass != NULL) {
+    Symbol* name = Klass::cast(klass)->name();
+    for (int i = _non_bcp_klasses.length() - 1; i >= 0; i--) {
+      klassOop k2 = _non_bcp_klasses.at(i)();
+      if (Klass::cast(k2)->name() == name) {
+        if (k2 != klass) {
+          lose(err_msg("unsupported klass name alias %s", name->as_utf8()), THREAD);
+        }
+        return true;
+      }
+    }
+    _non_bcp_klasses.append(KlassHandle(THREAD, klass));
+    return true;
+  }
+  return false;
+}
+
+void MethodHandleCompiler::record_non_bcp_klasses() {
+  // Append extra klasses to constant pool, to guide klass lookup.
+  for (int k = 0; k < _non_bcp_klasses.length(); k++) {
+    klassOop non_bcp_klass = _non_bcp_klasses.at(k)();
+    bool add_to_cp = true;
+    for (int j = 1; j < _constants.length(); j++) {
+      ConstantValue* cv = _constants.at(j);
+      if (cv != NULL && cv->tag() == JVM_CONSTANT_Class
+          && cv->klass_oop() == non_bcp_klass) {
+        add_to_cp = false;
+        break;
+      }
+    }
+    if (add_to_cp)  cpool_klass_put(non_bcp_klass);
+  }
+}
 
 constantPoolHandle MethodHandleCompiler::get_constant_pool(TRAPS) const {
   constantPoolHandle nullHandle;
@@ -1258,6 +1661,8 @@
     case JVM_CONSTANT_Double:      cpool->double_at_put(       i, cv->get_jdouble()                    ); break;
     case JVM_CONSTANT_Class:       cpool->klass_at_put(        i, cv->klass_oop()                      ); break;
     case JVM_CONSTANT_Methodref:   cpool->method_at_put(       i, cv->first_index(), cv->second_index()); break;
+    case JVM_CONSTANT_InterfaceMethodref:
+                                cpool->interface_method_at_put(i, cv->first_index(), cv->second_index()); break;
     case JVM_CONSTANT_NameAndType: cpool->name_and_type_at_put(i, cv->first_index(), cv->second_index()); break;
     case JVM_CONSTANT_Object:      cpool->object_at_put(       i, cv->object_oop()                     ); break;
     default: ShouldNotReachHere();
@@ -1272,6 +1677,8 @@
     }
   }
 
+  cpool->set_preresolution();
+
   // Set the constant pool holder to the target method's class.
   cpool->set_pool_holder(_target_klass());
 
@@ -1318,6 +1725,34 @@
   objArrayHandle methods(THREAD, m_array);
   methods->obj_at_put(0, m());
   Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(empty));  // Use fake class.
+  Rewriter::relocate_and_link(_target_klass(), methods, CHECK_(empty));  // Use fake class.
+
+  // Pre-resolve selected CP cache entries, to avoid problems with class loader scoping.
+  constantPoolCacheHandle cpc(THREAD, cpool->cache());
+  for (int i = 0; i < cpc->length(); i++) {
+    ConstantPoolCacheEntry* e = cpc->entry_at(i);
+    assert(!e->is_secondary_entry(), "no indy instructions in here, yet");
+    int constant_pool_index = e->constant_pool_index();
+    ConstantValue* cv = _constants.at(constant_pool_index);
+    if (!cv->has_linkage())  continue;
+    methodHandle m = cv->linkage();
+    int index;
+    switch (cv->tag()) {
+    case JVM_CONSTANT_Methodref:
+      index = m->vtable_index();
+      if (m->is_static()) {
+        e->set_method(Bytecodes::_invokestatic, m, index);
+      } else {
+        e->set_method(Bytecodes::_invokespecial, m, index);
+        e->set_method(Bytecodes::_invokevirtual, m, index);
+      }
+      break;
+    case JVM_CONSTANT_InterfaceMethodref:
+      index = klassItable::compute_itable_index(m());
+      e->set_interface_call(m, index);
+      break;
+    }
+  }
 
   // Set the invocation counter's count to the invoke count of the
   // original call site.
@@ -1370,13 +1805,11 @@
     _strbuf.reset();
     return s;
   }
-  ArgToken token(const char* str) {
-    jvalue string_con;
-    string_con.j = (intptr_t) str;
-    return ArgToken(tt_symbolic, T_LONG, string_con);
+  ArgToken token(const char* str, BasicType type) {
+    return ArgToken(str, type);
   }
   const char* string(ArgToken token) {
-    return (const char*) (intptr_t) token.get_jlong();
+    return token.str();
   }
   void start_params() {
     _param_state <<= 1;
@@ -1395,12 +1828,12 @@
   }
   ArgToken maybe_make_temp(const char* statement_op, BasicType type, const char* temp_name) {
     const char* value = strbuf();
-    if (!_verbose)  return token(value);
+    if (!_verbose)  return token(value, type);
     // make an explicit binding for each separate value
     _strbuf.print("%s%d", temp_name, ++_temp_num);
     const char* temp = strbuf();
     _out->print("\n  %s %s %s = %s;", statement_op, type2name(type), temp, value);
-    return token(temp);
+    return token(temp, type);
   }
 
 public:
@@ -1411,12 +1844,15 @@
       _param_state(0),
       _temp_num(0)
   {
+    out->print("MethodHandle:");
+    java_lang_invoke_MethodType::print_signature(java_lang_invoke_MethodHandle::type(root()), out);
+    out->print(" : #");
     start_params();
   }
   virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) {
     if (argnum < 0) {
       end_params();
-      return token("return");
+      return token("return", type);
     }
     if ((_param_state & 1) == 0) {
       _param_state |= 1;
@@ -1431,7 +1867,7 @@
     const char* arg = strbuf();
     put_type_name(type, tk, _out);
     _out->print(" %s", arg);
-    return token(arg);
+    return token(arg, type);
   }
   virtual ArgToken make_oop_constant(oop con, TRAPS) {
     if (con == NULL)
@@ -1474,12 +1910,12 @@
     _strbuf.print(")");
     return maybe_make_temp("fetch", type, "x");
   }
-  virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid,
+  virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid,
                                Bytecodes::Code op, bool tailcall,
                                int argc, ArgToken* argv, TRAPS) {
     Symbol* name;
     Symbol* sig;
-    if (m != NULL) {
+    if (m.not_null()) {
       name = m->name();
       sig  = m->signature();
     } else {
@@ -1518,7 +1954,7 @@
     out->print("\n");
   }
   static void print(Handle root, bool verbose = Verbose, outputStream* out = tty) {
-    EXCEPTION_MARK;
+    Thread* THREAD = Thread::current();
     ResourceMark rm;
     MethodHandlePrinter printer(root, verbose, out, THREAD);
     if (!HAS_PENDING_EXCEPTION)
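
The emit_bc changes in this file select the bytecode encoding from the index width: a one-byte index uses the short forms (ldc, plain iload/istore), a larger index falls back to ldc_w or the wide prefix, and invokeinterface carries a count byte plus a mandatory zero byte. Below is a minimal standalone sketch of that selection logic (not part of the changeset); the opcode values are the standard JVM ones, and the Bytecode buffer type is purely illustrative.

#include <cassert>
#include <cstdint>
#include <vector>

typedef std::vector<uint8_t> Bytecode;

static void emit_ldc(Bytecode& code, int cp_index) {
  if (cp_index == (cp_index & 0xff)) {
    code.push_back(0x12);                     // ldc: one-byte constant pool index
    code.push_back(uint8_t(cp_index));
  } else {
    code.push_back(0x13);                     // ldc_w: two-byte constant pool index
    code.push_back(uint8_t(cp_index >> 8));
    code.push_back(uint8_t(cp_index));
  }
}

static void emit_local_op(Bytecode& code, uint8_t op, int local_index) {
  if (local_index == (local_index & 0xff)) {
    code.push_back(op);                       // e.g. iload/istore with a one-byte local index
    code.push_back(uint8_t(local_index));
  } else {
    code.push_back(0xc4);                     // wide prefix: the local index widens to two bytes
    code.push_back(op);
    code.push_back(uint8_t(local_index >> 8));
    code.push_back(uint8_t(local_index));
  }
}

static void emit_invokeinterface(Bytecode& code, int cp_index, int args_size) {
  assert(args_size > 0 && "receiver plus argument slots");
  code.push_back(0xb9);                       // invokeinterface: two-byte constant pool index
  code.push_back(uint8_t(cp_index >> 8));
  code.push_back(uint8_t(cp_index));
  code.push_back(uint8_t(args_size));         // count byte
  code.push_back(0);                          // fourth operand byte must be zero
}
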
--- a/hotspot/src/share/vm/prims/methodHandleWalk.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/prims/methodHandleWalk.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -56,6 +56,10 @@
   int BoundMethodHandle_vmargslot()    { return java_lang_invoke_BoundMethodHandle::vmargslot(method_handle_oop()); }
   int AdapterMethodHandle_conversion() { return java_lang_invoke_AdapterMethodHandle::conversion(method_handle_oop()); }
 
+#ifdef ASSERT
+  void print_impl(TRAPS);
+#endif
+
 public:
   MethodHandleChain(Handle root, TRAPS)
     : _root(root)
@@ -94,11 +98,21 @@
   int       bound_arg_slot()    { assert(is_bound(), ""); return _arg_slot; }
   oop       bound_arg_oop()     { assert(is_bound(), ""); return BoundMethodHandle_argument_oop(); }
 
+  methodHandle last_method()    { assert(is_last(), ""); return _last_method; }
   methodOop last_method_oop()   { assert(is_last(), ""); return _last_method(); }
   Bytecodes::Code last_invoke_code() { assert(is_last(), ""); return _last_invoke; }
 
   void lose(const char* msg, TRAPS);
   const char* lose_message()    { return _lose_message; }
+
+#ifdef ASSERT
+  // Print a symbolic description of a method handle chain, including
+  // the signature for each method.  The signatures are printed in
+  // slot order to make it easier to understand.
+  void print();
+  static void print(Handle mh);
+  static void print(oopDesc* mh);
+#endif
 };
 
 
@@ -126,39 +140,35 @@
     Handle    _handle;
 
   public:
-    ArgToken(TokenType tt = tt_illegal) : _tt(tt) {}
-    ArgToken(TokenType tt, BasicType bt, jvalue value) : _tt(tt), _bt(bt), _value(value) {}
+    ArgToken(TokenType tt = tt_illegal) : _tt(tt), _bt(tt == tt_void ? T_VOID : T_ILLEGAL) {
+      assert(tt == tt_illegal || tt == tt_void, "invalid token type");
+    }
 
     ArgToken(TokenType tt, BasicType bt, int index) : _tt(tt), _bt(bt) {
+      assert(_tt == tt_parameter || _tt == tt_temporary, "must have index");
       _value.i = index;
     }
 
-    ArgToken(TokenType tt, BasicType bt, Handle value) : _tt(tt), _bt(bt) {
-      _handle = value;
+    ArgToken(BasicType bt, jvalue value) : _tt(tt_constant), _bt(bt), _value(value) { assert(_bt != T_OBJECT, "wrong constructor"); }
+    ArgToken(Handle handle) : _tt(tt_constant), _bt(T_OBJECT), _handle(handle) {}
+
+
+    ArgToken(const char* str, BasicType type) : _tt(tt_symbolic), _bt(type) {
+      _value.j = (intptr_t)str;
     }
 
     TokenType token_type()  const { return _tt; }
     BasicType basic_type()  const { return _bt; }
-    int       index()       const { return _value.i; }
-    Handle    object()      const { return _handle; }
-
-    jint      get_jint()    const { return _value.i; }
-    jlong     get_jlong()   const { return _value.j; }
-    jfloat    get_jfloat()  const { return _value.f; }
-    jdouble   get_jdouble() const { return _value.d; }
-  };
+    bool      has_index()   const { return _tt == tt_parameter || _tt == tt_temporary; }
+    int       index()       const { assert(has_index(), "must have index"); return _value.i; }
+    Handle    object()      const { assert(_bt == T_OBJECT, "wrong accessor"); assert(_tt == tt_constant, "value type"); return _handle; }
+    const char* str()       const { assert(_tt == tt_symbolic, "string type"); return (const char*)(intptr_t)_value.j; }
 
-  // Abstract interpretation state:
-  struct SlotState {
-    BasicType _type;
-    ArgToken  _arg;
-    SlotState() : _type(), _arg() {}
+    jint      get_jint()    const { assert(_bt == T_INT || is_subword_type(_bt), "wrong accessor"); assert(_tt == tt_constant, "value types"); return _value.i; }
+    jlong     get_jlong()   const { assert(_bt == T_LONG, "wrong accessor");   assert(_tt == tt_constant, "value types"); return _value.j; }
+    jfloat    get_jfloat()  const { assert(_bt == T_FLOAT, "wrong accessor");  assert(_tt == tt_constant, "value types"); return _value.f; }
+    jdouble   get_jdouble() const { assert(_bt == T_DOUBLE, "wrong accessor"); assert(_tt == tt_constant, "value types"); return _value.d; }
   };
-  static SlotState make_state(BasicType type, ArgToken arg) {
-    SlotState ss;
-    ss._type = type; ss._arg = arg;
-    return ss;
-  }
 
 private:
   MethodHandleChain _chain;
@@ -169,33 +179,31 @@
   // TOS is always _outgoing.at(0), so simple pushes and pops shift the whole _outgoing array.
   // If there is a receiver in the current argument list, it is at _outgoing.at(_outgoing.length()-1).
   // If a value at _outgoing.at(n) is T_LONG or T_DOUBLE, the value at _outgoing.at(n+1) is T_VOID.
-  GrowableArray<SlotState> _outgoing;       // current outgoing parameter slots
+  GrowableArray<ArgToken>  _outgoing;       // current outgoing parameter slots
   int                      _outgoing_argc;  // # non-empty outgoing slots
 
+  vmIntrinsics::ID _return_conv;            // Return conversion required by raw retypes.
+
   // Replace a value of type old_type at slot (and maybe slot+1) with the new value.
   // If old_type != T_VOID, remove the old argument at that point.
   // If new_type != T_VOID, insert the new argument at that point.
   // Insert or delete a second empty slot as needed.
-  void change_argument(BasicType old_type, int slot, BasicType new_type, const ArgToken& new_arg);
+  void change_argument(BasicType old_type, int slot, const ArgToken& new_arg);
+  void change_argument(BasicType old_type, int slot, BasicType type, const ArgToken& new_arg) {
+    assert(type == new_arg.basic_type(), "must agree");
+    change_argument(old_type, slot, new_arg);
+  }
 
   // Raw retype conversions for OP_RAW_RETYPE.
   void retype_raw_conversion(BasicType src, BasicType dst, bool for_return, int slot, TRAPS);
   void retype_raw_argument_type(BasicType src, BasicType dst, int slot, TRAPS) { retype_raw_conversion(src, dst, false, slot, CHECK); }
   void retype_raw_return_type(  BasicType src, BasicType dst,           TRAPS) { retype_raw_conversion(src, dst, true,  -1,   CHECK); }
 
-  SlotState* slot_state(int slot) {
-    if (slot < 0 || slot >= _outgoing.length())
-      return NULL;
-    return _outgoing.adr_at(slot);
+  BasicType arg_type(int slot) {
+    return _outgoing.at(slot).basic_type();
   }
-  BasicType slot_type(int slot) {
-    SlotState* ss = slot_state(slot);
-    if (ss == NULL)
-      return T_ILLEGAL;
-    return ss->_type;
-  }
-  bool slot_has_argument(int slot) {
-    return slot_type(slot) < T_VOID;
+  bool has_argument(int slot) {
+    return arg_type(slot) < T_VOID;
   }
 
 #ifdef ASSERT
@@ -207,12 +215,15 @@
 
   void walk_incoming_state(TRAPS);
 
+  void verify_args_and_signature(TRAPS) NOT_DEBUG_RETURN;
+
 public:
   MethodHandleWalker(Handle root, bool for_invokedynamic, TRAPS)
     : _chain(root, THREAD),
       _for_invokedynamic(for_invokedynamic),
       _outgoing(THREAD, 10),
-      _outgoing_argc(0)
+      _outgoing_argc(0),
+      _return_conv(vmIntrinsics::_none)
   {
     _local_index = for_invokedynamic ? 0 : 1;
   }
@@ -221,6 +232,10 @@
 
   bool for_invokedynamic() const { return _for_invokedynamic; }
 
+  vmIntrinsics::ID return_conv() const { return _return_conv; }
+  void set_return_conv(vmIntrinsics::ID c) { _return_conv = c; }
+  static vmIntrinsics::ID zero_return_conv() { return vmIntrinsics::_min; }
+
   int new_local_index(BasicType bt) {
     //int index = _for_invokedynamic ? _local_index : _local_index - 1;
     int index = _local_index;
@@ -236,9 +251,9 @@
   virtual ArgToken make_oop_constant(oop con, TRAPS) = 0;
   virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS) = 0;
   virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS) = 0;
-  virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS) = 0;
+  virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS) = 0;
 
-  // For make_invoke, the methodOop can be NULL if the intrinsic ID
+  // For make_invoke, the methodHandle can be NULL if the intrinsic ID
   // is something other than vmIntrinsics::_none.
 
   // and in case anyone cares to relate the previous actions to the chain:
@@ -256,7 +271,6 @@
 // The IR happens to be JVM bytecodes.
 class MethodHandleCompiler : public MethodHandleWalker {
 private:
-  methodHandle _callee;
   int          _invoke_count;  // count the original call site has been executed
   KlassHandle  _rklass;        // Return type for casting.
   BasicType    _rtype;
@@ -268,12 +282,13 @@
   static jvalue one_jvalue;
 
   // Fake constant pool entry.
-  class ConstantValue {
+  class ConstantValue : public ResourceObj {
   private:
     int       _tag;   // Constant pool tag type.
     JavaValue _value;
     Handle    _handle;
     Symbol*   _sym;
+    methodHandle _method;  // pre-linkage
 
   public:
     // Constructor for oop types.
@@ -322,11 +337,21 @@
     jlong     get_jlong()    const { return _value.get_jlong();   }
     jfloat    get_jfloat()   const { return _value.get_jfloat();  }
     jdouble   get_jdouble()  const { return _value.get_jdouble(); }
+
+    void set_linkage(methodHandle method) {
+      assert(_method.is_null(), "");
+      _method = method;
+    }
+    bool     has_linkage()   const { return _method.not_null(); }
+    methodHandle linkage()   const { return _method; }
   };
 
   // Fake constant pool.
   GrowableArray<ConstantValue*> _constants;
 
+  // Non-BCP classes that appear in associated MethodTypes (require special handling).
+  GrowableArray<KlassHandle> _non_bcp_klasses;
+
   // Accumulated compiler state:
   GrowableArray<unsigned char> _bytecode;
 
@@ -362,15 +387,20 @@
     return _constants.append(cv);
   }
 
-  int cpool_oop_reference_put(int tag, int first_index, int second_index) {
+  int cpool_oop_reference_put(int tag, int first_index, int second_index, methodHandle method) {
     if (first_index == 0 && second_index == 0)  return 0;
     assert(first_index != 0 && second_index != 0, "no zero indexes");
     ConstantValue* cv = new ConstantValue(tag, first_index, second_index);
+    if (method.not_null())  cv->set_linkage(method);
     return _constants.append(cv);
   }
 
   int cpool_primitive_put(BasicType type, jvalue* con);
 
+  bool check_non_bcp_klasses(Handle method_type, TRAPS);
+  bool check_non_bcp_klass(klassOop klass, TRAPS);
+  void record_non_bcp_klasses();
+
   int cpool_int_put(jint value) {
     jvalue con; con.i = value;
     return cpool_primitive_put(T_INT, &con);
@@ -397,14 +427,15 @@
   int cpool_klass_put(klassOop klass) {
     return cpool_oop_put(JVM_CONSTANT_Class, klass);
   }
-  int cpool_methodref_put(int class_index, int name_and_type_index) {
-    return cpool_oop_reference_put(JVM_CONSTANT_Methodref, class_index, name_and_type_index);
+  int cpool_methodref_put(Bytecodes::Code op, int class_index, int name_and_type_index, methodHandle method) {
+    int tag = (op == Bytecodes::_invokeinterface ? JVM_CONSTANT_InterfaceMethodref : JVM_CONSTANT_Methodref);
+    return cpool_oop_reference_put(tag, class_index, name_and_type_index, method);
   }
   int cpool_name_and_type_put(int name_index, int signature_index) {
-    return cpool_oop_reference_put(JVM_CONSTANT_NameAndType, name_index, signature_index);
+    return cpool_oop_reference_put(JVM_CONSTANT_NameAndType, name_index, signature_index, methodHandle());
   }
 
-  void emit_bc(Bytecodes::Code op, int index = 0);
+  void emit_bc(Bytecodes::Code op, int index = 0, int args_size = -1);
   void emit_load(BasicType bt, int index);
   void emit_store(BasicType bt, int index);
   void emit_load_constant(ArgToken arg);
@@ -414,15 +445,15 @@
   }
   virtual ArgToken make_oop_constant(oop con, TRAPS) {
     Handle h(THREAD, con);
-    return ArgToken(tt_constant, T_OBJECT, h);
+    return ArgToken(h);
   }
   virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) {
-    return ArgToken(tt_constant, type, *con);
+    return ArgToken(type, *con);
   }
 
   virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS);
   virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS);
-  virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS);
+  virtual ArgToken make_invoke(methodHandle m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS);
 
   // Get a real constant pool.
   constantPoolHandle get_constant_pool(TRAPS) const;
@@ -431,7 +462,7 @@
   methodHandle get_method_oop(TRAPS) const;
 
 public:
-  MethodHandleCompiler(Handle root, methodHandle callee, int invoke_count, bool for_invokedynamic, TRAPS);
+  MethodHandleCompiler(Handle root, Symbol* name, Symbol* signature, int invoke_count, bool for_invokedynamic, TRAPS);
 
   // Compile the given MH chain into bytecode.
   methodHandle compile(TRAPS);
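
The comments added in this header spell out the _outgoing slot convention that the new asserts depend on: a T_LONG or T_DOUBLE value owns two slots, the slot after it holds a T_VOID filler, and counting real arguments means skipping the fillers. A small standalone sketch of that invariant follows (not part of the changeset); the enum and helper are simplified stand-ins, not the HotSpot types.

#include <cassert>
#include <vector>

enum SlotType { S_INT, S_OBJECT, S_LONG, S_DOUBLE, S_VOID };

// Counts non-filler slots and checks the two-word pairing rule as it goes.
static int count_real_arguments(const std::vector<SlotType>& slots) {
  int count = 0;
  for (size_t i = 0; i < slots.size(); i++) {
    if (slots[i] != S_VOID) {
      count++;
      if (slots[i] == S_LONG || slots[i] == S_DOUBLE) {
        // a two-word value must be followed by its T_VOID filler slot
        assert(i + 1 < slots.size() && slots[i + 1] == S_VOID);
      }
    } else {
      // a filler slot may only follow a two-word value
      assert(i > 0 && (slots[i - 1] == S_LONG || slots[i - 1] == S_DOUBLE));
    }
  }
  return count;
}

// Example: (int, long, Object) occupies four slots but counts as three arguments.
//   std::vector<SlotType> s = { S_INT, S_LONG, S_VOID, S_OBJECT };
//   assert(count_real_arguments(s) == 3);
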
--- a/hotspot/src/share/vm/prims/methodHandles.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -24,10 +24,14 @@
 
 #include "precompiled.hpp"
 #include "classfile/symbolTable.hpp"
+#include "compiler/compileBroker.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/oopMapCache.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "prims/methodHandles.hpp"
+#include "prims/methodHandleWalk.hpp"
+#include "runtime/compilationPolicy.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/signature.hpp"
@@ -158,8 +162,7 @@
 };
 
 // Adapters.
-MethodHandlesAdapterBlob* MethodHandles::_adapter_code      = NULL;
-int                       MethodHandles::_adapter_code_size = StubRoutines::method_handles_adapters_code_size;
+MethodHandlesAdapterBlob* MethodHandles::_adapter_code = NULL;
 
 jobject MethodHandles::_raise_exception_method;
 
@@ -198,9 +201,9 @@
 
   ResourceMark rm;
   TraceTime timer("MethodHandles adapters generation", TraceStartupTime);
-  _adapter_code = MethodHandlesAdapterBlob::create(_adapter_code_size);
+  _adapter_code = MethodHandlesAdapterBlob::create(adapter_code_size);
   if (_adapter_code == NULL)
-    vm_exit_out_of_memory(_adapter_code_size, "CodeCache: no room for MethodHandles adapters");
+    vm_exit_out_of_memory(adapter_code_size, "CodeCache: no room for MethodHandles adapters");
   CodeBuffer code(_adapter_code);
   MethodHandlesAdapterGenerator g(&code);
   g.generate();
@@ -628,6 +631,8 @@
   // convert the external string name to an internal symbol
   TempNewSymbol name = java_lang_String::as_symbol_or_null(name_str());
   if (name == NULL)  return;  // no such name
+  if (name == vmSymbols::class_initializer_name())
+    return; // illegal name
 
   Handle polymorphic_method_type;
   bool polymorphic_signature = false;
@@ -764,7 +769,9 @@
         m = NULL;
         // try again with a different class loader...
       }
-      if (m != NULL) {
+      if (m != NULL &&
+          m->is_method_handle_invoke() &&
+          java_lang_invoke_MethodType::equals(polymorphic_method_type(), m->method_handle_type())) {
         int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS);
         java_lang_invoke_MemberName::set_vmtarget(mname(),  m);
         java_lang_invoke_MemberName::set_vmindex(mname(),   m->vtable_index());
@@ -983,6 +990,48 @@
 // This is for debugging and reflection.
 oop MethodHandles::encode_target(Handle mh, int format, TRAPS) {
   assert(java_lang_invoke_MethodHandle::is_instance(mh()), "must be a MH");
+  if (format == ETF_FORCE_DIRECT_HANDLE ||
+      format == ETF_COMPILE_DIRECT_HANDLE) {
+    // Internal function for stress testing.
+    Handle mt = java_lang_invoke_MethodHandle::type(mh());
+    int invocation_count = 10000;
+    TempNewSymbol signature = java_lang_invoke_MethodType::as_signature(mt(), true, CHECK_NULL);
+    bool omit_receiver_argument = true;
+    MethodHandleCompiler mhc(mh, vmSymbols::invoke_name(), signature, invocation_count, omit_receiver_argument, CHECK_NULL);
+    methodHandle m = mhc.compile(CHECK_NULL);
+    if (StressMethodHandleWalk && Verbose || PrintMiscellaneous) {
+      tty->print_cr("MethodHandleNatives.getTarget(%s)",
+                    format == ETF_FORCE_DIRECT_HANDLE ? "FORCE_DIRECT" : "COMPILE_DIRECT");
+      if (Verbose) {
+        m->print_codes();
+      }
+    }
+    if (StressMethodHandleWalk) {
+      InterpreterOopMap mask;
+      OopMapCache::compute_one_oop_map(m, m->code_size() - 1, &mask);
+    }
+    if ((format == ETF_COMPILE_DIRECT_HANDLE ||
+         CompilationPolicy::must_be_compiled(m))
+        && !instanceKlass::cast(m->method_holder())->is_not_initialized()
+        && CompilationPolicy::can_be_compiled(m)) {
+      // Force compilation
+      CompileBroker::compile_method(m, InvocationEntryBci,
+                                    CompLevel_initial_compile,
+                                    methodHandle(), 0, "MethodHandleNatives.getTarget",
+                                    CHECK_NULL);
+    }
+    // Now wrap m in a DirectMethodHandle.
+    instanceKlassHandle dmh_klass(THREAD, SystemDictionary::DirectMethodHandle_klass());
+    Handle dmh = dmh_klass->allocate_instance_handle(CHECK_NULL);
+    JavaValue ignore_result(T_VOID);
+    Symbol* init_name = vmSymbols::object_initializer_name();
+    Symbol* init_sig  = vmSymbols::notifyGenericMethodType_signature();
+    JavaCalls::call_special(&ignore_result, dmh,
+                            SystemDictionaryHandles::MethodHandle_klass(), init_name, init_sig,
+                            java_lang_invoke_MethodHandle::type(mh()), CHECK_NULL);
+    MethodHandles::init_DirectMethodHandle(dmh, m, false, CHECK_NULL);
+    return dmh();
+  }
   if (format == ETF_HANDLE_OR_METHOD_NAME) {
     oop target = java_lang_invoke_MethodHandle::vmtarget(mh());
     if (target == NULL) {
@@ -1218,6 +1267,12 @@
           klassOop aklass_oop = SystemDictionary::resolve_or_null(name, loader, domain, CHECK);
           if (aklass_oop != NULL)
             aklass = KlassHandle(THREAD, aklass_oop);
+          if (aklass.is_null() &&
+              pklass.not_null() &&
+              loader.is_null() &&
+              pklass->name() == name)
+            // accept name equivalence here, since that's the best we can do
+            aklass = pklass;
         }
       } else {
         // for method handle invokers we don't look at the name in the signature
@@ -1303,6 +1358,7 @@
   // Verify that argslot points at the given argnum.
   int check_slot = argument_slot(java_lang_invoke_MethodHandle::type(mh()), argnum);
   if (argslot != check_slot || argslot < 0) {
+    ResourceMark rm;
     const char* fmt = "for argnum of %d, vmargslot is %d, should be %d";
     size_t msglen = strlen(fmt) + 3*11 + 1;
     char* msg = NEW_RESOURCE_ARRAY(char, msglen);
@@ -1718,6 +1774,7 @@
 
 void MethodHandles::verify_BoundMethodHandle(Handle mh, Handle target, int argnum,
                                              bool direct_to_method, TRAPS) {
+  ResourceMark rm;
   Handle ptype_handle(THREAD,
                            java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum));
   KlassHandle ptype_klass;
@@ -1827,6 +1884,7 @@
   bool direct_to_method = false;
   if (OptimizeMethodHandles &&
       target->klass() == SystemDictionary::DirectMethodHandle_klass() &&
+      (argnum != 0 || java_lang_invoke_BoundMethodHandle::argument(mh()) != NULL) &&
       (argnum == 0 || java_lang_invoke_DirectMethodHandle::vmindex(target()) < 0)) {
     KlassHandle receiver_limit; int decode_flags = 0;
     methodHandle m = decode_method(target(), receiver_limit, decode_flags);
@@ -1880,6 +1938,7 @@
 }
 
 void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) {
+  ResourceMark rm;
   jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh());
   int  argslot    = java_lang_invoke_AdapterMethodHandle::vmargslot(mh());
 
@@ -1972,53 +2031,77 @@
       }
       break;
     case _adapter_swap_args:
-    case _adapter_rot_args:
       {
-        if (!src || src != dest) {
+        if (!src || !dest) {
           err = "adapter requires src/dest conversion subfields for swap"; break;
         }
-        int swap_size = type2size[src];
-        int slot_limit = java_lang_invoke_MethodHandle::vmslots(target());
+        int src_size  = type2size[src];
+        if (src_size != type2size[dest]) {
+          err = "adapter requires equal sizes for src/dest"; break;
+        }
         int src_slot   = argslot;
         int dest_slot  = vminfo;
-        bool rotate_up = (src_slot > dest_slot); // upward rotation
         int src_arg    = argnum;
-        int dest_arg   = argument_slot_to_argnum(dst_mtype(), dest_slot);
+        int dest_arg   = argument_slot_to_argnum(src_mtype(), dest_slot);
         verify_vmargslot(mh, dest_arg, dest_slot, CHECK);
-        if (!(dest_slot >= src_slot + swap_size) &&
-            !(src_slot >= dest_slot + swap_size)) {
-          err = "source, destination slots must be distinct";
-        } else if (ek == _adapter_swap_args && !(src_slot > dest_slot)) {
-          err = "source of swap must be deeper in stack";
-        } else if (ek == _adapter_swap_args) {
-          err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), dest_arg),
-                                           java_lang_invoke_MethodType::ptype(dst_mtype(), src_arg),
-                                           dest_arg);
-        } else if (ek == _adapter_rot_args) {
-          if (rotate_up) {
-            assert((src_slot > dest_slot) && (src_arg < dest_arg), "");
-            // rotate up: [dest_slot..src_slot-ss] --> [dest_slot+ss..src_slot]
-            // that is:   [src_arg+1..dest_arg] --> [src_arg..dest_arg-1]
-            for (int i = src_arg+1; i <= dest_arg && err == NULL; i++) {
-              err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), i),
-                                               java_lang_invoke_MethodType::ptype(dst_mtype(), i-1),
-                                               i);
-            }
-          } else { // rotate down
-            assert((src_slot < dest_slot) && (src_arg > dest_arg), "");
-            // rotate down: [src_slot+ss..dest_slot] --> [src_slot..dest_slot-ss]
-            // that is:     [dest_arg..src_arg-1] --> [dst_arg+1..src_arg]
-            for (int i = dest_arg; i <= src_arg-1 && err == NULL; i++) {
-              err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), i),
-                                               java_lang_invoke_MethodType::ptype(dst_mtype(), i+1),
-                                               i);
-            }
-          }
+        if (!(dest_slot >= src_slot + src_size) &&
+            !(src_slot >= dest_slot + src_size)) {
+          err = "source, destination slots must be distinct"; break;
+        } else if (!(src_slot > dest_slot)) {
+          err = "source of swap must be deeper in stack"; break;
         }
+        err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), dest_arg),
+                                         java_lang_invoke_MethodType::ptype(dst_mtype(), src_arg),
+                                         dest_arg);
         if (err == NULL)
           err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), src_arg),
                                            java_lang_invoke_MethodType::ptype(dst_mtype(), dest_arg),
                                            src_arg);
+        break;
+      }
+    case _adapter_rot_args:
+      {
+        if (!src || !dest) {
+          err = "adapter requires src/dest conversion subfields for rotate"; break;
+        }
+        int src_slot   = argslot;
+        int limit_raw  = vminfo;
+        bool rot_down  = (src_slot < limit_raw);
+        int limit_bias = (rot_down ? MethodHandles::OP_ROT_ARGS_DOWN_LIMIT_BIAS : 0);
+        int limit_slot = limit_raw - limit_bias;
+        int src_arg    = argnum;
+        int limit_arg  = argument_slot_to_argnum(src_mtype(), limit_slot);
+        verify_vmargslot(mh, limit_arg, limit_slot, CHECK);
+        if (src_slot == limit_slot) {
+          err = "source, destination slots must be distinct"; break;
+        }
+        if (!rot_down) {  // rotate slots up == shift arguments left
+          // limit_slot is an inclusive lower limit
+          assert((src_slot > limit_slot) && (src_arg < limit_arg), "");
+          // rotate up: [limit_slot..src_slot-ss] --> [limit_slot+ss..src_slot]
+          // that is:   [src_arg+1..limit_arg] --> [src_arg..limit_arg-1]
+          for (int i = src_arg+1; i <= limit_arg && err == NULL; i++) {
+            err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), i),
+                                             java_lang_invoke_MethodType::ptype(dst_mtype(), i-1),
+                                             i);
+          }
+        } else { // rotate slots down == shift arguments right
+          // limit_slot is an exclusive upper limit
+          assert((src_slot < limit_slot - limit_bias) && (src_arg > limit_arg + limit_bias), "");
+          // rotate down: [src_slot+ss..limit_slot) --> [src_slot..limit_slot-ss)
+          // that is:     (limit_arg..src_arg-1] --> (limit_arg+1..src_arg]
+          for (int i = limit_arg+1; i <= src_arg-1 && err == NULL; i++) {
+            err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), i),
+                                             java_lang_invoke_MethodType::ptype(dst_mtype(), i+1),
+                                             i);
+          }
+        }
+        if (err == NULL) {
+          int dest_arg = (rot_down ? limit_arg+1 : limit_arg);
+          err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), src_arg),
+                                           java_lang_invoke_MethodType::ptype(dst_mtype(), dest_arg),
+                                           src_arg);
+        }
       }
       break;
     case _adapter_spread_args:
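
For readers tracing the rotate verification loops above: a minimal standalone sketch (not part of this patch) of the index remapping those loops check. Rotating the window up shifts each argument left by one and parks the rotated argument at the limit; rotating down shifts right by one and parks it just past the limit. The function name is illustrative only; the other names follow the hunk above.

#include <cassert>

// Destination argument index for source argument 'i' when 'src_arg' is rotated
// against the window bounded by 'limit_arg' (illustrative sketch, not VM code).
static int rotated_dest_arg(int i, int src_arg, int limit_arg, bool rotate_up) {
  if (rotate_up) {   // [src_arg+1..limit_arg] --> [src_arg..limit_arg-1], src_arg --> limit_arg
    assert(src_arg < limit_arg && src_arg <= i && i <= limit_arg);
    return (i == src_arg) ? limit_arg : i - 1;
  } else {           // (limit_arg..src_arg-1] --> (limit_arg+1..src_arg], src_arg --> limit_arg+1
    assert(src_arg > limit_arg && limit_arg < i && i <= src_arg);
    return (i == src_arg) ? limit_arg + 1 : i + 1;
  }
}
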
@@ -2331,7 +2414,6 @@
   case _adapter_rot_args:
     {
       int swap_slots = type2size[src];
-      int slot_limit = java_lang_invoke_AdapterMethodHandle::vmslots(mh());
       int src_slot   = argslot;
       int dest_slot  = vminfo;
       int rotate     = (ek_orig == _adapter_swap_args) ? 0 : (src_slot > dest_slot) ? 1 : -1;
@@ -2561,7 +2643,7 @@
   }
 
   if (err != NULL) {
-    throw_InternalError_for_bad_conversion(conversion, err, THREAD);
+    throw_InternalError_for_bad_conversion(conversion, err_msg("%s: conv_op %d ek_opt %d", err, conv_op, ek_opt), THREAD);
     return;
   }
 
@@ -2599,6 +2681,61 @@
   }
 }
 
+#ifdef ASSERT
+
+extern "C"
+void print_method_handle(oop mh);
+
+static void stress_method_handle_walk_impl(Handle mh, TRAPS) {
+  if (StressMethodHandleWalk) {
+    // Exercise the MethodHandleWalk code in various ways and validate
+    // the resulting method oop.  Some of these produce output so they
+    // are guarded under Verbose.
+    ResourceMark rm;
+    HandleMark hm;
+    if (Verbose) {
+      print_method_handle(mh());
+    }
+    TempNewSymbol name = SymbolTable::new_symbol("invoke", CHECK);
+    Handle mt = java_lang_invoke_MethodHandle::type(mh());
+    TempNewSymbol signature = java_lang_invoke_MethodType::as_signature(mt(), true, CHECK);
+    MethodHandleCompiler mhc(mh, name, signature, 10000, false, CHECK);
+    methodHandle m = mhc.compile(CHECK);
+    if (Verbose) {
+      m->print_codes();
+    }
+    InterpreterOopMap mask;
+    OopMapCache::compute_one_oop_map(m, m->code_size() - 1, &mask);
+    // compile to object code if -Xcomp or WizardMode
+    if ((WizardMode ||
+         CompilationPolicy::must_be_compiled(m))
+        && !instanceKlass::cast(m->method_holder())->is_not_initialized()
+        && CompilationPolicy::can_be_compiled(m)) {
+      // Force compilation
+      CompileBroker::compile_method(m, InvocationEntryBci,
+                                    CompLevel_initial_compile,
+                                    methodHandle(), 0, "StressMethodHandleWalk",
+                                    CHECK);
+    }
+  }
+}
+
+static void stress_method_handle_walk(Handle mh, TRAPS) {
+  stress_method_handle_walk_impl(mh, THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    oop ex = PENDING_EXCEPTION;
+    CLEAR_PENDING_EXCEPTION;
+    tty->print("StressMethodHandleWalk: ");
+    java_lang_Throwable::print(ex, tty);
+    tty->cr();
+  }
+}
+#else
+
+static void stress_method_handle_walk(Handle mh, TRAPS) {}
+
+#endif
+
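
As an aside on the guard pattern above: stress_method_handle_walk runs the walker only when the develop flag StressMethodHandleWalk is set, and any pending exception it raises is cleared and merely reported, so enabling the flag never changes program behavior. A rough standalone analogy in plain C++ (the VM uses its TRAPS/pending-exception convention, not C++ exceptions; run_diagnostic is a made-up name):

#include <exception>
#include <iostream>

template <typename Fn>
void run_diagnostic(bool enabled, Fn diagnostic) {
  if (!enabled) return;          // cheap exit when the stress flag is off
  try {
    diagnostic();                // exercise the extra validation code
  } catch (const std::exception& e) {
    // Report and continue: the diagnostic must not alter normal execution.
    std::cerr << "diagnostic failed: " << e.what() << '\n';
  }
}
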
 //
 // Here are the native methods on sun.invoke.MethodHandleImpl.
 // They are the private interface between this JVM and the HotSpot-specific
@@ -2615,14 +2752,14 @@
   ResourceMark rm;              // for error messages
 
   // This is the guy we are initializing:
-  if (mh_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
+  if (mh_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "self is null"); }
   Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh));
 
   // Early returns out of this method leave the DMH in an unfinished state.
   assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null");
 
   // which method are we really talking about?
-  if (target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
+  if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); }
   Handle target(THREAD, JNIHandles::resolve_non_null(target_jh));
   if (java_lang_invoke_MemberName::is_instance(target()) &&
       java_lang_invoke_MemberName::vmindex(target()) == VM_INDEX_UNINITIALIZED) {
@@ -2666,6 +2803,7 @@
   }
 
   MethodHandles::init_DirectMethodHandle(mh, m, (do_dispatch != JNI_FALSE), CHECK);
+  stress_method_handle_walk(mh, CHECK);
 }
 JVM_END
 
@@ -2675,13 +2813,13 @@
   ResourceMark rm;              // for error messages
 
   // This is the guy we are initializing:
-  if (mh_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
+  if (mh_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "self is null"); }
   Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh));
 
   // Early returns out of this method leave the BMH in an unfinished state.
   assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null");
 
-  if (target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
+  if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); }
   Handle target(THREAD, JNIHandles::resolve_non_null(target_jh));
 
   if (!java_lang_invoke_MethodHandle::is_instance(target())) {
@@ -2694,11 +2832,16 @@
                                                        receiver_limit,
                                                        decode_flags,
                                                        CHECK);
-    return;
+  } else {
+    // Build a BMH on top of a DMH or another BMH:
+    MethodHandles::init_BoundMethodHandle(mh, target, argnum, CHECK);
   }
 
-  // Build a BMH on top of a DMH or another BMH:
-  MethodHandles::init_BoundMethodHandle(mh, target, argnum, CHECK);
+  if (StressMethodHandleWalk) {
+    if (mh->klass() == SystemDictionary::BoundMethodHandle_klass())
+      stress_method_handle_walk(mh, CHECK);
+    // else don't, since the subclass has not yet initialized its own fields
+  }
 }
 JVM_END
 
@@ -2706,9 +2849,8 @@
 JVM_ENTRY(void, MHN_init_AMH(JNIEnv *env, jobject igcls, jobject mh_jh,
                              jobject target_jh, int argnum)) {
   // This is the guy we are initializing:
-  if (mh_jh == NULL || target_jh == NULL) {
-    THROW(vmSymbols::java_lang_InternalError());
-  }
+  if (mh_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "self is null"); }
+  if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); }
   Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh));
   Handle target(THREAD, JNIHandles::resolve_non_null(target_jh));
 
@@ -2716,6 +2858,7 @@
   assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null");
 
   MethodHandles::init_AdapterMethodHandle(mh, target, argnum, CHECK);
+  stress_method_handle_walk(mh, CHECK);
 }
 JVM_END
 
@@ -2762,6 +2905,8 @@
     return MethodHandles::stack_move_unit();
   case MethodHandles::GC_CONV_OP_IMPLEMENTED_MASK:
     return MethodHandles::adapter_conversion_ops_supported_mask();
+  case MethodHandles::GC_OP_ROT_ARGS_DOWN_LIMIT_BIAS:
+    return MethodHandles::OP_ROT_ARGS_DOWN_LIMIT_BIAS;
   }
   return 0;
 }
@@ -2773,6 +2918,8 @@
   /* template(MethodHandles,GC_JVM_PUSH_LIMIT) */  \
   /* hold back this one until JDK stabilizes */ \
   /* template(MethodHandles,GC_JVM_STACK_MOVE_UNIT) */ \
+  /* hold back this one until JDK stabilizes */ \
+  /* template(MethodHandles,GC_OP_ROT_ARGS_DOWN_LIMIT_BIAS) */ \
     template(MethodHandles,ETF_HANDLE_OR_METHOD_NAME) \
     template(MethodHandles,ETF_DIRECT_HANDLE) \
     template(MethodHandles,ETF_METHOD_NAME) \
@@ -2842,7 +2989,8 @@
 
 // void init(MemberName self, AccessibleObject ref)
 JVM_ENTRY(void, MHN_init_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jobject target_jh)) {
-  if (mname_jh == NULL || target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
+  if (mname_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "mname is null"); }
+  if (target_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "target is null"); }
   Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh));
   oop target_oop = JNIHandles::resolve_non_null(target_jh);
   MethodHandles::init_MemberName(mname(), target_oop);
@@ -2851,7 +2999,7 @@
 
 // void expand(MemberName self)
 JVM_ENTRY(void, MHN_expand_Mem(JNIEnv *env, jobject igcls, jobject mname_jh)) {
-  if (mname_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
+  if (mname_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "mname is null"); }
   Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh));
   MethodHandles::expand_MemberName(mname, 0, CHECK);
 }
@@ -2859,7 +3007,7 @@
 
 // void resolve(MemberName self, Class<?> caller)
 JVM_ENTRY(void, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) {
-  if (mname_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
+  if (mname_jh == NULL) { THROW_MSG(vmSymbols::java_lang_InternalError(), "mname is null"); }
   Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh));
 
   // The trusted Java code that calls this method should already have performed
@@ -2922,6 +3070,59 @@
 }
 JVM_END
 
+methodOop MethodHandles::resolve_raise_exception_method(TRAPS) {
+  if (_raise_exception_method != NULL) {
+    // no need to do it twice
+    return raise_exception_method();
+  }
+  // LinkResolver::resolve_invokedynamic can reach this point
+  // because an invokedynamic has failed very early (7049415)
+  KlassHandle MHN_klass = SystemDictionaryHandles::MethodHandleNatives_klass();
+  if (MHN_klass.not_null()) {
+    TempNewSymbol raiseException_name = SymbolTable::new_symbol("raiseException", CHECK_NULL);
+    TempNewSymbol raiseException_sig = SymbolTable::new_symbol("(ILjava/lang/Object;Ljava/lang/Object;)V", CHECK_NULL);
+    methodOop raiseException_method  = instanceKlass::cast(MHN_klass->as_klassOop())
+                  ->find_method(raiseException_name, raiseException_sig);
+    if (raiseException_method != NULL && raiseException_method->is_static()) {
+      return raiseException_method;
+    }
+  }
+  // not found; let the caller deal with it
+  return NULL;
+}
+void MethodHandles::raise_exception(int code, oop actual, oop required, TRAPS) {
+  methodOop raiseException_method = resolve_raise_exception_method(CHECK);
+  if (raiseException_method != NULL &&
+      instanceKlass::cast(raiseException_method->method_holder())->is_not_initialized()) {
+    instanceKlass::cast(raiseException_method->method_holder())->initialize(CHECK);
+    // it had better be resolved by now, or maybe JSR 292 failed to load
+    raiseException_method = raise_exception_method();
+  }
+  if (raiseException_method == NULL) {
+    THROW_MSG(vmSymbols::java_lang_InternalError(), "no raiseException method");
+  }
+  JavaCallArguments args;
+  args.push_int(code);
+  args.push_oop(actual);
+  args.push_oop(required);
+  JavaValue result(T_VOID);
+  JavaCalls::call(&result, raiseException_method, &args, CHECK);
+}
+
+JVM_ENTRY(jobject, MH_invoke_UOE(JNIEnv *env, jobject igmh, jobjectArray igargs)) {
+    TempNewSymbol UOE_name = SymbolTable::new_symbol("java/lang/UnsupportedOperationException", CHECK_NULL);
+    THROW_MSG_NULL(UOE_name, "MethodHandle.invoke cannot be invoked reflectively");
+    return NULL;
+}
+JVM_END
+
+JVM_ENTRY(jobject, MH_invokeExact_UOE(JNIEnv *env, jobject igmh, jobjectArray igargs)) {
+    TempNewSymbol UOE_name = SymbolTable::new_symbol("java/lang/UnsupportedOperationException", CHECK_NULL);
+    THROW_MSG_NULL(UOE_name, "MethodHandle.invokeExact cannot be invoked reflectively");
+    return NULL;
+}
+JVM_END
+
 
 /// JVM_RegisterMethodHandleMethods
 
@@ -2960,6 +3161,12 @@
   {CC"getMembers",              CC"("CLS""STRG""STRG"I"CLS"I["MEM")I",  FN_PTR(MHN_getMembers)}
 };
 
+static JNINativeMethod invoke_methods[] = {
+  // Object invoke(Object... args), Object invokeExact(Object... args)
+  {CC"invoke",                  CC"(["OBJ")"OBJ,                FN_PTR(MH_invoke_UOE)},
+  {CC"invokeExact",             CC"(["OBJ")"OBJ,                FN_PTR(MH_invokeExact_UOE)}
+};
+
 // This one function is exported, used by NativeLookup.
 
 JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class)) {
@@ -2976,6 +3183,12 @@
     ThreadToNativeFromVM ttnfv(thread);
 
     int status = env->RegisterNatives(MHN_class, methods, sizeof(methods)/sizeof(JNINativeMethod));
+    if (!env->ExceptionOccurred()) {
+      const char* L_MH_name = (JLINV "MethodHandle");
+      const char* MH_name = L_MH_name+1;
+      jclass MH_class = env->FindClass(MH_name);
+      status = env->RegisterNatives(MH_class, invoke_methods, sizeof(invoke_methods)/sizeof(JNINativeMethod));
+    }
     if (env->ExceptionOccurred()) {
       MethodHandles::set_enabled(false);
       warning("JSR 292 method handle code is mismatched to this JVM.  Disabling support.");
@@ -2985,19 +3198,11 @@
   }
 
   if (enable_MH) {
-    KlassHandle MHN_klass = SystemDictionaryHandles::MethodHandleNatives_klass();
-    if (MHN_klass.not_null()) {
-      TempNewSymbol raiseException_name = SymbolTable::new_symbol("raiseException", CHECK);
-      TempNewSymbol raiseException_sig = SymbolTable::new_symbol("(ILjava/lang/Object;Ljava/lang/Object;)V", CHECK);
-      methodOop raiseException_method  = instanceKlass::cast(MHN_klass->as_klassOop())
-                    ->find_method(raiseException_name, raiseException_sig);
-      if (raiseException_method != NULL && raiseException_method->is_static()) {
-        MethodHandles::set_raise_exception_method(raiseException_method);
-      } else {
-        warning("JSR 292 method handle code is mismatched to this JVM.  Disabling support.");
-        enable_MH = false;
-      }
+    methodOop raiseException_method = MethodHandles::resolve_raise_exception_method(CHECK);
+    if (raiseException_method != NULL) {
+      MethodHandles::set_raise_exception_method(raiseException_method);
     } else {
+      warning("JSR 292 method handle code is mismatched to this JVM.  Disabling support.");
       enable_MH = false;
     }
   }
--- a/hotspot/src/share/vm/prims/methodHandles.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/prims/methodHandles.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -217,7 +217,6 @@
 
   // Adapters.
   static MethodHandlesAdapterBlob* _adapter_code;
-  static int                       _adapter_code_size;
 
   static bool ek_valid(EntryKind ek)            { return (uint)ek < (uint)_EK_LIMIT; }
   static bool conv_op_valid(int op)             { return (uint)op < (uint)CONV_OP_LIMIT; }
@@ -439,6 +438,9 @@
     assert(_raise_exception_method == NULL, "");
     _raise_exception_method = JNIHandles::make_global(Handle(rem));
   }
+  static methodOop resolve_raise_exception_method(TRAPS);
+  // call raise_exception_method from C code:
+  static void raise_exception(int code, oop actual, oop required, TRAPS);
 
   static jint adapter_conversion(int conv_op, BasicType src, BasicType dest,
                                  int stack_move = 0, int vminfo = 0) {
@@ -579,12 +581,18 @@
     GC_JVM_PUSH_LIMIT = 0,
     GC_JVM_STACK_MOVE_UNIT = 1,
     GC_CONV_OP_IMPLEMENTED_MASK = 2,
+    GC_OP_ROT_ARGS_DOWN_LIMIT_BIAS = 3,
 
     // format of result from getTarget / encode_target:
     ETF_HANDLE_OR_METHOD_NAME = 0, // all available data (immediate MH or method)
     ETF_DIRECT_HANDLE         = 1, // ultimate method handle (will be a DMH, may be self)
     ETF_METHOD_NAME           = 2, // ultimate method as MemberName
-    ETF_REFLECT_METHOD        = 3  // ultimate method as java.lang.reflect object (sans refClass)
+    ETF_REFLECT_METHOD        = 3, // ultimate method as java.lang.reflect object (sans refClass)
+    ETF_FORCE_DIRECT_HANDLE   = 64,
+    ETF_COMPILE_DIRECT_HANDLE = 65,
+
+    // ad hoc constants
+    OP_ROT_ARGS_DOWN_LIMIT_BIAS = -1
   };
   static int get_named_constant(int which, Handle name_box, TRAPS);
   static oop encode_target(Handle mh, int format, TRAPS); // report vmtarget (to Java code)
@@ -713,20 +721,16 @@
 # include "methodHandles_x86.hpp"
 #endif
 #ifdef TARGET_ARCH_sparc
-#define TARGET_ARCH_NYI_6939861 1 //FIXME
-//# include "methodHandles_sparc.hpp"
+# include "methodHandles_sparc.hpp"
 #endif
 #ifdef TARGET_ARCH_zero
-#define TARGET_ARCH_NYI_6939861 1 //FIXME
-//# include "methodHandles_zero.hpp"
+# include "methodHandles_zero.hpp"
 #endif
 #ifdef TARGET_ARCH_arm
-#define TARGET_ARCH_NYI_6939861 1 //FIXME
-//# include "methodHandles_arm.hpp"
+# include "methodHandles_arm.hpp"
 #endif
 #ifdef TARGET_ARCH_ppc
-#define TARGET_ARCH_NYI_6939861 1 //FIXME
-//# include "methodHandles_ppc.hpp"
+# include "methodHandles_ppc.hpp"
 #endif
 
 #ifdef TARGET_ARCH_NYI_6939861
@@ -830,7 +834,7 @@
 //
 class MethodHandlesAdapterGenerator : public StubCodeGenerator {
 public:
-  MethodHandlesAdapterGenerator(CodeBuffer* code) : StubCodeGenerator(code) {}
+  MethodHandlesAdapterGenerator(CodeBuffer* code) : StubCodeGenerator(code, PrintMethodHandleStubs) {}
 
   void generate();
 };
--- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -171,7 +171,7 @@
       // If a method has been stale for some time, remove it from the queue.
       if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
         if (PrintTieredEvents) {
-          print_event(KILL, method, method, task->osr_bci(), (CompLevel)task->comp_level());
+          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
         }
         CompileTaskWrapper ctw(task); // Frees the task
         compile_queue->remove(task);
@@ -192,7 +192,7 @@
   if (max_task->comp_level() == CompLevel_full_profile && is_method_profiled(max_method)) {
     max_task->set_comp_level(CompLevel_limited_profile);
     if (PrintTieredEvents) {
-      print_event(UPDATE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
+      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
     }
   }
 
@@ -259,6 +259,17 @@
   return false;
 }
 
+// Inlining control: if we're compiling a profiled method with C1 and the callee
+// is known to have OSRed in a C2 version, don't inline it.
+bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
+  CompLevel comp_level = (CompLevel)env->comp_level();
+  if (comp_level == CompLevel_full_profile ||
+      comp_level == CompLevel_limited_profile) {
+    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
+  }
+  return false;
+}
+
 // Create MDO if necessary.
 void AdvancedThresholdPolicy::create_mdo(methodHandle mh, TRAPS) {
   if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return;
@@ -378,8 +389,9 @@
 }
 
 // Determine if a method should be compiled with a normal entry point at a different level.
-CompLevel AdvancedThresholdPolicy::call_event(methodOop method,  CompLevel cur_level) {
-  CompLevel osr_level = (CompLevel) method->highest_osr_comp_level();
+CompLevel AdvancedThresholdPolicy::call_event(methodOop method, CompLevel cur_level) {
+  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
+                             common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level));
   CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);
 
   // If OSR method level is greater than the regular method level, the levels should be
@@ -400,15 +412,16 @@
 
 // Determine if we should do an OSR compilation of a given method.
 CompLevel AdvancedThresholdPolicy::loop_event(methodOop method, CompLevel cur_level) {
+  CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level);
   if (cur_level == CompLevel_none) {
     // If there is a live OSR method that means that we deopted to the interpreter
     // for the transition.
-    CompLevel osr_level = (CompLevel)method->highest_osr_comp_level();
+    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
     if (osr_level > CompLevel_none) {
       return osr_level;
     }
   }
-  return common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level);
+  return next_level;
 }
 
 // Update the rate and submit compile
@@ -418,10 +431,9 @@
   CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", THREAD);
 }
 
-
 // Handle the invocation event.
 void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
-                                                      CompLevel level, TRAPS) {
+                                                      CompLevel level, nmethod* nm, TRAPS) {
   if (should_create_mdo(mh(), level)) {
     create_mdo(mh, THREAD);
   }
@@ -436,32 +448,81 @@
 // Handle the back branch event. Notice that we can compile the method
 // with a regular entry from here.
 void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
-                                                       int bci, CompLevel level, TRAPS) {
+                                                       int bci, CompLevel level, nmethod* nm, TRAPS) {
   if (should_create_mdo(mh(), level)) {
     create_mdo(mh, THREAD);
   }
+  // Check if MDO should be created for the inlined method
+  if (should_create_mdo(imh(), level)) {
+    create_mdo(imh, THREAD);
+  }
 
-  // If the method is already compiling, quickly bail out.
-  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
-    // Use loop event as an opportinity to also check there's been
-    // enough calls.
-    CompLevel cur_level = comp_level(mh());
-    CompLevel next_level = call_event(mh(), cur_level);
-    CompLevel next_osr_level = loop_event(mh(), level);
+  if (is_compilation_enabled()) {
+    CompLevel next_osr_level = loop_event(imh(), level);
+    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
     if (next_osr_level  == CompLevel_limited_profile) {
       next_osr_level = CompLevel_full_profile; // OSRs are supposed to be for very hot methods.
     }
-    next_level = MAX2(next_level,
-                      next_osr_level < CompLevel_full_optimization ? next_osr_level : cur_level);
-    bool is_compiling = false;
-    if (next_level != cur_level) {
-      compile(mh, InvocationEntryBci, next_level, THREAD);
-      is_compiling = true;
+
+    // At the very least compile the OSR version
+    if (!CompileBroker::compilation_is_in_queue(imh, bci)) {
+      // Check if there's a method like that already
+      nmethod* osr_nm = NULL;
+      if (max_osr_level >= next_osr_level) {
+        // There is an osr method already with the same
+        // or greater level, check if it has the bci we need
+        osr_nm = imh->lookup_osr_nmethod_for(bci, next_osr_level, false);
+      }
+      if (osr_nm == NULL) {
+        compile(imh, bci, next_osr_level, THREAD);
+      }
     }
 
-    // Do the OSR version
-    if (!is_compiling && next_osr_level != level) {
-      compile(mh, bci, next_osr_level, THREAD);
+    // Use loop event as an opportunity to also check if there's been
+    // enough calls.
+    CompLevel cur_level, next_level;
+    if (mh() != imh()) { // If there is an enclosing method
+      guarantee(nm != NULL, "Should have nmethod here");
+      cur_level = comp_level(mh());
+      next_level = call_event(mh(), cur_level);
+
+      if (max_osr_level == CompLevel_full_optimization) {
+        // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts
+        bool make_not_entrant = false;
+        if (nm->is_osr_method()) {
+          // This is an osr method, just make it not entrant and recompile later if needed
+          make_not_entrant = true;
+        } else {
+          if (next_level != CompLevel_full_optimization) {
+            // next_level is not full opt, so we need to recompile the
+            // enclosing method without the inlinee
+            cur_level = CompLevel_none;
+            make_not_entrant = true;
+          }
+        }
+        if (make_not_entrant) {
+          if (PrintTieredEvents) {
+            int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
+            print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
+          }
+          nm->make_not_entrant();
+        }
+      }
+      if (!CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
+        // Fix up next_level if necessary to avoid deopts
+        if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
+          next_level = CompLevel_full_profile;
+        }
+        if (cur_level != next_level) {
+          compile(mh, InvocationEntryBci, next_level, THREAD);
+        }
+      }
+    } else {
+      cur_level = comp_level(imh());
+      next_level = call_event(imh(), cur_level);
+      if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
+        compile(imh, InvocationEntryBci, next_level, THREAD);
+      }
     }
   }
 }
--- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -211,14 +211,16 @@
   virtual void submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS);
   // event() from SimpleThresholdPolicy would call these.
   virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
-                                       CompLevel level, TRAPS);
+                                       CompLevel level, nmethod* nm, TRAPS);
   virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
-                                        int bci, CompLevel level, TRAPS);
+                                        int bci, CompLevel level, nmethod* nm, TRAPS);
 public:
   AdvancedThresholdPolicy() : _start_time(0) { }
   // Select task is called by CompileBroker. We should return a task or NULL.
   virtual CompileTask* select_task(CompileQueue* compile_queue);
   virtual void initialize();
+  virtual bool should_not_inline(ciEnv* env, ciMethod* callee);
+
 };
 
 #endif // TIERED
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -37,15 +37,6 @@
 #include "services/management.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/taskqueue.hpp"
-#ifdef TARGET_ARCH_x86
-# include "vm_version_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "vm_version_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "vm_version_zero.hpp"
-#endif
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
 #endif
@@ -251,6 +242,11 @@
   { "UseParallelOldGCDensePrefix",
                            JDK_Version::jdk_update(6,27), JDK_Version::jdk(8) },
   { "AllowTransitionalJSR292",       JDK_Version::jdk(7), JDK_Version::jdk(8) },
+  { "UseCompressedStrings",          JDK_Version::jdk(7), JDK_Version::jdk(8) },
+#ifdef PRODUCT
+  { "DesiredMethodLimit",
+                           JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
+#endif // PRODUCT
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
 
@@ -1680,8 +1676,33 @@
           UseParallelGC || UseParallelOldGC));
 }
 
+// Check the GC log rotation settings for consistency:
+// -XX:+UseGCLogFileRotation requires a log file (-Xloggc:<filename>),
+// a non-zero NumberOfGCLogFiles, and a non-zero GCLogFileSize;
+// otherwise rotation is turned off.
+void check_gclog_consistency() {
+  if (UseGCLogFileRotation) {
+    if ((Arguments::gc_log_filename() == NULL) ||
+        (NumberOfGCLogFiles == 0)  ||
+        (GCLogFileSize == 0)) {
+      jio_fprintf(defaultStream::output_stream(),
+                  "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files> -XX:GCLogFileSize=<num_of_size>\n"
+                  "where num_of_file > 0 and num_of_size > 0\n"
+                  "GC log rotation is turned off\n");
+      UseGCLogFileRotation = false;
+    }
+  }
+
+  if (UseGCLogFileRotation && GCLogFileSize < 8*K) {
+    FLAG_SET_CMDLINE(uintx, GCLogFileSize, 8*K);
+    jio_fprintf(defaultStream::output_stream(),
+                "GCLogFileSize changed to minimum 8K\n");
+  }
+}
+
 // Check consistency of GC selection
 bool Arguments::check_gc_consistency() {
+  check_gclog_consistency();
   bool status = true;
   // Ensure that the user has not selected conflicting sets
   // of collectors. [Note: this check is merely a user convenience;
@@ -2672,6 +2693,7 @@
       return JNI_ERR;
     }
   }
+
   // Change the default value for flags  which have different default values
   // when working with older JDKs.
   if (JDK_Version::current().compare_major(6) <= 0 &&
@@ -2886,6 +2908,18 @@
   }
 }
 
+// Disable options not supported in this release, with a warning if they
+// were explicitly requested on the command-line
+#define UNSUPPORTED_OPTION(opt, description)                    \
+do {                                                            \
+  if (opt) {                                                    \
+    if (FLAG_IS_CMDLINE(opt)) {                                 \
+      warning(description " is disabled in this release.");     \
+    }                                                           \
+    FLAG_SET_DEFAULT(opt, false);                               \
+  }                                                             \
+} while(0)
+
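
A standalone illustration (not VM code) of the UNSUPPORTED_OPTION pattern defined above: warn only if the user explicitly requested the option, then force it off. The flag variables, the *_on_cmdline convention, and the demo macro name are all stand-ins for the real FLAG_IS_CMDLINE / FLAG_SET_DEFAULT machinery.

#include <cstdio>

static bool UseSomeFeature            = true;   // stand-in for a VM flag's value
static bool UseSomeFeature_on_cmdline = false;  // stand-in for FLAG_IS_CMDLINE

#define UNSUPPORTED_OPTION_DEMO(opt, description)                  \
do {                                                               \
  if (opt) {                                                       \
    if (opt##_on_cmdline) {                                        \
      std::printf(description " is disabled in this release.\n");  \
    }                                                              \
    opt = false;                                                   \
  }                                                                \
} while (0)

int main() {
  UNSUPPORTED_OPTION_DEMO(UseSomeFeature, "Some feature");
  return UseSomeFeature ? 1 : 0;   // 0: the feature was forced off as expected
}
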
 // Parse entry point called from JNI_CreateJavaVM
 
 jint Arguments::parse(const JavaVMInitArgs* args) {
@@ -2983,6 +3017,13 @@
     return result;
   }
 
+#ifdef JAVASE_EMBEDDED
+  #ifdef PPC
+    UNSUPPORTED_OPTION(EnableInvokeDynamic, "Invoke dynamic");
+  #endif
+  UNSUPPORTED_OPTION(UseG1GC, "G1 GC");
+#endif
+
 #ifndef PRODUCT
   if (TraceBytecodesAt != 0) {
     TraceBytecodes = true;
--- a/hotspot/src/share/vm/runtime/atomic.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/atomic.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -51,6 +51,12 @@
 #ifdef TARGET_OS_ARCH_windows_x86
 # include "atomic_windows_x86.inline.hpp"
 #endif
+#ifdef TARGET_OS_ARCH_linux_arm
+# include "atomic_linux_arm.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_ppc
+# include "atomic_linux_ppc.inline.hpp"
+#endif
 
 jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
   assert(sizeof(jbyte) == 1, "assumption.");
@@ -83,3 +89,13 @@
   return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
                                        (jint)compare_value);
 }
+
+jlong Atomic::add(jlong    add_value, volatile jlong*    dest) {
+  jlong old = load(dest);
+  jlong new_value = old + add_value;
+  while (old != cmpxchg(new_value, dest, old)) {
+    old = load(dest);
+    new_value = old + add_value;
+  }
+  return old;
+}
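
The new 64-bit Atomic::add above is a classic compare-and-swap retry loop. For readers who know the idiom from standard C++ rather than HotSpot's Atomic API, a minimal equivalent sketch (not part of the patch) looks like this:

#include <atomic>
#include <cstdint>

// Add 'add_value' to 'dest' atomically and return the previous value,
// retrying whenever another thread updates 'dest' between the load and the CAS.
int64_t atomic_add64(std::atomic<int64_t>& dest, int64_t add_value) {
  int64_t old = dest.load();
  while (!dest.compare_exchange_weak(old, old + add_value)) {
    // on failure, compare_exchange_weak reloads the current value into 'old'
  }
  return old;   // previous value, mirroring the 'return old' above
}
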
--- a/hotspot/src/share/vm/runtime/atomic.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/atomic.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -51,6 +51,8 @@
   static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
   static void*    add_ptr(intptr_t add_value, volatile void*     dest);
 
+  static jlong    add    (jlong    add_value, volatile jlong*    dest);
+
   // Atomically increment location
   static void inc    (volatile jint*     dest);
   static void inc_ptr(volatile intptr_t* dest);
--- a/hotspot/src/share/vm/runtime/compilationPolicy.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/compilationPolicy.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -306,7 +306,7 @@
   return (current >= initial + target);
 }
 
-nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS) {
+nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
   assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
   NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
   if (JvmtiExport::can_post_interpreter_events()) {
--- a/hotspot/src/share/vm/runtime/compilationPolicy.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/compilationPolicy.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -62,7 +62,7 @@
   virtual int compiler_count(CompLevel comp_level) = 0;
   // main notification entry, return a pointer to an nmethod if the OSR is required,
   // returns NULL otherwise.
-  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS) = 0;
+  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) = 0;
   // safepoint() is called at the end of the safepoint
   virtual void do_safepoint_work() = 0;
   // reprofile request
@@ -80,6 +80,7 @@
   virtual bool is_mature(methodOop method) = 0;
   // Do policy initialization
   virtual void initialize() = 0;
+  virtual bool should_not_inline(ciEnv* env, ciMethod* method) { return false; }
 };
 
 // A base class for baseline policies.
@@ -101,7 +102,7 @@
   virtual bool is_mature(methodOop method);
   virtual void initialize();
   virtual CompileTask* select_task(CompileQueue* compile_queue);
-  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS);
+  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS);
   virtual void method_invocation_event(methodHandle m, TRAPS) = 0;
   virtual void method_back_branch_event(methodHandle m, int bci, TRAPS) = 0;
 };
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -343,6 +343,12 @@
 #define falseInTiered true
 #endif
 
+#ifdef JAVASE_EMBEDDED
+#define falseInEmbedded false
+#else
+#define falseInEmbedded true
+#endif
+
 // develop flags are settable / visible only during development and are constant in the PRODUCT version
 // product flags are always settable / visible
 // notproduct flags are settable / visible only during development and are not declared in the PRODUCT version
@@ -438,6 +444,9 @@
   product(bool, UsePPCLWSYNC, true,                                         \
           "Use lwsync instruction if true, else use slower sync")           \
                                                                             \
+  develop(bool, CleanChunkPoolAsync, falseInEmbedded,                       \
+          "Whether to clean the chunk pool asynchronously")                 \
+                                                                            \
   /* Temporary: See 6948537 */                                             \
   experimental(bool, UseMemSetInBOT, true,                                  \
           "(Unstable) uses memset in BOT updates in GC code")               \
@@ -492,6 +501,9 @@
   product(intx, UseSSE, 99,                                                 \
           "Highest supported SSE instructions set on x86/x64")              \
                                                                             \
+  product(intx, UseVIS, 99,                                                 \
+          "Highest supported VIS instructions set on Sparc")                \
+                                                                            \
   product(uintx, LargePageSizeInBytes, 0,                                   \
           "Large page size (0 to let VM choose the page size")              \
                                                                             \
@@ -1944,6 +1956,9 @@
           "Number of ObjArray elements to push onto the marking stack"      \
           "before pushing a continuation entry")                            \
                                                                             \
+  notproduct(bool, ExecuteInternalVMTests, false,                           \
+          "Enable execution of internal VM tests.")                         \
+                                                                            \
   product_pd(bool, UseTLAB, "Use thread-local object allocation")           \
                                                                             \
   product_pd(bool, ResizeTLAB,                                              \
@@ -2332,6 +2347,20 @@
           "Print diagnostic message when GC is stalled"                     \
           "by JNI critical section")                                        \
                                                                             \
+  /* GC log rotation setting */                                             \
+                                                                            \
+  product(bool, UseGCLogFileRotation, false,                                \
+          "Prevent large gclog file for long running app. "                 \
+          "Requires -Xloggc:<filename>")                                    \
+                                                                            \
+  product(uintx, NumberOfGCLogFiles, 0,                                     \
+          "Number of gclog files in rotation, "                             \
+          "Default: 0, no rotation")                                        \
+                                                                            \
+  product(uintx, GCLogFileSize, 0,                                          \
+          "GC log file size, Default: 0 bytes, no rotation "                \
+          "Only valid with UseGCLogFileRotation")                           \
+                                                                            \
   /* JVMTI heap profiling */                                                \
                                                                             \
   diagnostic(bool, TraceJVMTIObjectTagging, false,                          \
@@ -2909,6 +2938,12 @@
   product(intx, NmethodSweepCheckInterval, 5,                               \
           "Compilers wake up every n seconds to possibly sweep nmethods")   \
                                                                             \
+  notproduct(bool, LogSweeper, false,                                       \
+            "Keep a ring buffer of sweeper activity")                       \
+                                                                            \
+  notproduct(intx, SweeperLogEntries, 1024,                                 \
+            "Number of records in the ring buffer of sweeper activity")     \
+                                                                            \
   notproduct(intx, MemProfilingInterval, 500,                               \
           "Time between each invocation of the MemProfiler")                \
                                                                             \
@@ -3588,13 +3623,9 @@
                                                                             \
   /* flags for performance data collection */                               \
                                                                             \
-  NOT_EMBEDDED(product(bool, UsePerfData, true,                             \
+  product(bool, UsePerfData, falseInEmbedded,                               \
           "Flag to disable jvmstat instrumentation for performance testing" \
-          "and problem isolation purposes."))                               \
-                                                                            \
-  EMBEDDED_ONLY(product(bool, UsePerfData, false,                           \
-          "Flag to disable jvmstat instrumentation for performance testing" \
-          "and problem isolation purposes."))                               \
+          "and problem isolation purposes.")                                \
                                                                             \
   product(bool, PerfDataSaveToFile, false,                                  \
           "Save PerfData memory to hsperfdata_<pid> file on exit")          \
@@ -3709,6 +3740,9 @@
   diagnostic(intx, MethodHandlePushLimit, 3,                                \
           "number of additional stack slots a method handle may push")      \
                                                                             \
+  diagnostic(bool, PrintMethodHandleStubs, false,                           \
+          "Print generated stub code for method handles")                   \
+                                                                            \
   develop(bool, TraceMethodHandles, false,                                  \
           "trace internal method handle operations")                        \
                                                                             \
@@ -3718,6 +3752,9 @@
   diagnostic(bool, OptimizeMethodHandles, true,                             \
           "when constructing method handles, try to improve them")          \
                                                                             \
+  develop(bool, StressMethodHandleWalk, false,                              \
+          "Process all method handles with MethodHandleWalk")               \
+                                                                            \
   diagnostic(bool, UseRicochetFrames, true,                                 \
           "use ricochet stack frames for method handle combination, "       \
           "if the platform supports them")                                  \
@@ -3725,7 +3762,7 @@
   experimental(bool, TrustFinalNonStaticFields, false,                      \
           "trust final non-static declarations for constant folding")       \
                                                                             \
-  experimental(bool, AllowInvokeGeneric, true,                              \
+  experimental(bool, AllowInvokeGeneric, false,                             \
           "accept MethodHandle.invoke and MethodHandle.invokeGeneric "      \
           "as equivalent methods")                                          \
                                                                             \
--- a/hotspot/src/share/vm/runtime/java.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/java.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -468,12 +468,10 @@
   StatSampler::disengage();
   StatSampler::destroy();
 
-#ifndef SERIALGC
-  // stop CMS threads
-  if (UseConcMarkSweepGC) {
-    ConcurrentMarkSweepThread::stop();
-  }
-#endif // SERIALGC
+  // We do not need to explicitly stop concurrent GC threads because the
+  // JVM will be taken down at a safepoint when such threads are inactive --
+  // except for some concurrent G1 threads; see the comment in
+  // Threads::destroy_vm().
 
   // Print GC/heap related information.
   if (PrintGCDetails) {
--- a/hotspot/src/share/vm/runtime/os.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/os.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -761,6 +761,7 @@
   // st->print("(active %d)", os::active_processor_count());
   st->print(" %s", VM_Version::cpu_features());
   st->cr();
+  pd_print_cpu_info(st);
 }
 
 void os::print_date_and_time(outputStream *st) {
--- a/hotspot/src/share/vm/runtime/os.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/os.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -480,6 +480,7 @@
   // Output format may be different on different platforms.
   static void print_os_info(outputStream* st);
   static void print_cpu_info(outputStream* st);
+  static void pd_print_cpu_info(outputStream* st);
   static void print_memory_info(outputStream* st);
   static void print_dll_info(outputStream* st);
   static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len);
--- a/hotspot/src/share/vm/runtime/safepoint.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -511,6 +511,11 @@
 
   TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
   NMethodSweeper::scan_stacks();
+
+  // rotate log files?
+  if (UseGCLogFileRotation) {
+    gclog_or_tty->rotate_log();
+  }
 }
 
 
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -80,6 +80,72 @@
 #include "c1/c1_Runtime1.hpp"
 #endif
 
+// Shared stub locations
+RuntimeStub*        SharedRuntime::_wrong_method_blob;
+RuntimeStub*        SharedRuntime::_ic_miss_blob;
+RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
+RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
+RuntimeStub*        SharedRuntime::_resolve_static_call_blob;
+
+DeoptimizationBlob* SharedRuntime::_deopt_blob;
+RicochetBlob*       SharedRuntime::_ricochet_blob;
+
+SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
+SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
+
+#ifdef COMPILER2
+UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
+#endif // COMPILER2
+
+
+//----------------------------generate_stubs-----------------------------------
+void SharedRuntime::generate_stubs() {
+  _wrong_method_blob                   = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),         "wrong_method_stub");
+  _ic_miss_blob                        = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss), "ic_miss_stub");
+  _resolve_opt_virtual_call_blob       = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),  "resolve_opt_virtual_call");
+  _resolve_virtual_call_blob           = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),      "resolve_virtual_call");
+  _resolve_static_call_blob            = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),       "resolve_static_call");
+
+  _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), false);
+  _polling_page_return_handler_blob    = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), true);
+
+  generate_ricochet_blob();
+  generate_deopt_blob();
+
+#ifdef COMPILER2
+  generate_uncommon_trap_blob();
+#endif // COMPILER2
+}
+
+//----------------------------generate_ricochet_blob---------------------------
+void SharedRuntime::generate_ricochet_blob() {
+  if (!EnableInvokeDynamic)  return;  // leave it as a null
+
+#ifndef TARGET_ARCH_NYI_6939861
+  // allocate space for the code
+  ResourceMark rm;
+  // setup code generation tools
+  CodeBuffer buffer("ricochet_blob", 256 LP64_ONLY(+ 256), 256);  // XXX x86 LP64L: 512, 512
+  MacroAssembler* masm = new MacroAssembler(&buffer);
+
+  int bounce_offset = -1, exception_offset = -1, frame_size_in_words = -1;
+  MethodHandles::RicochetFrame::generate_ricochet_blob(masm, &bounce_offset, &exception_offset, &frame_size_in_words);
+
+  // -------------
+  // make sure all code is generated
+  masm->flush();
+
+  // failed to generate?
+  if (bounce_offset < 0 || exception_offset < 0 || frame_size_in_words < 0) {
+    assert(false, "bad ricochet blob");
+    return;
+  }
+
+  _ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words);
+#endif
+}
+
+
 #include <math.h>
 
 HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
@@ -88,8 +154,6 @@
 HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
                       char*, int, char*, int, char*, int);
 
-RicochetBlob*      SharedRuntime::_ricochet_blob = NULL;
-
 // Implementation of SharedRuntime
 
 #ifndef PRODUCT
@@ -143,6 +207,7 @@
 int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
 address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
 
+
 void SharedRuntime::trace_ic_miss(address at) {
   for (int i = 0; i < _ICmiss_index; i++) {
     if (_ICmiss_at[i] == at) {
@@ -698,6 +763,13 @@
   throw_and_post_jvmti_exception(thread, exception);
 JRT_END
 
+JRT_ENTRY(void, SharedRuntime::throw_WrongMethodTypeException(JavaThread* thread, oopDesc* required, oopDesc* actual))
+  assert(thread == JavaThread::current() && required->is_oop() && actual->is_oop(), "bad args");
+  ResourceMark rm;
+  char* message = SharedRuntime::generate_wrong_method_type_message(thread, required, actual);
+  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_invoke_WrongMethodTypeException(), message);
+JRT_END
+
 address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
                                                            address pc,
                                                            SharedRuntime::ImplicitExceptionKind exception_kind)
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -52,28 +52,33 @@
 
   // Shared stub locations
 
-  static RuntimeStub* _wrong_method_blob;
-  static RuntimeStub* _ic_miss_blob;
-  static RuntimeStub* _resolve_opt_virtual_call_blob;
-  static RuntimeStub* _resolve_virtual_call_blob;
-  static RuntimeStub* _resolve_static_call_blob;
+  static RuntimeStub*        _wrong_method_blob;
+  static RuntimeStub*        _ic_miss_blob;
+  static RuntimeStub*        _resolve_opt_virtual_call_blob;
+  static RuntimeStub*        _resolve_virtual_call_blob;
+  static RuntimeStub*        _resolve_static_call_blob;
 
-  static RicochetBlob* _ricochet_blob;
+  static DeoptimizationBlob* _deopt_blob;
+  static RicochetBlob*       _ricochet_blob;
 
-  static SafepointBlob* _polling_page_safepoint_handler_blob;
-  static SafepointBlob* _polling_page_return_handler_blob;
+  static SafepointBlob*      _polling_page_safepoint_handler_blob;
+  static SafepointBlob*      _polling_page_return_handler_blob;
+
 #ifdef COMPILER2
-  static ExceptionBlob*       _exception_blob;
-  static UncommonTrapBlob*    _uncommon_trap_blob;
+  static UncommonTrapBlob*   _uncommon_trap_blob;
 #endif // COMPILER2
 
 #ifndef PRODUCT
-
   // Counters
   static int     _nof_megamorphic_calls;         // total # of megamorphic calls (through vtable)
+#endif // !PRODUCT
 
-#endif // !PRODUCT
+ private:
+  static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return);
+  static RuntimeStub*   generate_resolve_blob(address destination, const char* name);
+
  public:
+  static void generate_stubs(void);
 
   // max bytes for each dtrace string parameter
   enum { max_dtrace_string_size = 256 };
@@ -180,6 +185,7 @@
   static void    throw_NullPointerException(JavaThread* thread);
   static void    throw_NullPointerException_at_call(JavaThread* thread);
   static void    throw_StackOverflowError(JavaThread* thread);
+  static void    throw_WrongMethodTypeException(JavaThread* thread, oopDesc* required, oopDesc* actual);
   static address continuation_for_implicit_exception(JavaThread* thread,
                                                      address faulting_pc,
                                                      ImplicitExceptionKind exception_kind);
@@ -326,12 +332,9 @@
                                      bool is_virtual,
                                      bool is_optimized, TRAPS);
 
-  static void generate_stubs(void);
-
   private:
   // deopt blob
   static void generate_deopt_blob(void);
-  static DeoptimizationBlob* _deopt_blob;
 
   public:
   static DeoptimizationBlob* deopt_blob(void)      { return _deopt_blob; }
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -50,15 +50,18 @@
   case COMPILE:
     tty->print("compile");
     break;
-  case KILL:
-    tty->print("kill");
+  case REMOVE_FROM_QUEUE:
+    tty->print("remove-from-queue");
     break;
-  case UPDATE:
-    tty->print("update");
+  case UPDATE_IN_QUEUE:
+    tty->print("update-in-queue");
     break;
   case REPROFILE:
     tty->print("reprofile");
     break;
+  case MAKE_NOT_ENTRANT:
+    tty->print("make-not-entrant");
+    break;
   default:
     tty->print("unknown");
   }
@@ -68,7 +71,6 @@
   ResourceMark rm;
   char *method_name = mh->name_and_sig_as_C_string();
   tty->print("[%s", method_name);
-  // We can have an inlinee, although currently we don't generate any notifications for the inlined methods.
   if (inlinee_event) {
     char *inlinee_name = imh->name_and_sig_as_C_string();
     tty->print(" [%s]] ", inlinee_name);
@@ -170,7 +172,7 @@
 }
 
 nmethod* SimpleThresholdPolicy::event(methodHandle method, methodHandle inlinee,
-                                      int branch_bci, int bci, CompLevel comp_level, TRAPS) {
+                                      int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
   if (comp_level == CompLevel_none &&
       JvmtiExport::can_post_interpreter_events()) {
     assert(THREAD->is_Java_thread(), "Should be java thread");
@@ -190,12 +192,13 @@
   }
 
   if (bci == InvocationEntryBci) {
-    method_invocation_event(method, inlinee, comp_level, THREAD);
+    method_invocation_event(method, inlinee, comp_level, nm, THREAD);
   } else {
-    method_back_branch_event(method, inlinee, bci, comp_level, THREAD);
-    int highest_level = method->highest_osr_comp_level();
+    method_back_branch_event(method, inlinee, bci, comp_level, nm, THREAD);
+    // method == inlinee if the event originated in the main method
+    int highest_level = inlinee->highest_osr_comp_level();
     if (highest_level > comp_level) {
-      osr_nm = method->lookup_osr_nmethod_for(bci, highest_level, false);
+      osr_nm = inlinee->lookup_osr_nmethod_for(bci, highest_level, false);
     }
   }
   return osr_nm;
@@ -323,7 +326,8 @@
 
 // Determine if a method should be compiled with a normal entry point at a different level.
 CompLevel SimpleThresholdPolicy::call_event(methodOop method,  CompLevel cur_level) {
-  CompLevel osr_level = (CompLevel) method->highest_osr_comp_level();
+  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
+                             common(&SimpleThresholdPolicy::loop_predicate, method, cur_level));
   CompLevel next_level = common(&SimpleThresholdPolicy::call_predicate, method, cur_level);
 
   // If OSR method level is greater than the regular method level, the levels should be
@@ -344,21 +348,22 @@
 
 // Determine if we should do an OSR compilation of a given method.
 CompLevel SimpleThresholdPolicy::loop_event(methodOop method, CompLevel cur_level) {
+  CompLevel next_level = common(&SimpleThresholdPolicy::loop_predicate, method, cur_level);
   if (cur_level == CompLevel_none) {
     // If there is a live OSR method that means that we deopted to the interpreter
     // for the transition.
-    CompLevel osr_level = (CompLevel)method->highest_osr_comp_level();
+    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
     if (osr_level > CompLevel_none) {
       return osr_level;
     }
   }
-  return common(&SimpleThresholdPolicy::loop_predicate, method, cur_level);
+  return next_level;
 }
 
 
 // Handle the invocation event.
 void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
-                                              CompLevel level, TRAPS) {
+                                              CompLevel level, nmethod* nm, TRAPS) {
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
     CompLevel next_level = call_event(mh(), level);
     if (next_level != level) {
@@ -370,7 +375,7 @@
 // Handle the back branch event. Notice that we can compile the method
 // with a regular entry from here.
 void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
-                                               int bci, CompLevel level, TRAPS) {
+                                                     int bci, CompLevel level, nmethod* nm, TRAPS) {
   // If the method is already compiling, quickly bail out.
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
     // Use loop event as an opportunity to also check there's been
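
The call_event and loop_event changes above no longer let a previously recorded OSR level dictate the next level on its own: the value from highest_osr_comp_level() is now capped with MIN2 by the level the loop predicate would currently choose. A minimal standalone sketch of that capping step, using simplified stand-ins for HotSpot's CompLevel constants and MIN2 macro (not the real definitions):

    #include <algorithm>
    #include <cassert>

    // Simplified stand-ins for HotSpot's CompLevel constants and MIN2 macro.
    enum CompLevel {
      CompLevel_none              = 0,
      CompLevel_limited_profile   = 2,
      CompLevel_full_profile      = 3,
      CompLevel_full_optimization = 4
    };

    // The level an existing OSR nmethod was compiled at is capped by the level
    // the loop predicate would currently allow, as in call_event/loop_event above.
    CompLevel capped_osr_level(CompLevel highest_osr_level, CompLevel predicate_level) {
      return std::min(highest_osr_level, predicate_level);
    }

    int main() {
      // A stale top-tier OSR method no longer forces full optimization when the
      // thresholds would currently only justify profiled compilation.
      assert(capped_osr_level(CompLevel_full_optimization, CompLevel_full_profile)
             == CompLevel_full_profile);
      return 0;
    }

The effect of the cap is that a stale, highly optimized OSR nmethod cannot by itself push the regular entry point to a tier the current thresholds would not justify.
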
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -62,7 +62,7 @@
   void set_c1_count(int x) { _c1_count = x;    }
   void set_c2_count(int x) { _c2_count = x;    }
 
-  enum EventType { CALL, LOOP, COMPILE, KILL, UPDATE, REPROFILE };
+  enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT };
   void print_event(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level);
   // Print policy-specific information if necessary
   virtual void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level) { }
@@ -88,9 +88,9 @@
     return CompLevel_none;
   }
   virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
-                                       CompLevel level, TRAPS);
+                                       CompLevel level, nmethod* nm, TRAPS);
   virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
-                                        int bci, CompLevel level, TRAPS);
+                                        int bci, CompLevel level, nmethod* nm, TRAPS);
 public:
   SimpleThresholdPolicy() : _c1_count(0), _c2_count(0) { }
   virtual int compiler_count(CompLevel comp_level) {
@@ -101,17 +101,20 @@
   virtual void do_safepoint_work() { }
   virtual void delay_compilation(methodOop method) { }
   virtual void disable_compilation(methodOop method) { }
-  // TODO: we should honour reprofiling requests in the future. Currently reprofiling
-  // would happen but not to the extent we would ideally like.
   virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
   virtual nmethod* event(methodHandle method, methodHandle inlinee,
-                         int branch_bci, int bci, CompLevel comp_level, TRAPS);
+                         int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS);
   // Select task is called by CompileBroker. We should return a task or NULL.
   virtual CompileTask* select_task(CompileQueue* compile_queue);
   // Tell the runtime if we think a given method is adequately profiled.
   virtual bool is_mature(methodOop method);
   // Initialize: set compiler thread count
   virtual void initialize();
+  virtual bool should_not_inline(ciEnv* env, ciMethod* callee) {
+    return (env->comp_level() == CompLevel_limited_profile ||
+            env->comp_level() == CompLevel_full_profile) &&
+            callee->has_loops();
+  }
 };
 
 #endif // SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_HPP
--- a/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -80,9 +80,10 @@
 
 // Implementation of StubCodeGenerator
 
-StubCodeGenerator::StubCodeGenerator(CodeBuffer* code) {
+StubCodeGenerator::StubCodeGenerator(CodeBuffer* code, bool print_code) {
   _masm = new MacroAssembler(code);
   _first_stub = _last_stub = NULL;
+  _print_code = print_code;
 }
 
 extern "C" {
@@ -94,7 +95,7 @@
 }
 
 StubCodeGenerator::~StubCodeGenerator() {
-  if (PrintStubCode) {
+  if (PrintStubCode || _print_code) {
     CodeBuffer* cbuf = _masm->code();
     CodeBlob*   blob = CodeCache::find_blob_unsafe(cbuf->insts()->start());
     if (blob != NULL) {
--- a/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -98,9 +98,10 @@
 
   StubCodeDesc* _first_stub;
   StubCodeDesc* _last_stub;
+  bool _print_code;
 
  public:
-  StubCodeGenerator(CodeBuffer* code);
+  StubCodeGenerator(CodeBuffer* code, bool print_code = false);
   ~StubCodeGenerator();
 
   MacroAssembler* assembler() const              { return _masm; }
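
Because the new print_code constructor argument defaults to false, every existing StubCodeGenerator(code) call site keeps its current behavior, while a specific generator can now request disassembly of its stubs even when -XX:+PrintStubCode is off. A small standalone illustration of that defaulted-flag pattern, with simplified stand-in types rather than the real CodeBuffer and StubCodeGenerator classes:

    #include <cstdio>

    struct CodeBufferStub {};                 // stand-in for CodeBuffer

    class GeneratorStub {                     // stand-in for StubCodeGenerator
      bool _print_code;
     public:
      // Defaulted flag: old call sites pass one argument and get the old behavior.
      GeneratorStub(CodeBufferStub* code, bool print_code = false)
          : _print_code(print_code) { (void)code; }
      ~GeneratorStub() {
        bool global_flag = false;             // stands in for -XX:+PrintStubCode
        if (global_flag || _print_code) std::printf("would disassemble generated stubs here\n");
      }
    };

    int main() {
      CodeBufferStub buf;
      GeneratorStub quiet(&buf);              // existing call site, behavior unchanged
      GeneratorStub verbose(&buf, true);      // new call site that forces printing
      return 0;
    }
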
--- a/hotspot/src/share/vm/runtime/stubRoutines.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/stubRoutines.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -55,6 +55,7 @@
 address StubRoutines::_throw_NullPointerException_entry         = NULL;
 address StubRoutines::_throw_NullPointerException_at_call_entry = NULL;
 address StubRoutines::_throw_StackOverflowError_entry           = NULL;
+address StubRoutines::_throw_WrongMethodTypeException_entry     = NULL;
 address StubRoutines::_handler_for_unsafe_access_entry          = NULL;
 jint    StubRoutines::_verify_oop_count                         = 0;
 address StubRoutines::_verify_oop_subroutine_entry              = NULL;
--- a/hotspot/src/share/vm/runtime/stubRoutines.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/stubRoutines.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -132,6 +132,7 @@
   static address _throw_NullPointerException_entry;
   static address _throw_NullPointerException_at_call_entry;
   static address _throw_StackOverflowError_entry;
+  static address _throw_WrongMethodTypeException_entry;
   static address _handler_for_unsafe_access_entry;
 
   static address _atomic_xchg_entry;
@@ -254,6 +255,7 @@
   static address throw_NullPointerException_entry()        { return _throw_NullPointerException_entry; }
   static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; }
   static address throw_StackOverflowError_entry()          { return _throw_StackOverflowError_entry; }
+  static address throw_WrongMethodTypeException_entry()    { return _throw_WrongMethodTypeException_entry; }
 
   // Exceptions during unsafe access - should throw Java exception rather
   // than crash.
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -37,6 +37,94 @@
 #include "utilities/events.hpp"
 #include "utilities/xmlstream.hpp"
 
+#ifdef ASSERT
+
+#define SWEEP(nm) record_sweep(nm, __LINE__)
+// Sweeper logging code
+class SweeperRecord {
+ public:
+  int traversal;
+  int invocation;
+  int compile_id;
+  long traversal_mark;
+  int state;
+  const char* kind;
+  address vep;
+  address uep;
+  int line;
+
+  void print() {
+      tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
+                    PTR_FORMAT " state = %d traversal_mark %d line = %d",
+                    traversal,
+                    invocation,
+                    compile_id,
+                    kind == NULL ? "" : kind,
+                    uep,
+                    vep,
+                    state,
+                    traversal_mark,
+                    line);
+  }
+};
+
+static int _sweep_index = 0;
+static SweeperRecord* _records = NULL;
+
+void NMethodSweeper::report_events(int id, address entry) {
+  if (_records != NULL) {
+    for (int i = _sweep_index; i < SweeperLogEntries; i++) {
+      if (_records[i].uep == entry ||
+          _records[i].vep == entry ||
+          _records[i].compile_id == id) {
+        _records[i].print();
+      }
+    }
+    for (int i = 0; i < _sweep_index; i++) {
+      if (_records[i].uep == entry ||
+          _records[i].vep == entry ||
+          _records[i].compile_id == id) {
+        _records[i].print();
+      }
+    }
+  }
+}
+
+void NMethodSweeper::report_events() {
+  if (_records != NULL) {
+    for (int i = _sweep_index; i < SweeperLogEntries; i++) {
+      // skip empty records
+      if (_records[i].vep == NULL) continue;
+      _records[i].print();
+    }
+    for (int i = 0; i < _sweep_index; i++) {
+      // skip empty records
+      if (_records[i].vep == NULL) continue;
+      _records[i].print();
+    }
+  }
+}
+
+void NMethodSweeper::record_sweep(nmethod* nm, int line) {
+  if (_records != NULL) {
+    _records[_sweep_index].traversal = _traversals;
+    _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
+    _records[_sweep_index].invocation = _invocations;
+    _records[_sweep_index].compile_id = nm->compile_id();
+    _records[_sweep_index].kind = nm->compile_kind();
+    _records[_sweep_index].state = nm->_state;
+    _records[_sweep_index].vep = nm->verified_entry_point();
+    _records[_sweep_index].uep = nm->entry_point();
+    _records[_sweep_index].line = line;
+
+    _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
+  }
+}
+#else
+#define SWEEP(nm)
+#endif
+
+
 long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
 nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
 int       NMethodSweeper::_seen = 0 ;        // No. of nmethods we have currently processed in current pass of CodeCache
@@ -137,6 +225,13 @@
     if (old != 0) {
       return;
     }
+#ifdef ASSERT
+    if (LogSweeper && _records == NULL) {
+      // Create the ring buffer for the logging code
+      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries);
+      memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
+    }
+#endif
     if (_invocations > 0) {
       sweep_code_cache();
       _invocations--;
@@ -213,10 +308,29 @@
   }
 }
 
+class NMethodMarker: public StackObj {
+ private:
+  CompilerThread* _thread;
+ public:
+  NMethodMarker(nmethod* nm) {
+    _thread = CompilerThread::current();
+    _thread->set_scanned_nmethod(nm);
+  }
+  ~NMethodMarker() {
+    _thread->set_scanned_nmethod(NULL);
+  }
+};
+
 
 void NMethodSweeper::process_nmethod(nmethod *nm) {
   assert(!CodeCache_lock->owned_by_self(), "just checking");
 
+  // Make sure this nmethod doesn't get unloaded during the scan,
+  // since the locks acquired below might safepoint.
+  NMethodMarker nmm(nm);
+
+  SWEEP(nm);
+
   // Skip methods that are currently referenced by the VM
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
@@ -224,8 +338,10 @@
       // Clean-up all inline caches that point to zombie/non-reentrant methods
       MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
+      SWEEP(nm);
     } else {
       _locked_seen++;
+      SWEEP(nm);
     }
     return;
   }
@@ -247,6 +363,7 @@
       }
       nm->mark_for_reclamation();
       _rescan = true;
+      SWEEP(nm);
     }
   } else if (nm->is_not_entrant()) {
     // If there is no current activations of this method on the
@@ -257,6 +374,7 @@
       }
       nm->make_zombie();
       _rescan = true;
+      SWEEP(nm);
     } else {
       // Still alive, clean up its inline caches
       MutexLocker cl(CompiledIC_lock);
@@ -265,6 +383,7 @@
       // request a rescan.  If this method stays on the stack for a
       // long time we don't want to keep rescanning the code cache.
       _not_entrant_seen_on_stack++;
+      SWEEP(nm);
     }
   } else if (nm->is_unloaded()) {
     // Unloaded code, just make it a zombie
@@ -273,10 +392,12 @@
     if (nm->is_osr_method()) {
       // No inline caches will ever point to osr methods, so we can just remove it
       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      SWEEP(nm);
       nm->flush();
     } else {
       nm->make_zombie();
       _rescan = true;
+      SWEEP(nm);
     }
   } else {
     assert(nm->is_alive(), "should be alive");
@@ -293,6 +414,7 @@
     // Clean-up all inline caches that point to zombie/non-reentrant methods
     MutexLocker cl(CompiledIC_lock);
     nm->cleanup_inline_caches();
+    SWEEP(nm);
   }
 }
 
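
record_sweep() above logs each sweeper action into a preallocated array and advances the index modulo SweeperLogEntries, so only the most recent SweeperLogEntries records survive, and report_events() replays them oldest-first by walking from the current index to the end and then wrapping around. A minimal standalone sketch of that ring-buffer pattern; the Event fields and capacity are illustrative, not HotSpot's SweeperRecord:

    #include <cstdio>

    // Illustrative record; HotSpot's SweeperRecord carries more fields
    // (state, entry points, traversal mark, ...).
    struct Event {
      int id;
      int line;
    };

    static const int kCapacity = 8;      // stands in for SweeperLogEntries
    static Event g_records[kCapacity];   // zero-initialized, like the NEW_C_HEAP_ARRAY + memset
    static int   g_index = 0;

    void record(int id, int line) {
      g_records[g_index].id   = id;
      g_records[g_index].line = line;
      g_index = (g_index + 1) % kCapacity;   // wrap: the oldest entry gets overwritten
    }

    void report() {
      // Print oldest-to-newest: the tail from the current index first, then the
      // wrapped-around head, mirroring NMethodSweeper::report_events().
      for (int i = g_index; i < kCapacity; i++) {
        if (g_records[i].line != 0) std::printf("id=%d line=%d\n", g_records[i].id, g_records[i].line);
      }
      for (int i = 0; i < g_index; i++) {
        if (g_records[i].line != 0) std::printf("id=%d line=%d\n", g_records[i].id, g_records[i].line);
      }
    }

    int main() {
      for (int i = 1; i <= 12; i++) record(i, 100 + i);   // more events than capacity
      report();                                           // prints only the last 8 events
      return 0;
    }
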
--- a/hotspot/src/share/vm/runtime/sweeper.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/sweeper.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -57,6 +57,13 @@
  public:
   static long traversal_count() { return _traversals; }
 
+#ifdef ASSERT
+  // Keep track of sweeper activity in the ring buffer
+  static void record_sweep(nmethod* nm, int line);
+  static void report_events(int id, address entry);
+  static void report_events();
+#endif
+
   static void scan_stacks();      // Invoked at the end of each safepoint
   static void sweep_code_cache(); // Concurrent part of sweep job
   static void possibly_sweep();   // Compiler threads call this to sweep
--- a/hotspot/src/share/vm/runtime/thread.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -2861,6 +2861,7 @@
 }
 
 
+#ifdef ASSERT
 // Print or validate the layout of stack frames
 void JavaThread::print_frame_layout(int depth, bool validate_only) {
   ResourceMark rm;
@@ -2878,7 +2879,7 @@
     values.print();
   }
 }
-
+#endif
 
 void JavaThread::trace_stack_from(vframe* start_vf) {
   ResourceMark rm;
@@ -2942,12 +2943,22 @@
   _queue = queue;
   _counters = counters;
   _buffer_blob = NULL;
+  _scanned_nmethod = NULL;
 
 #ifndef PRODUCT
   _ideal_graph_printer = NULL;
 #endif
 }
 
+void CompilerThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
+  JavaThread::oops_do(f, cf);
+  if (_scanned_nmethod != NULL && cf != NULL) {
+    // Safepoints can occur when the sweeper is scanning an nmethod so
+    // process it here to make sure it isn't unloaded in the middle of
+    // a scan.
+    cf->do_code_blob(_scanned_nmethod);
+  }
+}
 
 // ======= Threads ========
 
@@ -3336,7 +3347,9 @@
   // Notify JVMTI agents that VM initialization is complete - nop if no agents.
   JvmtiExport::post_vm_initialized();
 
-  Chunk::start_chunk_pool_cleaner_task();
+  if (CleanChunkPoolAsync) {
+    Chunk::start_chunk_pool_cleaner_task();
+  }
 
   // initialize compiler(s)
   CompileBroker::compilation_init();
@@ -3687,6 +3700,14 @@
     // heap is unparseable if they are caught. Grab the Heap_lock
     // to prevent this. The GC vm_operations will not be able to
     // queue until after the vm thread is dead.
+    // After this point, we'll never emerge out of the safepoint before
+    // the VM exits, so concurrent GC threads do not need to be explicitly
+    // stopped; they remain inactive until the process exits.
+    // Note: some concurrent G1 threads may still be running during this
+    // terminal safepoint, but they do not access the heap; they only touch
+    // G1-specific side data structures that no other thread accesses after
+    // this point.
+
     MutexLocker ml(Heap_lock);
 
     VMThread::wait_for_vm_thread_exit();
--- a/hotspot/src/share/vm/runtime/thread.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -439,7 +439,7 @@
   // GC support
   // Apply "f->do_oop" to all root oops in "this".
   // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
-  void oops_do(OopClosure* f, CodeBlobClosure* cf);
+  virtual void oops_do(OopClosure* f, CodeBlobClosure* cf);
 
   // Handles the parallel case for the method below.
 private:
@@ -1381,7 +1381,7 @@
   void trace_frames()                            PRODUCT_RETURN;
 
   // Print an annotated view of the stack frames
-  void print_frame_layout(int depth = 0, bool validate_only = false) PRODUCT_RETURN;
+  void print_frame_layout(int depth = 0, bool validate_only = false) NOT_DEBUG_RETURN;
   void validate_frame_layout() {
     print_frame_layout(0, true);
   }
@@ -1698,6 +1698,8 @@
   CompileQueue* _queue;
   BufferBlob*   _buffer_blob;
 
+  nmethod*      _scanned_nmethod;  // nmethod being scanned by the sweeper
+
  public:
 
   static CompilerThread* current();
@@ -1726,6 +1728,11 @@
     _log = log;
   }
 
+  // GC support
+  // Apply "f->do_oop" to all root oops in "this".
+  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
+  void oops_do(OopClosure* f, CodeBlobClosure* cf);
+
 #ifndef PRODUCT
 private:
   IdealGraphPrinter *_ideal_graph_printer;
@@ -1737,6 +1744,12 @@
   // Get/set the thread's current task
   CompileTask*  task()                           { return _task; }
   void          set_task(CompileTask* task)      { _task = task; }
+
+  // Track the nmethod currently being scanned by the sweeper
+  void          set_scanned_nmethod(nmethod* nm) {
+    assert(_scanned_nmethod == NULL || nm == NULL, "should reset to NULL before writing a new value");
+    _scanned_nmethod = nm;
+  }
 };
 
 inline CompilerThread* CompilerThread::current() {
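
Taken together, the sweeper and thread changes publish the nmethod currently being swept in the CompilerThread: NMethodMarker sets _scanned_nmethod for the duration of process_nmethod(), CompilerThread::oops_do() hands it to the CodeBlobClosure during root scanning, and the marker's destructor clears it again, so a safepoint in the middle of the scan cannot unload the nmethod. A minimal sketch of that publish/clear RAII pattern, with simplified stand-in types rather than the real nmethod and CompilerThread classes:

    #include <cassert>
    #include <cstddef>

    struct NMethodStub {};                    // stand-in for nmethod

    struct ThreadStub {                       // stand-in for CompilerThread
      NMethodStub* scanned = nullptr;
      void set_scanned(NMethodStub* nm) {
        // Must pass through NULL between two different values, like set_scanned_nmethod().
        assert(scanned == nullptr || nm == nullptr);
        scanned = nm;
      }
    };

    // RAII marker: publishes the nmethod for the duration of the scan,
    // mirroring the NMethodMarker StackObj in sweeper.cpp.
    class ScanMarker {
      ThreadStub* _thread;
     public:
      ScanMarker(ThreadStub* t, NMethodStub* nm) : _thread(t) { _thread->set_scanned(nm); }
      ~ScanMarker() { _thread->set_scanned(nullptr); }
    };

    int main() {
      ThreadStub thread;
      NMethodStub nm;
      {
        ScanMarker marker(&thread, &nm);      // visible to a GC root scan in this scope
        assert(thread.scanned == &nm);
      }
      assert(thread.scanned == nullptr);      // cleared when the scan ends
      return 0;
    }
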
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -783,6 +783,7 @@
   nonstatic_field(nmethod,             _osr_link,                                     nmethod*)                              \
   nonstatic_field(nmethod,             _scavenge_root_link,                           nmethod*)                              \
   nonstatic_field(nmethod,             _scavenge_root_state,                          jbyte)                                 \
+  nonstatic_field(nmethod,             _state,                                        unsigned char)                         \
   nonstatic_field(nmethod,             _exception_offset,                             int)                                   \
   nonstatic_field(nmethod,             _deoptimize_offset,                            int)                                   \
   nonstatic_field(nmethod,             _orig_pc_offset,                               int)                                   \
@@ -800,6 +801,8 @@
   nonstatic_field(nmethod,             _osr_entry_point,                              address)                               \
   nonstatic_field(nmethod,             _lock_count,                                   jint)                                  \
   nonstatic_field(nmethod,             _stack_traversal_mark,                         long)                                  \
+  nonstatic_field(nmethod,             _compile_id,                                   int)                                   \
+  nonstatic_field(nmethod,             _marked_for_deoptimization,                    bool)                                  \
                                                                                                                                      \
   /********************************/                                                                                                 \
   /* JavaCalls (NOTE: incomplete) */                                                                                                 \
@@ -1310,11 +1313,13 @@
                                                                           \
   declare_toplevel_type(CodeBlob)                                         \
   declare_type(BufferBlob,            CodeBlob)                           \
-  declare_type(nmethod,       CodeBlob)                           \
+  declare_type(AdapterBlob,           BufferBlob)                         \
+  declare_type(nmethod,               CodeBlob)                           \
   declare_type(RuntimeStub,           CodeBlob)                           \
   declare_type(SingletonBlob,         CodeBlob)                           \
   declare_type(SafepointBlob,         SingletonBlob)                      \
   declare_type(DeoptimizationBlob,    SingletonBlob)                      \
+  declare_type(RicochetBlob,          SingletonBlob)                      \
   declare_c2_type(ExceptionBlob,      SingletonBlob)                      \
   declare_c2_type(UncommonTrapBlob,   CodeBlob)                           \
                                                                           \
--- a/hotspot/src/share/vm/utilities/bitMap.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/utilities/bitMap.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -161,11 +161,11 @@
 
   // Set or clear the specified bit.
   inline void set_bit(idx_t bit);
-  void clear_bit(idx_t bit);
+  inline void clear_bit(idx_t bit);
 
   // Atomically set or clear the specified bit.
-  bool par_set_bit(idx_t bit);
-  bool par_clear_bit(idx_t bit);
+  inline bool par_set_bit(idx_t bit);
+  inline bool par_clear_bit(idx_t bit);
 
   // Put the given value at the given offset. The parallel version
   // will CAS the value into the bitmap and is quite a bit slower.
--- a/hotspot/src/share/vm/utilities/ostream.cpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/utilities/ostream.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -349,7 +349,7 @@
 fileStream::~fileStream() {
   if (_file != NULL) {
     if (_need_close) fclose(_file);
-    _file = NULL;
+    _file      = NULL;
   }
 }
 
@@ -377,6 +377,86 @@
   update_position(s, len);
 }
 
+rotatingFileStream::~rotatingFileStream() {
+  if (_file != NULL) {
+    if (_need_close) fclose(_file);
+    _file      = NULL;
+    FREE_C_HEAP_ARRAY(char, _file_name);
+    _file_name = NULL;
+  }
+}
+
+rotatingFileStream::rotatingFileStream(const char* file_name) {
+  _cur_file_num = 0;
+  _bytes_writen = 0L;
+  _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10);
+  jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
+  _file = fopen(_file_name, "w");
+  _need_close = true;
+}
+
+rotatingFileStream::rotatingFileStream(const char* file_name, const char* opentype) {
+  _cur_file_num = 0;
+  _bytes_writen = 0L;
+  _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10);
+  jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
+  _file = fopen(_file_name, opentype);
+  _need_close = true;
+}
+
+void rotatingFileStream::write(const char* s, size_t len) {
+  if (_file != NULL)  {
+    // Track how many bytes have actually been written, toward the rotation threshold.
+    size_t count = fwrite(s, 1, len, _file);
+    Atomic::add((jlong)count, &_bytes_writen);
+  }
+  update_position(s, len);
+}
+
+// rotate_log must be called by the VMThread at a safepoint. If parameters for GC log
+// rotation need to be changed from a thread other than the VMThread, a subtype of
+// VM_Operation should be created and submitted to the VMThread's operation queue.
+// DO NOT call this function directly. Currently it is safe to rotate the log at a
+// safepoint through the VMThread, because no mutator or concurrent GC thread writes
+// to the GC log file in parallel with the VMThread at a safepoint. If, in the future,
+// mutator threads or concurrent GC threads are changed to run in parallel with the
+// VMThread at a safepoint, write and rotate_log must be synchronized.
+void rotatingFileStream::rotate_log() {
+  if (_bytes_writen < (jlong)GCLogFileSize) return;
+#ifdef ASSERT
+  Thread *thread = Thread::current();
+  assert(thread == NULL ||
+         (thread->is_VM_thread() && SafepointSynchronize::is_at_safepoint()),
+         "Must be VMThread at safepoint");
+#endif
+  if (NumberOfGCLogFiles == 1) {
+    // rotate in same file
+    rewind();
+    _bytes_writen = 0L;
+    return;
+  }
+
+  // Rotate through files named file.0, file.1, ..., file.<NumberOfGCLogFiles-1>:
+  // close the current file and move on to the next one.
+  if (_file != NULL) {
+    _cur_file_num ++;
+    if (_cur_file_num >= NumberOfGCLogFiles) _cur_file_num = 0;
+    jio_snprintf(_file_name, strlen(Arguments::gc_log_filename()) + 10, "%s.%d",
+             Arguments::gc_log_filename(), _cur_file_num);
+    fclose(_file);
+    _file = NULL;
+  }
+  _file = fopen(_file_name, "w");
+  if (_file != NULL) {
+    _bytes_writen = 0L;
+    _need_close = true;
+  } else {
+    tty->print_cr("failed to open rotation log file %s due to %s\n",
+                  _file_name, strerror(errno));
+    _need_close = false;
+  }
+}
+
 defaultStream* defaultStream::instance = NULL;
 int defaultStream::_output_fd = 1;
 int defaultStream::_error_fd  = 2;
@@ -749,14 +829,17 @@
 
   gclog_or_tty = tty; // default to tty
   if (Arguments::gc_log_filename() != NULL) {
-    fileStream * gclog = new(ResourceObj::C_HEAP)
-                           fileStream(Arguments::gc_log_filename());
+    fileStream * gclog  = UseGCLogFileRotation ?
+                          new(ResourceObj::C_HEAP)
+                             rotatingFileStream(Arguments::gc_log_filename()) :
+                          new(ResourceObj::C_HEAP)
+                             fileStream(Arguments::gc_log_filename());
     if (gclog->is_open()) {
       // now we update the time stamp of the GC log to be synced up
       // with tty.
       gclog->time_stamp().update_to(tty->time_stamp().ticks());
-      gclog_or_tty = gclog;
     }
+    gclog_or_tty = gclog;
   }
 
   // If we haven't lazily initialized the logfile yet, do it now,
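
rotate_log() above rewinds in place when only one log file is configured, and otherwise closes the current file and reopens <filename>.<n>, with n advancing from 0 up to NumberOfGCLogFiles-1 and then wrapping back to 0. A standalone sketch of that numbered-file cycling; the base name and file count below are illustrative values, not the real -Xloggc / NumberOfGCLogFiles settings:

    #include <cstddef>
    #include <cstdio>

    // Illustrative values; the real code derives these from -Xloggc:<file> and
    // the NumberOfGCLogFiles flag.
    static const char*    kBaseName = "gc.log";
    static const unsigned kNumFiles = 3;

    // Build the name of the next file in the rotation:
    // gc.log.0, gc.log.1, gc.log.2, gc.log.0, ...
    void next_log_name(char* buf, std::size_t buflen, unsigned* cur_file_num) {
      *cur_file_num = (*cur_file_num + 1) % kNumFiles;   // wrap back to 0 after the last file
      std::snprintf(buf, buflen, "%s.%u", kBaseName, *cur_file_num);
    }

    int main() {
      char name[64];
      unsigned cur = 0;
      for (int i = 0; i < 5; i++) {
        next_log_name(name, sizeof(name), &cur);
        std::printf("%s\n", name);   // gc.log.1 gc.log.2 gc.log.0 gc.log.1 gc.log.2
      }
      return 0;
    }
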
--- a/hotspot/src/share/vm/utilities/ostream.hpp	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/src/share/vm/utilities/ostream.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -110,14 +110,15 @@
    // flushing
    virtual void flush() {}
    virtual void write(const char* str, size_t len) = 0;
-   virtual ~outputStream() {}  // close properly on deletion
+   virtual void rotate_log() {} // GC log rotation
+   virtual ~outputStream() {}   // close properly on deletion
 
    void dec_cr() { dec(); cr(); }
    void inc_cr() { inc(); cr(); }
 };
 
 // standard output
-                                // ANSI C++ name collision
+// ANSI C++ name collision
 extern outputStream* tty;           // tty output
 extern outputStream* gclog_or_tty;  // stream for gc log if -Xloggc:<f>, or tty
 
@@ -176,6 +177,7 @@
   FILE* _file;
   bool  _need_close;
  public:
+  fileStream() { _file = NULL; _need_close = false; }
   fileStream(const char* file_name);
   fileStream(const char* file_name, const char* opentype);
   fileStream(FILE* file) { _file = file; _need_close = false; }
@@ -210,6 +212,20 @@
   void flush() {};
 };
 
+class rotatingFileStream : public fileStream {
+ protected:
+  char*  _file_name;
+  jlong  _bytes_writen;
+  uintx  _cur_file_num;             // current logfile rotation number, from 0 to NumberOfGCLogFiles-1
+ public:
+  rotatingFileStream(const char* file_name);
+  rotatingFileStream(const char* file_name, const char* opentype);
+  rotatingFileStream(FILE* file) : fileStream(file) {}
+  ~rotatingFileStream();
+  virtual void write(const char* c, size_t len);
+  virtual void rotate_log();
+};
+
 void ostream_init();
 void ostream_init_log();
 void ostream_exit();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/quickSort.cpp	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/quickSort.hpp"
+
+#ifndef PRODUCT
+
+// Unit tests
+
+#include "runtime/os.hpp"
+#include <stdlib.h>
+
+static int test_comparator(int a, int b) {
+  if (a == b) {
+    return 0;
+  }
+  if (a < b) {
+    return -1;
+  }
+  return 1;
+}
+
+static int test_even_odd_comparator(int a, int b) {
+  bool a_is_odd = (a % 2) == 1;
+  bool b_is_odd = (b % 2) == 1;
+  if (a_is_odd == b_is_odd) {
+    return 0;
+  }
+  if (a_is_odd) {
+    return -1;
+  }
+  return 1;
+}
+
+static int test_stdlib_comparator(const void* a, const void* b) {
+  int ai = *(int*)a;
+  int bi = *(int*)b;
+  if (ai == bi) {
+    return 0;
+  }
+  if (ai < bi) {
+    return -1;
+  }
+  return 1;
+}
+
+void QuickSort::print_array(const char* prefix, int* array, int length) {
+  tty->print("%s:", prefix);
+  for (int i = 0; i < length; i++) {
+    tty->print(" %d", array[i]);
+  }
+  tty->print_cr("");
+}
+
+bool QuickSort::compare_arrays(int* actual, int* expected, int length) {
+  for (int i = 0; i < length; i++) {
+    if (actual[i] != expected[i]) {
+      print_array("Sorted array  ", actual, length);
+      print_array("Expected array", expected, length);
+      return false;
+    }
+  }
+  return true;
+}
+
+template <class C>
+bool QuickSort::sort_and_compare(int* arrayToSort, int* expectedResult, int length, C comparator, bool idempotent) {
+  sort<int, C>(arrayToSort, length, comparator, idempotent);
+  return compare_arrays(arrayToSort, expectedResult, length);
+}
+
+bool QuickSort::test_quick_sort() {
+  tty->print_cr("test_quick_sort\n");
+  {
+    int* test_array = NULL;
+    int* expected_array = NULL;
+    assert(sort_and_compare(test_array, expected_array, 0, test_comparator), "Empty array not handled");
+  }
+  {
+    int test_array[] = {3};
+    int expected_array[] = {3};
+    assert(sort_and_compare(test_array, expected_array, 1, test_comparator), "Single value array not handled");
+  }
+  {
+    int test_array[] = {3,2};
+    int expected_array[] = {2,3};
+    assert(sort_and_compare(test_array, expected_array, 2, test_comparator), "Array with 2 values not correctly sorted");
+  }
+  {
+    int test_array[] = {3,2,1};
+    int expected_array[] = {1,2,3};
+    assert(sort_and_compare(test_array, expected_array, 3, test_comparator), "Array with 3 values not correctly sorted");
+  }
+  {
+    int test_array[] = {4,3,2,1};
+    int expected_array[] = {1,2,3,4};
+    assert(sort_and_compare(test_array, expected_array, 4, test_comparator), "Array with 4 values not correctly sorted");
+  }
+  {
+    int test_array[] = {7,1,5,3,6,9,8,2,4,0};
+    int expected_array[] = {0,1,2,3,4,5,6,7,8,9};
+    assert(sort_and_compare(test_array, expected_array, 10, test_comparator), "Array with 10 values not correctly sorted");
+  }
+  {
+    int test_array[] = {4,4,1,4};
+    int expected_array[] = {1,4,4,4};
+    assert(sort_and_compare(test_array, expected_array, 4, test_comparator), "3 duplicates not sorted correctly");
+  }
+  {
+    int test_array[] = {0,1,2,3,4,5,6,7,8,9};
+    int expected_array[] = {0,1,2,3,4,5,6,7,8,9};
+    assert(sort_and_compare(test_array, expected_array, 10, test_comparator), "Already sorted array not correctly sorted");
+  }
+  {
+    // One of the random arrays that found an issue in the partition method.
+    int test_array[] = {76,46,81,8,64,56,75,11,51,55,11,71,59,27,9,64,69,75,21,25,39,40,44,32,7,8,40,41,24,78,24,74,9,65,28,6,40,31,22,13,27,82};
+    int expected_array[] = {6,7,8,8,9,9,11,11,13,21,22,24,24,25,27,27,28,31,32,39,40,40,40,41,44,46,51,55,56,59,64,64,65,69,71,74,75,75,76,78,81,82};
+    assert(sort_and_compare(test_array, expected_array, 42, test_comparator), "Not correctly sorted");
+  }
+  {
+    int test_array[] = {2,8,1,4};
+    int expected_array[] = {1,4,2,8};
+    assert(sort_and_compare(test_array, expected_array, 4, test_even_odd_comparator), "Even/odd not sorted correctly");
+  }
+  {  // Some idempotent tests
+    {
+      // An array of length 3 is only sorted by find_pivot. Make sure that it is idempotent.
+      int test_array[] = {1,4,8};
+      int expected_array[] = {1,4,8};
+      assert(sort_and_compare(test_array, expected_array, 3, test_even_odd_comparator, true), "Even/odd not idempotent");
+    }
+    {
+      int test_array[] = {1,7,9,4,8,2};
+      int expected_array[] = {1,7,9,4,8,2};
+      assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
+    }
+    {
+      int test_array[] = {1,9,7,4,2,8};
+      int expected_array[] = {1,9,7,4,2,8};
+      assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
+    }
+    {
+      int test_array[] = {7,9,1,2,8,4};
+      int expected_array[] = {7,9,1,2,8,4};
+      assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
+    }
+    {
+      int test_array[] = {7,1,9,2,4,8};
+      int expected_array[] = {7,1,9,2,4,8};
+      assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
+    }
+    {
+      int test_array[] = {9,1,7,4,8,2};
+      int expected_array[] = {9,1,7,4,8,2};
+      assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
+    }
+    {
+      int test_array[] = {9,7,1,4,2,8};
+      int expected_array[] = {9,7,1,4,2,8};
+      assert(sort_and_compare(test_array, expected_array, 6, test_even_odd_comparator, true), "Even/odd not idempotent");
+    }
+  }
+
+  // test sorting random arrays
+  for (int i = 0; i < 1000; i++) {
+    int length = os::random() % 100;
+    int* test_array = new int[length];
+    int* expected_array = new int[length];
+    for (int j = 0; j < length; j++) {
+        // Choose random values, but allow a chance of duplicates
+        test_array[j] = os::random() % (length * 2);
+        expected_array[j] = test_array[j];
+    }
+
+    // Compare sorting to stdlib::qsort()
+    qsort(expected_array, length, sizeof(int), test_stdlib_comparator);
+    assert(sort_and_compare(test_array, expected_array, length, test_comparator), "Random array not correctly sorted");
+
+    // Make sure sorting is idempotent.
+    // Both test_array and expected_array are sorted by the test_comparator.
+    // Now sort them once with the test_even_odd_comparator. Then sort the
+    // test_array one more time with test_even_odd_comparator and verify that
+    // it is idempotent.
+    sort(expected_array, length, test_even_odd_comparator, true);
+    sort(test_array, length, test_even_odd_comparator, true);
+    assert(compare_arrays(test_array, expected_array, length), "Sorting identical arrays rendered different results");
+    sort(test_array, length, test_even_odd_comparator, true);
+    assert(compare_arrays(test_array, expected_array, length), "Sorting already sorted array changed order of elements - not idempotent");
+
+    delete[] test_array;
+    delete[] expected_array;
+  }
+  return true;
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/quickSort.hpp	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_QUICKSORT_HPP
+#define SHARE_VM_UTILITIES_QUICKSORT_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/debug.hpp"
+
+class QuickSort : AllStatic {
+
+ private:
+  template<class T>
+  static void swap(T* array, int x, int y) {
+    T tmp = array[x];
+    array[x] = array[y];
+    array[y] = tmp;
+  }
+
+  // As pivot we use the median of the first, last and middle elements.
+  // We swap in these three values at the right place in the array. This
+  // means that this method not only returns the index of the pivot
+  // element. It also alters the array so that:
+  //     array[first] <= array[middle] <= array[last]
+  // A side effect of this is that arrays of length <= 3 are sorted.
+  template<class T, class C>
+  static int find_pivot(T* array, int length, C comparator) {
+    assert(length > 1, "length of array must be > 1");
+
+    int middle_index = length / 2;
+    int last_index = length - 1;
+
+    if (comparator(array[0], array[middle_index]) == 1) {
+      swap(array, 0, middle_index);
+    }
+    if (comparator(array[0], array[last_index]) == 1) {
+      swap(array, 0, last_index);
+    }
+    if (comparator(array[middle_index], array[last_index]) == 1) {
+      swap(array, middle_index, last_index);
+    }
+    // Now the value in the middle of the array is the median
+    // of the first, last and middle values. Use this as pivot.
+    return middle_index;
+  }
+
+  template<class T, class C, bool idempotent>
+  static int partition(T* array, int pivot, int length, C comparator) {
+    int left_index = -1;
+    int right_index = length;
+    T pivot_val = array[pivot];
+
+    while (true) {
+      do {
+        left_index++;
+      } while (comparator(array[left_index], pivot_val) == -1);
+      do {
+        right_index--;
+      } while (comparator(array[right_index], pivot_val) == 1);
+
+      if (left_index < right_index) {
+        if (!idempotent || comparator(array[left_index], array[right_index]) != 0) {
+          swap(array, left_index, right_index);
+        }
+      } else {
+        return right_index;
+      }
+    }
+
+    ShouldNotReachHere();
+    return 0;
+  }
+
+  template<class T, class C, bool idempotent>
+  static void inner_sort(T* array, int length, C comparator) {
+    if (length < 2) {
+      return;
+    }
+    int pivot = find_pivot(array, length, comparator);
+    if (length < 4) {
+      // arrays up to length 3 will be sorted after finding the pivot
+      return;
+    }
+    int split = partition<T, C, idempotent>(array, pivot, length, comparator);
+    int first_part_length = split + 1;
+    inner_sort<T, C, idempotent>(array, first_part_length, comparator);
+    inner_sort<T, C, idempotent>(&array[first_part_length], length - first_part_length, comparator);
+  }
+
+ public:
+  // The idempotent parameter prevents the sort from
+  // reordering a previous valid sort by not swapping
+  // fields that compare as equal. This requires extra
+  // calls to the comparator, so the performance
+  // impact depends on the comparator.
+  template<class T, class C>
+  static void sort(T* array, int length, C comparator, bool idempotent) {
+    // Switch "idempotent" from function paramter to template parameter
+    if (idempotent) {
+      inner_sort<T, C, true>(array, length, comparator);
+    } else {
+      inner_sort<T, C, false>(array, length, comparator);
+    }
+  }
+
+  // for unit testing
+#ifndef PRODUCT
+  static void print_array(const char* prefix, int* array, int length);
+  static bool compare_arrays(int* actual, int* expected, int length);
+  template <class C> static bool sort_and_compare(int* arrayToSort, int* expectedResult, int length, C comparator, bool idempotent = false);
+  static bool test_quick_sort();
+#endif
+};
+
+
+#endif //SHARE_VM_UTILITIES_QUICKSORT_HPP
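
The new QuickSort::sort() takes any comparator that returns -1, 0, or 1 (as the unit tests' comparators do) plus an idempotent flag; when the flag is set, elements that compare equal are never swapped, so re-sorting with a coarser comparator cannot perturb an order that is already valid. A hedged sketch of a caller inside the HotSpot tree (it only compiles there, and the function and comparator names are illustrative, not existing HotSpot code):

    // Sketch of a hypothetical caller inside the HotSpot source tree; the
    // function name and comparator are illustrative and do not exist in the tree.
    #include "utilities/quickSort.hpp"

    static int compare_ints(int a, int b) {
      if (a == b) return 0;
      return a < b ? -1 : 1;          // contract: return -1, 0 or 1, as in the unit tests
    }

    static void sort_counts(int* counts, int length) {
      // idempotent = true: elements that compare equal are never swapped, so
      // sorting an already sorted array leaves it untouched.
      QuickSort::sort(counts, length, compare_ints, true);
    }
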
--- a/hotspot/test/compiler/5091921/Test6890943.sh	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/test/compiler/5091921/Test6890943.sh	Wed Aug 17 22:47:12 2011 -0700
@@ -52,7 +52,7 @@
 
 ${TESTJAVA}/bin/javac -d . Test6890943.java
 
-${TESTJAVA}/bin/java  ${TESTVMOPTS} Test6890943 < input6890943.txt > test.out 2>&1
+${TESTJAVA}/bin/java -XX:-PrintVMOptions ${TESTVMOPTS} Test6890943 < input6890943.txt > test.out 2>&1
 
 diff output6890943.txt test.out
 
--- a/hotspot/test/compiler/5091921/Test7005594.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/hotspot/test/compiler/5091921/Test7005594.java	Wed Aug 17 22:47:12 2011 -0700
@@ -27,7 +27,7 @@
  * @bug 7005594
  * @summary Array overflow not handled correctly with loop optimizations
  *
- * @run main/othervm -Xms2048m -Xcomp -XX:CompileOnly=Test7005594.test Test7005594
+ * @run shell Test7005594.sh
  */
 
 public class Test7005594 {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/5091921/Test7005594.sh	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,80 @@
+#!/bin/sh
+# 
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+# 
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+# 
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+# 
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+# 
+# 
+
+if [ "${TESTSRC}" = "" ]
+then
+  echo "TESTSRC not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTSRC=${TESTSRC}"
+if [ "${TESTJAVA}" = "" ]
+then
+  echo "TESTJAVA not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTJAVA=${TESTJAVA}"
+if [ "${TESTCLASSES}" = "" ]
+then
+  echo "TESTCLASSES not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTCLASSES=${TESTCLASSES}"
+echo "CLASSPATH=${CLASSPATH}"
+
+set -x
+
+cp ${TESTSRC}/Test7005594.java .
+cp ${TESTSRC}/Test7005594.sh .
+
+${TESTJAVA}/bin/javac -d . Test7005594.java
+
+${TESTJAVA}/bin/java ${TESTVMOPTS} -Xms1600m -Xcomp -XX:CompileOnly=Test7005594.test Test7005594 > test.out 2>&1
+
+result=$?
+
+cat test.out
+
+if [ $result -eq 95 ]
+then
+  echo "Passed"
+  exit 0
+fi
+
+if [ $result -eq 97 ]
+then
+  echo "Failed"
+  exit 1
+fi
+
+# The test should also pass when there is not enough space for the object heap
+grep "Could not reserve enough space for object heap" test.out
+if [ $? = 0 ]
+then
+  echo "Passed"
+  exit 0
+else
+  echo "Failed"
+  exit 1
+fi
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6478991/NullCheckTest.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6478991
+ * @summary C1 NullCheckEliminator yields incorrect exceptions
+ *
+ * @run main/othervm -XX:CompileOnly=NullCheckTest.test,NullCheckTest.inlined  -Xcomp NullCheckTest
+ */
+
+public class NullCheckTest {
+        static class A {
+                int f;
+
+                public final void inlined(A a) {
+                        // This cast is intended to fail.
+                        B b = ((B) a);
+                }
+        }
+
+        static class B extends A {
+        }
+
+
+        private static void test(A a1, A a2) {
+                // Inlined call must do a null check on a1.
+                        // However, the explicit NullCheck instruction is eliminated and
+                // the null check is folded into the field load below, so the
+                // exception in the inlined method is thrown before the null check
+                // and the NullPointerException is not thrown.
+                a1.inlined(a2);
+
+                int x = a1.f;
+        }
+
+        public static void main(String[] args) {
+                // load classes
+                new B();
+                try {
+                        test(null, new A());
+
+                        throw new InternalError("FAILURE: no exception");
+                } catch (NullPointerException ex) {
+                        System.out.println("CORRECT: NullPointerException");
+                } catch (ClassCastException ex) {
+                        System.out.println("FAILURE: ClassCastException");
+                        throw ex;
+                }
+        }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6956668/Test6956668.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6956668
+ * @summary misbehavior of XOR operator (^) with int
+ *
+ * @run main/othervm -Xbatch Test6956668
+ */
+
+
+public class Test6956668 {
+
+   public static int bitTest() {
+      int result = 0;
+
+      int testValue = 73;
+      int bitCount = Integer.bitCount(testValue);
+
+      if (testValue != 0) {
+         int gap = Long.numberOfTrailingZeros(testValue);
+         testValue >>>= gap;
+
+         while (testValue != 0) {
+            result++;
+
+            if ((testValue ^= 0x1) != 0) {
+               gap = Long.numberOfTrailingZeros(testValue);
+               testValue >>>= gap;
+            }
+         }
+      }
+
+      if (bitCount != result) {
+         System.out.println("ERROR: " + bitCount + " != " + result);
+         System.exit(97);
+      }
+
+      return (result);
+   }
+
+   public static void main(String[] args) {
+      for (int i = 0; i < 100000; i++) {
+         int ct = bitTest();
+      }
+   }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6990212/Test6990212.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6990212
+ * @summary JSR 292 JVMTI MethodEnter hook is not called for JSR 292 bootstrap and target methods
+ *
+ * @run main Test6990212
+ */
+
+import java.lang.invoke.*;
+
+interface intf {
+    public Object target();
+}
+
+public class Test6990212 implements intf {
+    public Object target() {
+        return null;
+    }
+
+    public static void main(String[] args) throws Throwable {
+        // Build an interface invoke and then invoke it on something
+        // that doesn't implement the interface to test the
+        // raiseException path.
+        MethodHandle target = MethodHandles.lookup().findVirtual(intf.class, "target",  MethodType.methodType(Object.class));
+        try {
+            target.invoke(new Object());
+        } catch (ClassCastException cce) {
+            // everything is ok
+            System.out.println("got expected ClassCastException");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/7044738/Test7044738.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7044738
+ * @summary Loop unroll optimization causes incorrect result
+ *
+ * @run main/othervm -Xbatch Test7044738
+ */
+
+public class Test7044738 {
+
+  private static final int INITSIZE = 10000;
+  public int d[] = { 1, 2, 3, 4 };
+  public int i, size;
+
+  private static int iter = 5;
+
+  boolean done() { return (--iter > 0); }
+
+  public static void main(String args[]) {
+    Test7044738 t = new Test7044738();
+    t.test();
+  }
+
+  int test() {
+
+    while (done()) {
+      size = INITSIZE;
+
+      for (i = 0; i < size; i++) {
+        d[0] = d[1]; // 2
+        d[1] = d[2]; // 3
+        d[2] = d[3]; // 4
+        d[3] = d[0]; // 2
+
+        d[0] = d[1]; // 3
+        d[1] = d[2]; // 4
+        d[2] = d[3]; // 2
+        d[3] = d[0]; // 3
+
+        d[0] = d[1]; // 4
+        d[1] = d[2]; // 2
+        d[2] = d[3]; // 3
+        d[3] = d[0]; // 4
+
+        d[0] = d[1]; // 2
+        d[1] = d[2]; // 3
+        d[2] = d[3]; // 4
+        d[3] = d[0]; // 2
+
+        d[0] = d[1]; // 3
+        d[1] = d[2]; // 4
+        d[2] = d[3]; // 2
+        d[3] = d[0]; // 3
+
+        d[0] = d[1]; // 4
+        d[1] = d[2]; // 2
+        d[2] = d[3]; // 3
+        d[3] = d[0]; // 4
+
+        d[0] = d[1]; // 2
+        d[1] = d[2]; // 3
+        d[2] = d[3]; // 4
+        d[3] = d[0]; // 2
+
+        d[0] = d[1]; // 3
+        d[1] = d[2]; // 4
+        d[2] = d[3]; // 2
+        d[3] = d[0]; // 3
+      }
+
+      // try to defeat dead code elimination
+      if (d[0] == d[1]) {
+        System.out.println("test failed: iter=" + iter + "  i=" + i + " d[] = { " + d[0] + ", " + d[1] + ", " + d[2] + ", " + d[3] + " } ");
+        System.exit(97);
+      }
+    }
+    return d[3];
+  }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/7046096/Test7046096.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7046096
+ * @summary SEGV IN C2 WITH 6U25
+ *
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+OptimizeStringConcat Test7046096
+ */
+
+
+public class Test7046096 {
+
+  static int first = 1;
+
+  String add(String str) {
+    if (first != 0) {
+      return str + "789";
+    } else {
+      return "null";
+    }
+  }
+
+  String test(String str) {
+    for (int i=0; i < first; i++) {
+      if (i > 1)
+        return "bad";
+    }
+    return add(str+"456");
+  }
+
+  public static void main(String [] args) {
+    Test7046096 t = new Test7046096();
+    for (int i = 0; i < 11000; i++) {
+      String str = t.test("123");
+      if (!str.equals("123456789")) {
+        System.out.println("FAILED: " + str + " != \"123456789\"");
+        System.exit(97);
+      }
+    }
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/7047069/Test7047069.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7047069
+ * @summary Array can dynamically change size when assigned to an object field
+ *
+ * @run main/othervm -Xbatch Test7047069
+ */
+
+import java.util.*;
+import java.awt.geom.*;
+
+public class Test7047069 {
+    static boolean verbose;
+
+    static final int GROW_SIZE = 24;    // Multiple of cubic & quad curve size
+
+    float squareflat;           // Square of the flatness parameter
+                    // for testing against squared lengths
+
+    int limit;              // Maximum number of recursion levels
+
+    float hold[] = new float[14];   // The cache of interpolated coords
+                    // Note that this must be long enough
+                    // to store a full cubic segment and
+                    // a relative cubic segment to avoid
+                    // aliasing when copying the coords
+                    // of a curve to the end of the array.
+                    // This is also serendipitously equal
+                    // to the size of a full quad segment
+                    // and 2 relative quad segments.
+
+    int holdEnd;            // The index of the last curve segment
+                    // being held for interpolation
+
+    int holdIndex;          // The index of the curve segment
+                    // that was last interpolated.  This
+                    // is the curve segment ready to be
+                    // returned in the next call to
+                    // currentSegment().
+
+    int levels[];           // The recursion level at which
+                    // each curve being held in storage
+                    // was generated.
+
+    int levelIndex;         // The index of the entry in the
+                    // levels array of the curve segment
+                    // at the holdIndex
+
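+    // Subdivides the quadratic curve stored in src[srcoff..srcoff+5] at t = 0.5,
+    // writing the two halves into left[] and right[] (either array may be null).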
+    public static void subdivide(float src[], int srcoff,
+                                 float left[], int leftoff,
+                                 float right[], int rightoff)
+    {
+        float x1 = src[srcoff + 0];
+        float y1 = src[srcoff + 1];
+        float ctrlx = src[srcoff + 2];
+        float ctrly = src[srcoff + 3];
+        float x2 = src[srcoff + 4];
+        float y2 = src[srcoff + 5];
+        if (left != null) {
+            left[leftoff + 0] = x1;
+            left[leftoff + 1] = y1;
+        }
+        if (right != null) {
+            right[rightoff + 4] = x2;
+            right[rightoff + 5] = y2;
+        }
+        x1 = (x1 + ctrlx) / 2f;
+        y1 = (y1 + ctrly) / 2f;
+        x2 = (x2 + ctrlx) / 2f;
+        y2 = (y2 + ctrly) / 2f;
+        ctrlx = (x1 + x2) / 2f;
+        ctrly = (y1 + y2) / 2f;
+        if (left != null) {
+            left[leftoff + 2] = x1;
+            left[leftoff + 3] = y1;
+            left[leftoff + 4] = ctrlx;
+            left[leftoff + 5] = ctrly;
+        }
+        if (right != null) {
+            right[rightoff + 0] = ctrlx;
+            right[rightoff + 1] = ctrly;
+            right[rightoff + 2] = x2;
+            right[rightoff + 3] = y2;
+        }
+    }
+
+    public static double getFlatnessSq(float coords[], int offset) {
+        return Line2D.ptSegDistSq(coords[offset + 0], coords[offset + 1],
+                                  coords[offset + 4], coords[offset + 5],
+                                  coords[offset + 2], coords[offset + 3]);
+    }
+
+    public Test7047069() {
+        this.squareflat = .0001f * .0001f;
+        holdIndex = hold.length - 6;
+        holdEnd = hold.length - 2;
+        hold[holdIndex + 0] = (float) (Math.random() * 100);
+        hold[holdIndex + 1] = (float) (Math.random() * 100);
+        hold[holdIndex + 2] = (float) (Math.random() * 100);
+        hold[holdIndex + 3] = (float) (Math.random() * 100);
+        hold[holdIndex + 4] = (float) (Math.random() * 100);
+        hold[holdIndex + 5] = (float) (Math.random() * 100);
+        levelIndex = 0;
+        this.limit = 10;
+        this.levels = new int[limit + 1];
+    }
+
+    /*
+     * Ensures that the hold array can hold up to (want) more values.
+     * It is currently holding (hold.length - holdIndex) values.
+     */
+    void ensureHoldCapacity(int want) {
+        if (holdIndex - want < 0) {
+            int have = hold.length - holdIndex;
+            int newsize = hold.length + GROW_SIZE;
+            float newhold[] = new float[newsize];
+            System.arraycopy(hold, holdIndex,
+                     newhold, holdIndex + GROW_SIZE,
+                     have);
+            if (verbose) System.err.println("old hold = "+hold+"["+hold.length+"]");
+            if (verbose) System.err.println("replacement hold = "+newhold+"["+newhold.length+"]");
+            hold = newhold;
+            if (verbose) System.err.println("new hold = "+hold+"["+hold.length+"]");
+            if (verbose) System.err.println("replacement hold still = "+newhold+"["+newhold.length+"]");
+            holdIndex += GROW_SIZE;
+            holdEnd += GROW_SIZE;
+        }
+    }
+
+    private boolean next() {
+        if (holdIndex >= holdEnd) {
+            return false;
+        }
+
+        int level = levels[levelIndex];
+        while (level < limit) {
+            if (getFlatnessSq(hold, holdIndex) < squareflat) {
+                break;
+            }
+
+            ensureHoldCapacity(4);
+            subdivide(hold, holdIndex,
+                      hold, holdIndex - 4,
+                      hold, holdIndex);
+            holdIndex -= 4;
+
+            // Now that we have subdivided, we have constructed
+            // two curves of one depth lower than the original
+            // curve.  One of those curves is in the place of
+            // the former curve and one of them is in the next
+            // set of held coordinate slots.  We now set both
+            // curves' level values to the next higher level.
+            level++;
+            levels[levelIndex] = level;
+            levelIndex++;
+            levels[levelIndex] = level;
+        }
+
+        // This curve segment is flat enough, or it is too deep
+        // in recursion levels to try to flatten any more.  The
+        // two coordinates at holdIndex+4 and holdIndex+5 now
+        // contain the endpoint of the curve which can be the
+        // endpoint of an approximating line segment.
+        holdIndex += 4;
+        levelIndex--;
+        return true;
+    }
+
+    public static void main(String argv[]) {
+        verbose = (argv.length > 0);
+        for (int i = 0; i < 100000; i++) {
+            Test7047069 st = new Test7047069();
+            while (st.next()) {}
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/7048332/Test7048332.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7048332
+ * @summary Cadd_cmpLTMask doesn't handle 64-bit tmp register properly
+ *
+ * @run main/othervm -Xbatch Test7048332
+ */
+
+
+public class Test7048332 {
+
+  static int capacity = 2;
+  static int first = 1;
+  static int last = 2;
+
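+  // Computes the element count of a circular buffer: last - first, wrapping by
+  // capacity when last < first.  The int arguments i1..i6 are unused.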
+  static int test(int i1, int i2, int i3, int i4, int i5, int i6) {
+    final int result;
+    if (last >= first) {
+      result = last - first;
+    } else {
+      result = last - first + capacity;
+    }
+    return result;
+  }
+
+  public static void main(String [] args) {
+    for (int i = 0; i < 11000; i++) {
+      last = (i & 1) << 1; // 0 or 2
+      int k = test(1, 2, 3, 4, 5, 6);
+      if (k != 1) {
+        System.out.println("FAILED: " + k + " != 1");
+        System.exit(97);
+      }
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/7052494/Test7052494.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7052494
+ * @summary Eclipse test fails on JDK 7 b142
+ *
+ * @run main/othervm -Xbatch Test7052494
+ */
+
+
+public class Test7052494 {
+
+  static int test1(int i, int limit) {
+    int result = 0;
+    while (i++ != 0) {
+      if (result >= limit)
+        break;
+      result = i*2;
+    }
+    return result;
+  }
+
+  static int test2(int i, int limit) {
+    int result = 0;
+    while (i-- != 0) {
+      if (result <= limit)
+        break;
+      result = i*2;
+    }
+    return result;
+  }
+
+  static void test3(int i, int limit, int arr[]) {
+    while (i++ != 0) {
+      if (arr[i-1] >= limit)
+        break;
+      arr[i] = i*2;
+    }
+  }
+
+  static void test4(int i, int limit, int arr[]) {
+    while (i-- != 0) {
+      if (arr[arr.length + i + 1] <= limit)
+        break;
+      arr[arr.length + i] = i*2;
+    }
+  }
+
+  // Empty loop rolls through MAXINT if i > 0
+
+  static final int limit5 = Integer.MIN_VALUE + 10000;
+
+  static int test5(int i) {
+    int result = 0;
+    while (i++ != limit5) {
+      result = i*2;
+    }
+    return result;
+  }
+
+  // Empty loop rolls through MININT if i < 0
+
+  static final int limit6 = Integer.MAX_VALUE - 10000;
+
+  static int test6(int i) {
+    int result = 0;
+    while (i-- != limit6) {
+      result = i*2;
+    }
+    return result;
+  }
+
+  public static void main(String [] args) {
+    boolean failed = false;
+    int[] arr = new int[8];
+    int[] ar3 = { 0, 0, 4, 6, 8, 10, 0, 0 };
+    int[] ar4 = { 0, 0, 0, -10, -8, -6, -4, 0 };
+    System.out.println("test1");
+    for (int i = 0; i < 11000; i++) {
+      int k = test1(1, 10);
+      if (k != 10) {
+        System.out.println("FAILED: " + k + " != 10");
+        failed = true;
+        break;
+      }
+    }
+    System.out.println("test2");
+    for (int i = 0; i < 11000; i++) {
+      int k = test2(-1, -10);
+      if (k != -10) {
+        System.out.println("FAILED: " + k + " != -10");
+        failed = true;
+        break;
+      }
+    }
+    System.out.println("test3");
+    for (int i = 0; i < 11000; i++) {
+      java.util.Arrays.fill(arr, 0);
+      test3(1, 10, arr);
+      if (!java.util.Arrays.equals(arr,ar3)) {
+        System.out.println("FAILED: arr = { " + arr[0] + ", "
+                                              + arr[1] + ", "
+                                              + arr[2] + ", "
+                                              + arr[3] + ", "
+                                              + arr[4] + ", "
+                                              + arr[5] + ", "
+                                              + arr[6] + ", "
+                                              + arr[7] + " }");
+        failed = true;
+        break;
+      }
+    }
+    System.out.println("test4");
+    for (int i = 0; i < 11000; i++) {
+      java.util.Arrays.fill(arr, 0);
+      test4(-1, -10, arr);
+      if (!java.util.Arrays.equals(arr,ar4)) {
+        System.out.println("FAILED: arr = { " + arr[0] + ", "
+                                              + arr[1] + ", "
+                                              + arr[2] + ", "
+                                              + arr[3] + ", "
+                                              + arr[4] + ", "
+                                              + arr[5] + ", "
+                                              + arr[6] + ", "
+                                              + arr[7] + " }");
+        failed = true;
+        break;
+      }
+    }
+    System.out.println("test5");
+    for (int i = 0; i < 11000; i++) {
+      int k = test5(limit6);
+      if (k != limit5*2) {
+        System.out.println("FAILED: " + k + " != " + limit5*2);
+        failed = true;
+        break;
+      }
+    }
+    System.out.println("test6");
+    for (int i = 0; i < 11000; i++) {
+      int k = test6(limit5);
+      if (k != limit6*2) {
+        System.out.println("FAILED: " + k + " != " + limit6*2);
+        failed = true;
+        break;
+      }
+    }
+    System.out.println("finish");
+    if (failed)
+      System.exit(97);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/6941923/test6941923.sh	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,179 @@
+##
+## @test @(#)test6941923.sh
+## @bug 6941923 
+## @summary test newly added flags for GC log rotation
+## @author yqi 
+## @run shell test6941923.sh
+##
+
+## skip on windows
+OS=`uname -s`
+case "$OS" in
+  SunOS | Linux )
+    NULL=/dev/null
+    PS=":"
+    FS="/"
+    ;;
+  Windows_* )
+    echo "Test skipped for Windows"
+    exit 0 
+    ;;
+  * )
+    echo "Unrecognized system!"
+    exit 1;
+    ;;
+esac
+
+if [ "${JAVA_HOME}" = "" ]
+then
+  echo "JAVA_HOME not set"
+  exit 0
+fi
+
+$JAVA_HOME/bin/java -version > $NULL 2>&1
+
+if [ $? != 0 ]; then
+  echo "Wrong JAVA_HOME? JAVA_HOME: $JAVA_HOME"
+  exit $?
+fi
+
+# create a small test case
+testname="Test"
+if [ -e ${testname}.java ]; then
+  rm -rf ${testname}.*
+fi
+
+cat >> ${testname}.java << __EOF__
+import java.util.Vector;
+
+public class Test implements Runnable
+{
+  private boolean _should_stop = false;
+
+  public static void main(String[] args) throws Exception {
+
+    long limit = Long.parseLong(args[0]) * 60L * 1000L;   // minutes
+    Test t = new Test();
+    t.set_stop(false);
+    Thread thr = new Thread(t);
+    thr.start();
+
+    long time1 = System.currentTimeMillis();
+    long time2 = System.currentTimeMillis();
+    while (time2 - time1 < limit) {
+      try {
+        Thread.sleep(2000); // 2 seconds
+      }
+      catch(Exception e) {}
+      time2 = System.currentTimeMillis();
+      System.out.print("\r... " + (time2 - time1)/1000 + " seconds");
+    }
+    System.out.println();
+    t.set_stop(true);
+  }
+  public void set_stop(boolean value) { _should_stop = value; }
+  public void run() {
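+    // Continuously allocates 2KB byte arrays into a Vector, periodically dropping
+    // the Vector so garbage is produced and GC log entries accumulate.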
+    int cap = 20000;
+    int fix_size = 2048;
+    int loop = 0;
+    Vector< byte[] > v = new Vector< byte[] >(cap);
+    while(!_should_stop) {
+      byte[] g = new byte[fix_size];
+      v.add(g);
+      loop++;
+      if (loop > cap) {
+         v = null;
+         cap *= 2;
+         if (cap > 80000) cap = 80000;
+         v = new Vector< byte[] >(cap);
+      }
+    }
+  }
+}
+__EOF__
+
+msgsuccess="succeeded"
+msgfail="failed"
+gclogsize="16K"
+filesize=$((16*1024))
+$JAVA_HOME/bin/javac ${testname}.java > $NULL 2>&1
+
+if [ $? != 0 ]; then
+  echo "$JAVA_HOME/bin/javac ${testname}.java $fail"
+  exit -1
+fi
+
+# test for 2 minutes; this is enough to complete a full cycle of gc log rotation
+tts=2
+logfile="test.log"
+hotspotlog="hotspot.log"
+
+if [ -e $logfile  ]; then
+  rm -rf $logfile
+fi
+
+#also delete $hotspotlog if it exists
+if [ -f $hotspotlog ]; then 
+  rm -rf $hotspotlog
+fi
+
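+# -XX:+UseGCLogFileRotation enables rotation, -XX:NumberOfGCLogFiles sets the
+# number of rotated files, and -XX:GCLogFileSize caps the size of each file.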
+options="-Xloggc:$logfile -XX:+UseConcMarkSweepGC -XX:+PrintGC -XX:+PrintGCDetails -XX:+UseGCLogFileRotation  -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=$gclogsize"
+echo "Test gc log rotation in same file, wait for $tts minutes ...."
+$JAVA_HOME/bin/java $options $testname $tts
+if [ $? != 0 ]; then
+  echo "$msgfail"
+  exit -1
+fi
+
+# rotation file will be $logfile.0 
+if [ -f $logfile.0 ]; then
+  outfilesize=`ls -l $logfile.0 | awk '{print $5 }'`
+  if [ $((outfilesize)) -ge $((filesize)) ]; then
+    echo $msgsuccess
+  else
+    echo $msgfail
+  fi
+else 
+  echo $msgfail
+  exit -1
+fi
+
+# delete log file 
+rm -rf $logfile.0
+if [ -f $hotspotlog ]; then
+  rm -rf $hotspotlog
+fi
+
+#multiple log files
+numoffiles=3
+options="-Xloggc:$logfile -XX:+UseConcMarkSweepGC -XX:+PrintGC -XX:+PrintGCDetails -XX:+UseGCLogFileRotation  -XX:NumberOfGCLogFiles=$numoffiles -XX:GCLogFileSize=$gclogsize"
+echo "Test gc log rotation in $numoffiles files, wait for $tts minutes ...."
+$JAVA_HOME/bin/java $options $testname $tts
+if [ $? != 0 ]; then
+  echo "$msgfail"
+  exit -1
+fi
+
+atleast=0    # at least (numoffiles-1) files must reach size >= $gclogsize
+tk=0
+while [ $(($tk)) -lt $(($numoffiles)) ]
+do
+  if [ -f $logfile.$tk ]; then
+    outfilesize=`ls -l $logfile.$tk | awk '{ print $5 }'`
+    if [ $(($outfilesize)) -ge $(($filesize)) ]; then
+      atleast=$((atleast+1))
+    fi
+  fi
+  tk=$((tk+1))
+done
+
+rm -rf $logfile.*
+rm -rf $testname.*
+rm -rf $hotspotlog
+
+if [ $(($atleast)) -ge $(($numoffiles-1)) ]; then
+  echo $msgsuccess
+else
+  echo $msgfail
+  exit -1
+fi
--- a/jaxp/.hgtags	Wed Aug 17 15:18:16 2011 -0700
+++ b/jaxp/.hgtags	Wed Aug 17 22:47:12 2011 -0700
@@ -121,3 +121,4 @@
 39bf6dcaab2336326b21743cef7042d0a2de9ba0 jdk7-b144
 10ca7570f47f2ae4132648f7e8da1a05f1a98a15 jdk7-b145
 bcd31fa1e3c6f51b4fdd427ef905188cdac57164 jdk7-b146
+fc268cd1dd5d2e903ccd4b0275e1f9c2461ed30c jdk7-b147
--- a/jaxp/.jcheck/conf	Wed Aug 17 15:18:16 2011 -0700
+++ b/jaxp/.jcheck/conf	Wed Aug 17 22:47:12 2011 -0700
@@ -1,1 +1,1 @@
-project=jdk7
+project=jdk8
--- a/jaxp/make/jprt.properties	Wed Aug 17 15:18:16 2011 -0700
+++ b/jaxp/make/jprt.properties	Wed Aug 17 22:47:12 2011 -0700
@@ -25,12 +25,23 @@
 
 # Properties for jprt
 
-# Use whatever release that the submitted job requests
-jprt.tools.default.release=${jprt.submit.release}
+# The release to build
+jprt.tools.default.release=jdk8
 
 # The different build flavors we want, we override here so we just get these 2
 jprt.build.flavors=product,fastdebug
 
+# Standard list of jprt build targets for this source tree
+jprt.build.targets= 						\
+    solaris_sparc_5.10-{product|fastdebug}, 			\
+    solaris_sparcv9_5.10-{product|fastdebug}, 			\
+    solaris_i586_5.10-{product|fastdebug}, 			\
+    solaris_x64_5.10-{product|fastdebug}, 			\
+    linux_i586_2.6-{product|fastdebug}, 			\
+    linux_x64_2.6-{product|fastdebug}, 				\
+    windows_i586_5.1-{product|fastdebug}, 			\
+    windows_x64_5.2-{product|fastdebug}
+
 # Directories to be excluded from the source bundles
 jprt.bundle.exclude.src.dirs=build dist webrev
 
--- a/jaxws/.hgtags	Wed Aug 17 15:18:16 2011 -0700
+++ b/jaxws/.hgtags	Wed Aug 17 22:47:12 2011 -0700
@@ -121,3 +121,4 @@
 6bd683f2d527c9afd47beac1cbf614506929598d jdk7-b144
 42bfba80beb7d3260b7b135b9a39202b512eb8c2 jdk7-b145
 05469dd4c3662c454f8a019e492543add60795cc jdk7-b146
+d13b1f877bb5ed8dceb2f7ec10365d1db5f70b2d jdk7-b147
--- a/jaxws/.jcheck/conf	Wed Aug 17 15:18:16 2011 -0700
+++ b/jaxws/.jcheck/conf	Wed Aug 17 22:47:12 2011 -0700
@@ -1,1 +1,1 @@
-project=jdk7
+project=jdk8
--- a/jaxws/make/jprt.properties	Wed Aug 17 15:18:16 2011 -0700
+++ b/jaxws/make/jprt.properties	Wed Aug 17 22:47:12 2011 -0700
@@ -25,12 +25,23 @@
 
 # Properties for jprt
 
-# Use whatever release that the submitted job requests
-jprt.tools.default.release=${jprt.submit.release}
+# The release to build
+jprt.tools.default.release=jdk8
 
 # The different build flavors we want, we override here so we just get these 2
 jprt.build.flavors=product,fastdebug
 
+# Standard list of jprt build targets for this source tree
+jprt.build.targets= 						\
+    solaris_sparc_5.10-{product|fastdebug}, 			\
+    solaris_sparcv9_5.10-{product|fastdebug}, 			\
+    solaris_i586_5.10-{product|fastdebug}, 			\
+    solaris_x64_5.10-{product|fastdebug}, 			\
+    linux_i586_2.6-{product|fastdebug}, 			\
+    linux_x64_2.6-{product|fastdebug}, 				\
+    windows_i586_5.1-{product|fastdebug}, 			\
+    windows_x64_5.2-{product|fastdebug}
+
 # Directories to be excluded from the source bundles
 jprt.bundle.exclude.src.dirs=build dist webrev
 
--- a/jdk/src/share/classes/javax/swing/RepaintManager.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/jdk/src/share/classes/javax/swing/RepaintManager.java	Wed Aug 17 22:47:12 2011 -0700
@@ -758,6 +758,11 @@
             for(i=0 ; i < count ; i++) {
                 dirtyComponent = roots.get(i);
                 rect = tmpDirtyComponents.get(dirtyComponent);
+                // Sometimes when the RepaintManager is changed during painting
+                // we may get null here; see #6995769 for details
+                if (rect == null) {
+                    continue;
+                }
                 localBoundsH = dirtyComponent.getHeight();
                 localBoundsW = dirtyComponent.getWidth();
 
--- a/jdk/src/share/classes/javax/swing/plaf/metal/MetalLookAndFeel.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/jdk/src/share/classes/javax/swing/plaf/metal/MetalLookAndFeel.java	Wed Aug 17 22:47:12 2011 -0700
@@ -2167,7 +2167,7 @@
     /**
      * Returns a {@code LayoutStyle} implementing the Java look and feel
      * design guidelines as specified at
-     * <a href="http://java.sun.com/products/jlf/ed2/book/HIG.Visual2.html">http://java.sun.com/products/jlf/ed2/book/HIG.Visual2.html</a>.
+     * <a href="http://www.oracle.com/technetwork/java/hig-136467.html">http://www.oracle.com/technetwork/java/hig-136467.html</a>.
      *
      * @return LayoutStyle implementing the Java look and feel design
      *         guidelines
--- a/jdk/src/share/classes/sun/swing/DefaultLayoutStyle.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/jdk/src/share/classes/sun/swing/DefaultLayoutStyle.java	Wed Aug 17 22:47:12 2011 -0700
@@ -48,10 +48,12 @@
     @Override
     public int getPreferredGap(JComponent component1, JComponent component2,
             ComponentPlacement type, int position, Container parent) {
-
         if (component1 == null || component2 == null || type == null) {
             throw new NullPointerException();
         }
+
+        checkPosition(position);
+
         if (type == ComponentPlacement.INDENT &&
                 (position == SwingConstants.EAST ||
                  position == SwingConstants.WEST)) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/awt/MenuBar/MenuBarSetFont/MenuBarSetFont.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.awt.SunToolkit;
+
+import java.awt.Button;
+import java.awt.CardLayout;
+import java.awt.Font;
+import java.awt.Frame;
+import java.awt.Menu;
+import java.awt.MenuBar;
+import java.awt.Point;
+import java.awt.Robot;
+import java.awt.Toolkit;
+import java.awt.event.ActionEvent;
+import java.awt.event.ActionListener;
+import java.awt.event.InputEvent;
+
+/**
+ * @test
+ * @bug 6263470
+ * @summary Tries to change the font of the MenuBar. The test passes if the font has changed,
+ * fails otherwise.
+ * @author Vyacheslav.Baranov: area=menu
+ * @run main MenuBarSetFont
+ */
+public final class MenuBarSetFont {
+
+    private static final Frame frame = new Frame();
+    private static final MenuBar mb = new MenuBar();
+    private static volatile boolean clicked;
+
+    private static final class Listener implements ActionListener {
+        @Override
+        public void actionPerformed(final ActionEvent e) {
+            // The click on this button is performed
+            // _only_ if the font of the MenuBar was not changed in time
+            MenuBarSetFont.clicked = true;
+        }
+    }
+
+    private static void addMenu() {
+        mb.add(new Menu("w"));
+        frame.validate();
+    }
+
+    public static void main(final String[] args) throws Exception {
+        //Components initialization.
+        frame.setMenuBar(mb);
+        mb.setFont(new Font("Helvetica", Font.ITALIC, 5));
+
+        final Button button = new Button("Click Me");
+        button.addActionListener(new Listener());
+        frame.setLayout(new CardLayout());
+        frame.add(button, "First");
+        frame.setSize(400, 400);
+        frame.setVisible(true);
+        sleep();
+
+        final int fInsets = frame.getInsets().top;  //Frame insets without menu.
+        addMenu();
+        final int fMenuInsets = frame.getInsets().top; //Frame insets with menu.
+        final int menuBarHeight = fMenuInsets - fInsets;
+        // There is no way to change the menubar height on Windows. But on Windows
+        // we can try to split the menubar into 2 rows.
+        for (int i = 0; i < 100 && fMenuInsets == frame.getInsets().top; ++i) {
+            // Fill whole menubar.
+            addMenu();
+        }
+
+        mb.remove(0);
+        frame.validate();
+        sleep();
+
+        // Test execution.
+        // On XToolkit, menubar font should be changed to 60.
+        // On WToolkit, menubar font should be changed to the default and the menubar
+        // should be split into 2 rows.
+        mb.setFont(new Font("Helvetica", Font.ITALIC, 60));
+        sleep();
+
+        final Robot r = new Robot();
+        r.setAutoDelay(200);
+        final Point pt = frame.getLocation();
+        r.mouseMove(pt.x + frame.getWidth() / 2,
+                    pt.y + fMenuInsets + menuBarHeight / 2);
+        r.mousePress(InputEvent.BUTTON1_MASK);
+        r.mouseRelease(InputEvent.BUTTON1_MASK);
+
+        sleep();
+        frame.dispose();
+
+        if (clicked) {
+            fail("Font was not changed");
+        }
+    }
+
+    private static void sleep() {
+        ((SunToolkit) Toolkit.getDefaultToolkit()).realSync();
+        try {
+            Thread.sleep(500L);
+        } catch (InterruptedException ignored) {
+        }
+    }
+
+    private static void fail(final String message) {
+        throw new RuntimeException(message);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/javax/swing/GroupLayout/7071166/bug7071166.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 7071166
+ * @summary LayoutStyle.getPreferredGap() - IAE is expected but not thrown
+ * @author Pavel Porvatov
+ */
+
+import javax.swing.*;
+import static javax.swing.SwingConstants.*;
+import java.awt.*;
+
+public class bug7071166 {
+    private static final int[] POSITIONS = {NORTH, EAST, SOUTH, WEST, // valid positions
+            NORTH_EAST, SOUTH_EAST, SOUTH_WEST, NORTH_WEST, 123, -456}; // invalid positions
+
+    public static void main(String[] args) throws Exception {
+        for (UIManager.LookAndFeelInfo lookAndFeelInfo : UIManager.getInstalledLookAndFeels()) {
+            UIManager.setLookAndFeel(lookAndFeelInfo.getClassName());
+
+            System.out.println("LookAndFeel: " + lookAndFeelInfo.getName());
+
+            SwingUtilities.invokeAndWait(new Runnable() {
+                public void run() {
+                    LayoutStyle layoutStyle = LayoutStyle.getInstance();
+
+                    System.out.println("LayoutStyle: " + layoutStyle);
+
+                    for (int i = 0; i < POSITIONS.length; i++) {
+                        int position = POSITIONS[i];
+
+                        try {
+                            layoutStyle.getPreferredGap(new JButton(), new JButton(),
+                                    LayoutStyle.ComponentPlacement.RELATED, position, new Container());
+
+                            if (i > 3) {
+                                throw new RuntimeException("IllegalArgumentException is not thrown for position " +
+                                        position);
+                            }
+                        } catch (IllegalArgumentException e) {
+                            if (i <= 3) {
+                                throw new RuntimeException("IllegalArgumentException is thrown for position " +
+                                        position);
+                            }
+                        }
+                    }
+                }
+            });
+
+            System.out.println("passed");
+        }
+    }
+}
--- a/langtools/.hgtags	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/.hgtags	Wed Aug 17 22:47:12 2011 -0700
@@ -121,3 +121,4 @@
 8eb952f43b117d538f6ca5e9e43ff9ce7646c7ee jdk7-b144
 c455e2ae5c93014ae3fc475aba4509b5f70465f7 jdk7-b145
 9425dd4f53d5bfcd992d9aecea0eb7d8b2d4f62b jdk7-b146
+58bc532d63418ac3c9b42460d89cdaf595c6f3e1 jdk7-b147
--- a/langtools/.jcheck/conf	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/.jcheck/conf	Wed Aug 17 22:47:12 2011 -0700
@@ -1,1 +1,1 @@
-project=jdk7
+project=jdk8
--- a/langtools/make/build.xml	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/make/build.xml	Wed Aug 17 22:47:12 2011 -0700
@@ -336,7 +336,9 @@
             classpath="${dist.lib.dir}/javac.jar"
             includeAntRuntime="no"
             debug="${javac.debug}"
-            debuglevel="${javac.debuglevel}"/>
+            debuglevel="${javac.debuglevel}">
+            <compilerarg line="${javac.lint.opts}"/>
+        </javac>
         <java fork="true"
             jvm="${target.java.home}/bin/java"
             dir="test/tools/javac/diags"
@@ -857,7 +859,10 @@
                srcdir="${make.tools.dir}/CompileProperties"
                destdir="${build.toolclasses.dir}/"
                classpath="${ant.core.lib}"
-               includeantruntime="false"/>
+               bootclasspath="${boot.java.home}/jre/lib/rt.jar"
+               includeantruntime="false">
+            <compilerarg line="${javac.lint.opts}"/>
+        </javac>
         <taskdef name="pcompile"
                  classname="CompilePropertiesTask"
                  classpath="${build.toolclasses.dir}/"/>
@@ -874,6 +879,7 @@
                classpath="${ant.core.lib}"
                includeantruntime="false">
             <compilerarg value="-Xbootclasspath/p:${build.bootstrap.dir}/classes"/>
+            <compilerarg line="${javac.lint.opts}"/>
         </javac>
         <taskdef name="genstubs"
                  classname="GenStubs$$Ant"
--- a/langtools/make/jprt.properties	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/make/jprt.properties	Wed Aug 17 22:47:12 2011 -0700
@@ -25,12 +25,23 @@
 
 # Properties for jprt
 
-# Use whatever release that the submitted job requests
-jprt.tools.default.release=${jprt.submit.release}
+# The release to build
+jprt.tools.default.release=jdk8
 
 # The different build flavors we want, we override here so we just get these 2
 jprt.build.flavors=product,fastdebug
 
+# Standard list of jprt build targets for this source tree
+jprt.build.targets= 						\
+    solaris_sparc_5.10-{product|fastdebug}, 			\
+    solaris_sparcv9_5.10-{product|fastdebug}, 			\
+    solaris_i586_5.10-{product|fastdebug}, 			\
+    solaris_x64_5.10-{product|fastdebug}, 			\
+    linux_i586_2.6-{product|fastdebug}, 			\
+    linux_x64_2.6-{product|fastdebug}, 				\
+    windows_i586_5.1-{product|fastdebug}, 			\
+    windows_x64_5.2-{product|fastdebug}
+
 # Directories to be excluded from the source bundles
 jprt.bundle.exclude.src.dirs=build dist webrev
 
--- a/langtools/make/tools/CompileProperties/CompileProperties.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/make/tools/CompileProperties/CompileProperties.java	Wed Aug 17 22:47:12 2011 -0700
@@ -222,13 +222,13 @@
                 sortedKeys.add((String)key);
             }
             Collections.sort(sortedKeys);
-            Iterator keys = sortedKeys.iterator();
+            Iterator<String> keys = sortedKeys.iterator();
 
             StringBuffer data = new StringBuffer();
 
             while (keys.hasNext()) {
-                Object key = keys.next();
-                data.append("            { \"" + escape((String)key) + "\", \"" +
+                String key = keys.next();
+                data.append("            { \"" + escape(key) + "\", \"" +
                         escape((String)p.get(key)) + "\" },\n");
             }
 
--- a/langtools/src/share/classes/com/sun/tools/javac/api/JavacTaskImpl.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javac/api/JavacTaskImpl.java	Wed Aug 17 22:47:12 2011 -0700
@@ -158,10 +158,10 @@
         } else {
             initContext();
             compilerMain.setOptions(Options.instance(context));
-            compilerMain.filenames = new ListBuffer<File>();
-            List<File> filenames = compilerMain.processArgs(CommandLine.parse(args));
+            compilerMain.filenames = new LinkedHashSet<File>();
+            Collection<File> filenames = compilerMain.processArgs(CommandLine.parse(args));
             if (!filenames.isEmpty())
-                throw new IllegalArgumentException("Malformed arguments " + filenames.toString(" "));
+                throw new IllegalArgumentException("Malformed arguments " + toString(filenames, " "));
             compiler = JavaCompiler.instance(context);
             compiler.keepComments = true;
             compiler.genEndPos = true;
@@ -177,6 +177,17 @@
         }
     }
 
+    <T> String toString(Iterable<T> items, String sep) {
+        String currSep = "";
+        StringBuilder sb = new StringBuilder();
+        for (T item: items) {
+            sb.append(currSep);
+            sb.append(item.toString());
+            currSep = sep;
+        }
+        return sb.toString();
+    }
+
     private void initContext() {
         context.put(JavacTaskImpl.class, this);
         if (context.get(TaskListener.class) != null)
--- a/langtools/src/share/classes/com/sun/tools/javac/code/Source.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javac/code/Source.java	Wed Aug 17 22:47:12 2011 -0700
@@ -64,8 +64,11 @@
     /** 1.6 reports encoding problems as errors instead of warnings. */
     JDK1_6("1.6"),
 
-    /** 1.7 covers the to be determined language features that will be added in JDK 7. */
-    JDK1_7("1.7");
+    /** 1.7 introduced try-with-resources, multi-catch, string switch, etc. */
+    JDK1_7("1.7"),
+
+    /** 1.8 covers the to be determined language features that will be added in JDK 8. */
+    JDK1_8("1.8");
 
     private static final Context.Key<Source> sourceKey
         = new Context.Key<Source>();
@@ -92,19 +95,21 @@
         tab.put("5", JDK1_5); // Make 5 an alias for 1.5
         tab.put("6", JDK1_6); // Make 6 an alias for 1.6
         tab.put("7", JDK1_7); // Make 7 an alias for 1.7
+        tab.put("8", JDK1_8); // Make 8 an alias for 1.8
     }
 
     private Source(String name) {
         this.name = name;
     }
 
-    public static final Source DEFAULT = JDK1_7;
+    public static final Source DEFAULT = JDK1_8;
 
     public static Source lookup(String name) {
         return tab.get(name);
     }
 
     public Target requiredTarget() {
+        if (this.compareTo(JDK1_8) >= 0) return Target.JDK1_8;
         if (this.compareTo(JDK1_7) >= 0) return Target.JDK1_7;
         if (this.compareTo(JDK1_6) >= 0) return Target.JDK1_6;
         if (this.compareTo(JDK1_5) >= 0) return Target.JDK1_5;
@@ -203,6 +208,8 @@
             return RELEASE_6;
         case JDK1_7:
             return RELEASE_7;
+        case JDK1_8:
+            return RELEASE_8;
         default:
             return null;
         }
--- a/langtools/src/share/classes/com/sun/tools/javac/comp/Attr.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javac/comp/Attr.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1837,7 +1837,7 @@
         try {
             constructor = rs.resolveDiamond(tree.pos(),
                     localEnv,
-                    clazztype.tsym.type,
+                    clazztype,
                     argtypes,
                     typeargtypes);
         } finally {
@@ -2872,8 +2872,10 @@
 
         if (clazztype.tag == CLASS) {
             List<Type> formals = clazztype.tsym.type.getTypeArguments();
-
-            if (actuals.length() == formals.length() || actuals.length() == 0) {
+            if (actuals.isEmpty()) //diamond
+                actuals = formals;
+
+            if (actuals.length() == formals.length()) {
                 List<Type> a = actuals;
                 List<Type> f = formals;
                 while (a.nonEmpty()) {
@@ -3388,6 +3390,13 @@
         }
 
         @Override
+        public void visitAssignop(JCAssignOp that) {
+            if (that.operator == null)
+                that.operator = new OperatorSymbol(names.empty, syms.unknownType, -1, syms.noSymbol);
+            super.visitAssignop(that);
+        }
+
+        @Override
         public void visitBinary(JCBinary that) {
             if (that.operator == null)
                 that.operator = new OperatorSymbol(names.empty, syms.unknownType, -1, syms.noSymbol);
--- a/langtools/src/share/classes/com/sun/tools/javac/comp/Lower.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javac/comp/Lower.java	Wed Aug 17 22:47:12 2011 -0700
@@ -3450,6 +3450,7 @@
                 JCExpression expression = oneCase.getExpression();
 
                 if (expression != null) { // expression for a "default" case is null
+                    expression = TreeInfo.skipParens(expression);
                     String labelExpr = (String) expression.type.constValue();
                     Integer mapping = caseLabelToPosition.put(labelExpr, casePosition);
                     Assert.checkNull(mapping);
@@ -3555,8 +3556,8 @@
                 if (isDefault)
                     caseExpr = null;
                 else {
-                    caseExpr = make.Literal(caseLabelToPosition.get((String)oneCase.
-                                                                    getExpression().
+                    caseExpr = make.Literal(caseLabelToPosition.get((String)TreeInfo.skipParens(oneCase.
+                                                                                                getExpression()).
                                                                     type.constValue()));
                 }
 
--- a/langtools/src/share/classes/com/sun/tools/javac/comp/Resolve.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javac/comp/Resolve.java	Wed Aug 17 22:47:12 2011 -0700
@@ -767,16 +767,13 @@
                                        m2.erasure(types).getParameterTypes()))
                     return ambiguityError(m1, m2);
                 // both abstract, neither overridden; merge throws clause and result type
-                Symbol mostSpecific;
-                if (types.returnTypeSubstitutable(mt1, mt2))
-                    mostSpecific = m1;
-                else if (types.returnTypeSubstitutable(mt2, mt1))
-                    mostSpecific = m2;
-                else {
+                Type mst = mostSpecificReturnType(mt1, mt2);
+                if (mst == null) {
                     // Theoretically, this can't happen, but it is possible
                     // due to error recovery or mixing incompatible class files
                     return ambiguityError(m1, m2);
                 }
+                Symbol mostSpecific = mst == mt1 ? m1 : m2;
                 List<Type> allThrown = chk.intersect(mt1.getThrownTypes(), mt2.getThrownTypes());
                 Type newSig = types.createMethodTypeWithThrown(mostSpecific.type, allThrown);
                 MethodSymbol result = new MethodSymbol(
@@ -859,6 +856,28 @@
         }
     }
     //where
+    Type mostSpecificReturnType(Type mt1, Type mt2) {
+        Type rt1 = mt1.getReturnType();
+        Type rt2 = mt2.getReturnType();
+
+        if (mt1.tag == FORALL && mt2.tag == FORALL) {
+            //if both are generic methods, adjust return type ahead of subtyping check
+            rt1 = types.subst(rt1, mt1.getTypeArguments(), mt2.getTypeArguments());
+        }
+        //first use subtyping, then return type substitutability
+        if (types.isSubtype(rt1, rt2)) {
+            return mt1;
+        } else if (types.isSubtype(rt2, rt1)) {
+            return mt2;
+        } else if (types.returnTypeSubstitutable(mt1, mt2)) {
+            return mt1;
+        } else if (types.returnTypeSubstitutable(mt2, mt1)) {
+            return mt2;
+        } else {
+            return null;
+        }
+    }
+    //where
     Symbol ambiguityError(Symbol m1, Symbol m2) {
         if (((m1.flags() | m2.flags()) & CLASH) != 0) {
             return (m1.flags() & CLASH) == 0 ? m1 : m2;
--- a/langtools/src/share/classes/com/sun/tools/javac/jvm/Target.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javac/jvm/Target.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,7 +66,10 @@
     JDK1_6("1.6", 50, 0),
 
     /** JDK 7. */
-    JDK1_7("1.7", 51, 0);
+    JDK1_7("1.7", 51, 0),
+
+    /** JDK 8. */ // For now, a clone of 7
+    JDK1_8("1.8", 51, 0);
 
     private static final Context.Key<Target> targetKey =
         new Context.Key<Target>();
@@ -99,6 +102,7 @@
         tab.put("5", JDK1_5);
         tab.put("6", JDK1_6);
         tab.put("7", JDK1_7);
+        tab.put("8", JDK1_8);
     }
 
     public final String name;
@@ -110,7 +114,7 @@
         this.minorVersion = minorVersion;
     }
 
-    public static final Target DEFAULT = JDK1_7;
+    public static final Target DEFAULT = JDK1_8;
 
     public static Target lookup(String name) {
         return tab.get(name);
--- a/langtools/src/share/classes/com/sun/tools/javac/main/Main.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javac/main/Main.java	Wed Aug 17 22:47:12 2011 -0700
@@ -31,7 +31,10 @@
 import java.net.URL;
 import java.security.DigestInputStream;
 import java.security.MessageDigest;
+import java.util.Collection;
+import java.util.LinkedHashSet;
 import java.util.MissingResourceException;
+import java.util.Set;
 import javax.tools.JavaFileManager;
 import javax.tools.JavaFileObject;
 import javax.annotation.processing.Processor;
@@ -107,8 +110,7 @@
         }
 
         public void addFile(File f) {
-            if (!filenames.contains(f))
-                filenames.append(f);
+            filenames.add(f);
         }
 
         public void addClassName(String s) {
@@ -136,7 +138,7 @@
 
     /** The list of source files to process
      */
-    public ListBuffer<File> filenames = null; // XXX sb protected
+    public Set<File> filenames = null; // XXX sb protected
 
     /** List of class files names passed on the command line
      */
@@ -202,7 +204,7 @@
      *  in `options' table and return all source filenames.
      *  @param flags    The array of command line arguments.
      */
-    public List<File> processArgs(String[] flags) { // XXX sb protected
+    public Collection<File> processArgs(String[] flags) { // XXX sb protected
         int ac = 0;
         while (ac < flags.length) {
             String flag = flags[ac];
@@ -294,7 +296,7 @@
             showClass(showClass);
         }
 
-        return filenames.toList();
+        return filenames;
     }
     // where
         private boolean checkDirectory(OptionName optName) {
@@ -342,7 +344,7 @@
         if (options == null)
             options = Options.instance(context); // creates a new one
 
-        filenames = new ListBuffer<File>();
+        filenames = new LinkedHashSet<File>();
         classnames = new ListBuffer<String>();
         JavaCompiler comp = null;
         /*
@@ -356,7 +358,7 @@
                 return EXIT_CMDERR;
             }
 
-            List<File> files;
+            Collection<File> files;
             try {
                 files = processArgs(CommandLine.parse(args));
                 if (files == null) {
--- a/langtools/src/share/classes/com/sun/tools/javac/parser/JavacParser.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javac/parser/JavacParser.java	Wed Aug 17 22:47:12 2011 -0700
@@ -57,7 +57,7 @@
 
     /** The scanner used for lexical analysis.
      */
-    private Lexer S;
+    protected Lexer S;
 
     /** The factory to be used for abstract syntax tree construction.
      */
@@ -99,9 +99,9 @@
         this.allowTWR = source.allowTryWithResources();
         this.allowDiamond = source.allowDiamond();
         this.allowMulticatch = source.allowMulticatch();
+        this.allowStringFolding = fac.options.getBoolean("allowStringFolding", true);
         this.keepDocComments = keepDocComments;
-        if (keepDocComments)
-            docComments = new HashMap<JCTree,String>();
+        docComments = keepDocComments ? new HashMap<JCTree,String>() : null;
         this.keepLineMap = keepLineMap;
         this.errorTree = F.Erroneous();
     }
@@ -146,6 +146,10 @@
      */
     boolean allowTWR;
 
+    /** Switch: should we fold strings?
+     */
+    boolean allowStringFolding;
+
     /** Switch: should we keep docComments?
      */
     boolean keepDocComments;
@@ -757,6 +761,8 @@
          *  by a single literal representing the concatenated string.
          */
         protected StringBuffer foldStrings(JCTree tree) {
+            if (!allowStringFolding)
+                return null;
             List<String> buf = List.nil();
             while (true) {
                 if (tree.getTag() == JCTree.LITERAL) {
@@ -1375,8 +1381,10 @@
         int oldmode = mode;
         mode = TYPE;
         boolean diamondFound = false;
+        int lastTypeargsPos = -1;
         if (S.token() == LT) {
             checkGenerics();
+            lastTypeargsPos = S.pos();
             t = typeArguments(t, true);
             diamondFound = (mode & DIAMOND) != 0;
         }
@@ -1389,6 +1397,7 @@
             S.nextToken();
             t = toP(F.at(pos).Select(t, ident()));
             if (S.token() == LT) {
+                lastTypeargsPos = S.pos();
                 checkGenerics();
                 t = typeArguments(t, true);
                 diamondFound = (mode & DIAMOND) != 0;
@@ -1397,7 +1406,11 @@
         mode = oldmode;
         if (S.token() == LBRACKET) {
             JCExpression e = arrayCreatorRest(newpos, t);
-            if (typeArgs != null) {
+            if (diamondFound) {
+                reportSyntaxError(lastTypeargsPos, "cannot.create.array.with.diamond");
+                return toP(F.at(newpos).Erroneous(List.of(e)));
+            }
+            else if (typeArgs != null) {
                 int pos = newpos;
                 if (!typeArgs.isEmpty() && typeArgs.head.pos != Position.NOPOS) {
                     // note: this should always happen but we should
--- a/langtools/src/share/classes/com/sun/tools/javac/processing/JavacProcessingEnvironment.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javac/processing/JavacProcessingEnvironment.java	Wed Aug 17 22:47:12 2011 -0700
@@ -295,59 +295,24 @@
 
     /**
      * Use a service loader appropriate for the platform to provide an
-     * iterator over annotations processors.  If
-     * java.util.ServiceLoader is present use it, otherwise, use
-     * sun.misc.Service, otherwise fail if a loader is needed.
+     * iterator over annotations processors; fails if a loader is
+     * needed but unavailable.
      */
     private class ServiceIterator implements Iterator<Processor> {
-        // The to-be-wrapped iterator.
-        private Iterator<?> iterator;
+        private Iterator<Processor> iterator;
         private Log log;
-        private Class<?> loaderClass;
-        private boolean jusl;
-        private Object loader;
+        private ServiceLoader<Processor> loader;
 
         ServiceIterator(ClassLoader classLoader, Log log) {
-            String loadMethodName;
-
             this.log = log;
             try {
                 try {
-                    loaderClass = Class.forName("java.util.ServiceLoader");
-                    loadMethodName = "load";
-                    jusl = true;
-                } catch (ClassNotFoundException cnfe) {
-                    try {
-                        loaderClass = Class.forName("sun.misc.Service");
-                        loadMethodName = "providers";
-                        jusl = false;
-                    } catch (ClassNotFoundException cnfe2) {
-                        // Fail softly if a loader is not actually needed.
-                        this.iterator = handleServiceLoaderUnavailability("proc.no.service",
-                                                                          null);
-                        return;
-                    }
+                    loader = ServiceLoader.load(Processor.class, classLoader);
+                    this.iterator = loader.iterator();
+                } catch (Exception e) {
+                    // Fail softly if a loader is not actually needed.
+                    this.iterator = handleServiceLoaderUnavailability("proc.no.service", null);
                 }
-
-                // java.util.ServiceLoader.load or sun.misc.Service.providers
-                Method loadMethod = loaderClass.getMethod(loadMethodName,
-                                                          Class.class,
-                                                          ClassLoader.class);
-
-                Object result = loadMethod.invoke(null,
-                                                  Processor.class,
-                                                  classLoader);
-
-                // For java.util.ServiceLoader, we have to call another
-                // method to get the iterator.
-                if (jusl) {
-                    loader = result; // Store ServiceLoader to call reload later
-                    Method m = loaderClass.getMethod("iterator");
-                    result = m.invoke(result); // serviceLoader.iterator();
-                }
-
-                // The result should now be an iterator.
-                this.iterator = (Iterator<?>) result;
             } catch (Throwable t) {
                 log.error("proc.service.problem");
                 throw new Abort(t);
@@ -357,25 +322,21 @@
         public boolean hasNext() {
             try {
                 return iterator.hasNext();
+            } catch(ServiceConfigurationError sce) {
+                log.error("proc.bad.config.file", sce.getLocalizedMessage());
+                throw new Abort(sce);
             } catch (Throwable t) {
-                if ("ServiceConfigurationError".
-                    equals(t.getClass().getSimpleName())) {
-                    log.error("proc.bad.config.file", t.getLocalizedMessage());
-                }
                 throw new Abort(t);
             }
         }
 
         public Processor next() {
             try {
-                return (Processor)(iterator.next());
+                return iterator.next();
+            } catch (ServiceConfigurationError sce) {
+                log.error("proc.bad.config.file", sce.getLocalizedMessage());
+                throw new Abort(sce);
             } catch (Throwable t) {
-                if ("ServiceConfigurationError".
-                    equals(t.getClass().getSimpleName())) {
-                    log.error("proc.bad.config.file", t.getLocalizedMessage());
-                } else {
-                    log.error("proc.processor.constructor.error", t.getLocalizedMessage());
-                }
                 throw new Abort(t);
             }
         }
@@ -385,11 +346,9 @@
         }
 
         public void close() {
-            if (jusl) {
+            if (loader != null) {
                 try {
-                    // Call java.util.ServiceLoader.reload
-                    Method reloadMethod = loaderClass.getMethod("reload");
-                    reloadMethod.invoke(loader);
+                    loader.reload();
                 } catch(Exception e) {
                     ; // Ignore problems during a call to reload.
                 }
@@ -761,7 +720,7 @@
      * Leave class public for external testing purposes.
      */
     public static class ComputeAnnotationSet extends
-        ElementScanner7<Set<TypeElement>, Set<TypeElement>> {
+        ElementScanner8<Set<TypeElement>, Set<TypeElement>> {
         final Elements elements;
 
         public ComputeAnnotationSet(Elements elements) {
@@ -1516,6 +1475,14 @@
         return context;
     }
 
+    /**
+     * Internal use method to return the writer being used by the
+     * processing environment.
+     */
+    public PrintWriter getWriter() {
+        return context.get(Log.outKey);
+    }
+
     public String toString() {
         return "javac ProcessingEnvironment";
     }
--- a/langtools/src/share/classes/com/sun/tools/javac/processing/JavacRoundEnvironment.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javac/processing/JavacRoundEnvironment.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,7 @@
         else
             throw new AssertionError("Bad implementation type for " + tm);
 
-        ElementScanner7<Set<Element>, DeclaredType> scanner =
+        ElementScanner8<Set<Element>, DeclaredType> scanner =
             new AnnotationSetScanner(result, typeUtil);
 
         for (Element element : rootElements)
@@ -136,7 +136,7 @@
 
     // Could be written as a local class inside getElementsAnnotatedWith
     private class AnnotationSetScanner extends
-        ElementScanner7<Set<Element>, DeclaredType> {
+        ElementScanner8<Set<Element>, DeclaredType> {
         // Insertion-order preserving set
         Set<Element> annotatedElements = new LinkedHashSet<Element>();
         Types typeUtil;
--- a/langtools/src/share/classes/com/sun/tools/javac/processing/PrintingProcessor.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javac/processing/PrintingProcessor.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,8 +48,7 @@
  * deletion without notice.</b>
  */
 @SupportedAnnotationTypes("*")
-// TODO: Change to version 7 based visitors when available
-@SupportedSourceVersion(SourceVersion.RELEASE_7)
+@SupportedSourceVersion(SourceVersion.RELEASE_8)
 public class PrintingProcessor extends AbstractProcessor {
     PrintWriter writer;
 
@@ -83,7 +82,7 @@
      * Used for the -Xprint option and called by Elements.printElements
      */
     public static class PrintingElementVisitor
-        extends SimpleElementVisitor7<PrintingElementVisitor, Boolean> {
+        extends SimpleElementVisitor8<PrintingElementVisitor, Boolean> {
         int indentation; // Indentation level;
         final PrintWriter writer;
         final Elements elementUtils;
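
PrintingProcessor now declares RELEASE_8 and bases its element printer on SimpleElementVisitor8, one of the visitor classes added later in this changeset. A small sketch of a visitor built the same way; the class name SummaryVisitor is hypothetical, and unhandled element kinds fall through to defaultAction:

    import javax.lang.model.element.Element;
    import javax.lang.model.element.ExecutableElement;
    import javax.lang.model.element.TypeElement;
    import javax.lang.model.util.SimpleElementVisitor8;

    // Prints a one-line summary per element; recurses into enclosed elements of a type.
    class SummaryVisitor extends SimpleElementVisitor8<Void, Void> {
        @Override
        public Void visitType(TypeElement e, Void p) {
            System.out.println("type " + e.getQualifiedName());
            for (Element enclosed : e.getEnclosedElements())
                visit(enclosed);
            return null;
        }

        @Override
        public Void visitExecutable(ExecutableElement e, Void p) {
            System.out.println("  executable " + e.getSimpleName());
            return null;
        }

        @Override
        protected Void defaultAction(Element e, Void p) {
            System.out.println("  " + e.getKind() + " " + e.getSimpleName());
            return null;
        }
    }
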
--- a/langtools/src/share/classes/com/sun/tools/javac/resources/compiler.properties	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javac/resources/compiler.properties	Wed Aug 17 22:47:12 2011 -0700
@@ -462,6 +462,9 @@
 compiler.err.cannot.create.array.with.type.arguments=\
     cannot create array with type arguments
 
+compiler.err.cannot.create.array.with.diamond=\
+    cannot create array with ''<>''
+
 #
 # limits.  We don't give the limits in the diagnostic because we expect
 # them to change, yet we want to use the same diagnostic.  These are all
@@ -637,8 +640,7 @@
     Class names, ''{0}'', are only accepted if annotation processing is explicitly requested
 
 compiler.err.proc.no.service=\
-    A service loader class could not be found.\n\
-    Either java.util.ServiceLoader or sun.misc.Service must be available.
+    A ServiceLoader was not usable and is required for annotation processing.
 
 compiler.err.proc.processor.bad.option.name=\
     Bad option name ''{0}'' provided by processor ''{1}''
@@ -647,9 +649,6 @@
 compiler.err.proc.processor.cant.instantiate=\
     Could not instantiate an instance of processor ''{0}''
 
-compiler.err.proc.processor.constructor.error=\
-    Exception thrown while constructing Processor object: {0}
-
 # 0: string
 compiler.err.proc.processor.not.found=\
     Annotation processor ''{0}'' not found
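
The new cannot.create.array.with.diamond diagnostic covers array creation expressions written with the diamond form. A hedged illustration of code that should trigger it, together with the usual raw-type workaround; the class name DiamondArray is hypothetical:

    import java.util.ArrayList;
    import java.util.List;

    class DiamondArray {
        // javac should reject the following with "cannot create array with '<>'":
        //     List<String>[] lists = new ArrayList<>[10];

        // The conventional workaround: create a raw array and cast, suppressing
        // the resulting unchecked warning.
        @SuppressWarnings("unchecked")
        List<String>[] lists = (List<String>[]) new ArrayList[10];
    }
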
--- a/langtools/src/share/classes/com/sun/tools/javadoc/AnnotationTypeDocImpl.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javadoc/AnnotationTypeDocImpl.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,11 +46,11 @@
 public class AnnotationTypeDocImpl
         extends ClassDocImpl implements AnnotationTypeDoc {
 
-    AnnotationTypeDocImpl(DocEnv env, ClassSymbol sym) {
+    public AnnotationTypeDocImpl(DocEnv env, ClassSymbol sym) {
         this(env, sym, null, null, null);
     }
 
-    AnnotationTypeDocImpl(DocEnv env, ClassSymbol sym,
+    public AnnotationTypeDocImpl(DocEnv env, ClassSymbol sym,
                           String doc, JCClassDecl tree, Position.LineMap lineMap) {
         super(env, sym, doc, tree, lineMap);
     }
--- a/langtools/src/share/classes/com/sun/tools/javadoc/AnnotationTypeElementDocImpl.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javadoc/AnnotationTypeElementDocImpl.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,11 +45,11 @@
 public class AnnotationTypeElementDocImpl
         extends MethodDocImpl implements AnnotationTypeElementDoc {
 
-    AnnotationTypeElementDocImpl(DocEnv env, MethodSymbol sym) {
+    public AnnotationTypeElementDocImpl(DocEnv env, MethodSymbol sym) {
         super(env, sym);
     }
 
-    AnnotationTypeElementDocImpl(DocEnv env, MethodSymbol sym,
+    public AnnotationTypeElementDocImpl(DocEnv env, MethodSymbol sym,
                                  String doc, JCMethodDecl tree, Position.LineMap lineMap) {
         super(env, sym, doc, tree, lineMap);
     }
--- a/langtools/src/share/classes/com/sun/tools/javadoc/ClassDocImpl.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javadoc/ClassDocImpl.java	Wed Aug 17 22:47:12 2011 -0700
@@ -744,17 +744,16 @@
         // search inner classes
         //### Add private entry point to avoid creating array?
         //### Replicate code in innerClasses here to avoid consing?
-        ClassDoc innerClasses[] = innerClasses();
-        for (int i = 0; i < innerClasses.length; i++) {
-            if (innerClasses[i].name().equals(className) ||
-                //### This is from original javadoc but it looks suspicious to me...
-                //### I believe it is attempting to compensate for the confused
-                //### convention of including the nested class qualifiers in the
-                //### 'name' of the inner class, rather than the true simple name.
-                innerClasses[i].name().endsWith(className)) {
-                return innerClasses[i];
+        for (ClassDoc icd : innerClasses()) {
+            if (icd.name().equals(className) ||
+                    //### This is from original javadoc but it looks suspicious to me...
+                    //### I believe it is attempting to compensate for the confused
+                    //### convention of including the nested class qualifiers in the
+                    //### 'name' of the inner class, rather than the true simple name.
+                    icd.name().endsWith("." + className)) {
+                return icd;
             } else {
-                ClassDoc innercd = ((ClassDocImpl) innerClasses[i]).searchClass(className);
+                ClassDoc innercd = ((ClassDocImpl) icd).searchClass(className);
                 if (innercd != null) {
                     return innercd;
                 }
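
The rewritten loop above now requires a "." before the simple name, so a nested class whose qualified name merely ends with the same characters is no longer matched by the suffix test. The string check in isolation, as a self-contained sketch with a hypothetical helper:

    class NameMatch {
        // Accept an exact simple name, or a qualified nested name that ends
        // with ".<className>" -- never a bare character suffix.
        static boolean matches(String innerName, String className) {
            return innerName.equals(className) || innerName.endsWith("." + className);
        }

        public static void main(String[] args) {
            System.out.println(matches("Map.Entry", "Entry"));  // true
            System.out.println(matches("CacheEntry", "Entry")); // false (matched before the "." prefix was added)
            System.out.println(matches("Entry", "Entry"));      // true
        }
    }
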
--- a/langtools/src/share/classes/com/sun/tools/javadoc/Comment.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javadoc/Comment.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,7 @@
 
 package com.sun.tools.javadoc;
 
-import java.util.Locale;
-
 import com.sun.javadoc.*;
-
 import com.sun.tools.javac.util.ListBuffer;
 
 /**
@@ -115,7 +112,7 @@
                                 state = TAG_NAME;
                             }
                             break;
-                    };
+                    }
                     if (ch == '\n') {
                         newLine = true;
                     } else if (!isWhite) {
@@ -134,7 +131,7 @@
                     case IN_TEXT:
                         parseCommentComponent(tagName, textStart, lastNonWhite+1);
                         break;
-                };
+                }
             }
 
             /**
@@ -396,16 +393,15 @@
      * else
      *    return -1.
      */
-    private static int inlineTagFound(DocImpl holder,  String inlinetext, int start) {
+    private static int inlineTagFound(DocImpl holder, String inlinetext, int start) {
         DocEnv docenv = holder.env;
-        int linkstart;
-        if (start == inlinetext.length() ||
-              (linkstart = inlinetext.indexOf("{@", start)) == -1) {
+        int linkstart = inlinetext.indexOf("{@", start);
+        if (start == inlinetext.length() || linkstart == -1) {
             return -1;
-        } else if(inlinetext.indexOf('}', start) == -1) {
+        } else if (inlinetext.indexOf('}', linkstart) == -1) {
             //Missing '}'.
             docenv.warning(holder, "tag.Improper_Use_Of_Link_Tag",
-                          inlinetext.substring(linkstart, inlinetext.length()));
+                    inlinetext.substring(linkstart, inlinetext.length()));
             return -1;
         } else {
             return linkstart;
@@ -425,6 +421,7 @@
     /**
      * Return text for this Doc comment.
      */
+    @Override
     public String toString() {
         return text;
     }
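
The corrected inlineTagFound searches for the closing '}' starting from the "{@" it actually found, rather than from the original start offset, so a stray '}' earlier in the text no longer masks an unterminated inline tag. A self-contained sketch of the fixed check; the class name InlineTagCheck is hypothetical:

    class InlineTagCheck {
        // Mirrors the corrected logic: locate "{@" first, then require a '}'
        // at or after that position.
        static int inlineTagFound(String text, int start) {
            int linkstart = text.indexOf("{@", start);
            if (start == text.length() || linkstart == -1)
                return -1;
            if (text.indexOf('}', linkstart) == -1)
                return -1; // unterminated inline tag (javadoc would warn here)
            return linkstart;
        }

        public static void main(String[] args) {
            System.out.println(inlineTagFound("} text {@link Foo", 0));    // -1: tag never closed
            System.out.println(inlineTagFound("see {@link Foo} here", 0)); // 4: tag found and closed
        }
    }
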
--- a/langtools/src/share/classes/com/sun/tools/javadoc/DocEnv.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javadoc/DocEnv.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,7 +84,7 @@
     final Symbol externalizableSym;
 
     /** Access filter (public, protected, ...).  */
-    ModifierFilter showAccess;
+    protected ModifierFilter showAccess;
 
     /** True if we are using a sentence BreakIterator. */
     boolean breakiterator;
@@ -102,7 +102,7 @@
     boolean docClasses = false;
 
     /** Does the doclet only expect pre-1.5 doclet API? */
-    boolean legacyDoclet = true;
+    protected boolean legacyDoclet = true;
 
     /**
      * Set this to true if you would like to not emit any errors, warnings and
@@ -115,7 +115,7 @@
      *
      * @param context      Context for this javadoc instance.
      */
-    private DocEnv(Context context) {
+    protected DocEnv(Context context) {
         context.put(docEnvKey, this);
 
         messager = Messager.instance0(context);
@@ -517,7 +517,7 @@
         messager.exit();
     }
 
-    private Map<PackageSymbol, PackageDocImpl> packageMap =
+    protected Map<PackageSymbol, PackageDocImpl> packageMap =
             new HashMap<PackageSymbol, PackageDocImpl>();
     /**
      * Return the PackageDoc of this package symbol.
@@ -545,12 +545,12 @@
     }
 
 
-    private Map<ClassSymbol, ClassDocImpl> classMap =
+    protected Map<ClassSymbol, ClassDocImpl> classMap =
             new HashMap<ClassSymbol, ClassDocImpl>();
     /**
      * Return the ClassDoc (or a subtype) of this class symbol.
      */
-    ClassDocImpl getClassDoc(ClassSymbol clazz) {
+    public ClassDocImpl getClassDoc(ClassSymbol clazz) {
         ClassDocImpl result = classMap.get(clazz);
         if (result != null) return result;
         if (isAnnotationType(clazz)) {
@@ -565,7 +565,7 @@
     /**
      * Create the ClassDoc (or a subtype) for a class symbol.
      */
-    void makeClassDoc(ClassSymbol clazz, String docComment, JCClassDecl tree, Position.LineMap lineMap) {
+    protected void makeClassDoc(ClassSymbol clazz, String docComment, JCClassDecl tree, Position.LineMap lineMap) {
         ClassDocImpl result = classMap.get(clazz);
         if (result != null) {
             if (docComment != null) result.setRawCommentText(docComment);
@@ -580,20 +580,20 @@
         classMap.put(clazz, result);
     }
 
-    private static boolean isAnnotationType(ClassSymbol clazz) {
+    protected static boolean isAnnotationType(ClassSymbol clazz) {
         return ClassDocImpl.isAnnotationType(clazz);
     }
 
-    private static boolean isAnnotationType(JCClassDecl tree) {
+    protected static boolean isAnnotationType(JCClassDecl tree) {
         return (tree.mods.flags & Flags.ANNOTATION) != 0;
     }
 
-    private Map<VarSymbol, FieldDocImpl> fieldMap =
+    protected Map<VarSymbol, FieldDocImpl> fieldMap =
             new HashMap<VarSymbol, FieldDocImpl>();
     /**
      * Return the FieldDoc of this var symbol.
      */
-    FieldDocImpl getFieldDoc(VarSymbol var) {
+    public FieldDocImpl getFieldDoc(VarSymbol var) {
         FieldDocImpl result = fieldMap.get(var);
         if (result != null) return result;
         result = new FieldDocImpl(this, var);
@@ -603,7 +603,7 @@
     /**
      * Create a FieldDoc for a var symbol.
      */
-    void makeFieldDoc(VarSymbol var, String docComment, JCVariableDecl tree, Position.LineMap lineMap) {
+    protected void makeFieldDoc(VarSymbol var, String docComment, JCVariableDecl tree, Position.LineMap lineMap) {
         FieldDocImpl result = fieldMap.get(var);
         if (result != null) {
             if (docComment != null) result.setRawCommentText(docComment);
@@ -614,13 +614,13 @@
         }
     }
 
-    private Map<MethodSymbol, ExecutableMemberDocImpl> methodMap =
+    protected Map<MethodSymbol, ExecutableMemberDocImpl> methodMap =
             new HashMap<MethodSymbol, ExecutableMemberDocImpl>();
     /**
      * Create a MethodDoc for this MethodSymbol.
      * Should be called only on symbols representing methods.
      */
-    void makeMethodDoc(MethodSymbol meth, String docComment,
+    protected void makeMethodDoc(MethodSymbol meth, String docComment,
                        JCMethodDecl tree, Position.LineMap lineMap) {
         MethodDocImpl result = (MethodDocImpl)methodMap.get(meth);
         if (result != null) {
@@ -649,7 +649,7 @@
      * Create the ConstructorDoc for a MethodSymbol.
      * Should be called only on symbols representing constructors.
      */
-    void makeConstructorDoc(MethodSymbol meth, String docComment,
+    protected void makeConstructorDoc(MethodSymbol meth, String docComment,
                             JCMethodDecl tree, Position.LineMap lineMap) {
         ConstructorDocImpl result = (ConstructorDocImpl)methodMap.get(meth);
         if (result != null) {
@@ -678,7 +678,7 @@
      * Create the AnnotationTypeElementDoc for a MethodSymbol.
      * Should be called only on symbols representing annotation type elements.
      */
-    void makeAnnotationTypeElementDoc(MethodSymbol meth,
+    protected void makeAnnotationTypeElementDoc(MethodSymbol meth,
                                       String docComment, JCMethodDecl tree, Position.LineMap lineMap) {
         AnnotationTypeElementDocImpl result =
             (AnnotationTypeElementDocImpl)methodMap.get(meth);
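
The access changes in DocEnv above (protected constructor, protected make*Doc factories, public getClassDoc and getFieldDoc) open the class up for subclassing. A purely hypothetical sketch of how a subclass might use the widened hooks; TracingDocEnv is not part of this changeset and only illustrates that such an override now compiles:

    import com.sun.tools.javac.code.Symbol.ClassSymbol;
    import com.sun.tools.javac.util.Context;
    import com.sun.tools.javadoc.ClassDocImpl;
    import com.sun.tools.javadoc.DocEnv;

    // Hypothetical subclass: adds bookkeeping around ClassDoc creation.
    class TracingDocEnv extends DocEnv {
        protected TracingDocEnv(Context context) {
            super(context);
        }

        @Override
        public ClassDocImpl getClassDoc(ClassSymbol clazz) {
            System.err.println("getClassDoc: " + clazz.flatName());
            return super.getClassDoc(clazz);
        }
    }
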
--- a/langtools/src/share/classes/com/sun/tools/javadoc/DocImpl.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javadoc/DocImpl.java	Wed Aug 17 22:47:12 2011 -0700
@@ -92,7 +92,7 @@
      * So subclasses have the option to do lazy initialization of
      * "documentation" string.
      */
-    String documentation() {
+    protected String documentation() {
         if (documentation == null) documentation = "";
         return documentation;
     }
--- a/langtools/src/share/classes/com/sun/tools/javadoc/JavadocClassReader.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javadoc/JavadocClassReader.java	Wed Aug 17 22:47:12 2011 -0700
@@ -35,7 +35,7 @@
 /** Javadoc uses an extended class reader that records package.html entries
  *  @author Neal Gafter
  */
-class JavadocClassReader extends ClassReader {
+public class JavadocClassReader extends ClassReader {
 
     public static JavadocClassReader instance0(Context context) {
         ClassReader instance = context.get(classReaderKey);
@@ -59,7 +59,7 @@
     private EnumSet<JavaFileObject.Kind> noSource = EnumSet.of(JavaFileObject.Kind.CLASS,
                                                                JavaFileObject.Kind.HTML);
 
-    private JavadocClassReader(Context context) {
+    public JavadocClassReader(Context context) {
         super(context, true);
         docenv = DocEnv.instance(context);
         preferSource = true;
--- a/langtools/src/share/classes/com/sun/tools/javadoc/JavadocEnter.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javadoc/JavadocEnter.java	Wed Aug 17 22:47:12 2011 -0700
@@ -31,7 +31,6 @@
 import com.sun.tools.javac.code.Kinds;
 import com.sun.tools.javac.code.Symbol.*;
 import com.sun.tools.javac.comp.Enter;
-import com.sun.tools.javac.tree.JCTree;
 import com.sun.tools.javac.tree.JCTree.*;
 import javax.tools.JavaFileObject;
 
@@ -65,6 +64,7 @@
     final Messager messager;
     final DocEnv docenv;
 
+    @Override
     public void main(List<JCCompilationUnit> trees) {
         // count all Enter errors as warnings.
         int nerrors = messager.nerrors;
@@ -73,6 +73,7 @@
         messager.nerrors = nerrors;
     }
 
+    @Override
     public void visitTopLevel(JCCompilationUnit tree) {
         super.visitTopLevel(tree);
         if (tree.sourcefile.isNameCompatible("package-info", JavaFileObject.Kind.SOURCE)) {
@@ -81,10 +82,11 @@
         }
     }
 
+    @Override
     public void visitClassDef(JCClassDecl tree) {
         super.visitClassDef(tree);
-        if (tree.sym != null && tree.sym.kind == Kinds.TYP) {
-            if (tree.sym == null) return;
+        if (tree.sym == null) return;
+        if (tree.sym.kind == Kinds.TYP || tree.sym.kind == Kinds.ERR) {
             String comment = env.toplevel.docComments.get(tree);
             ClassSymbol c = tree.sym;
             docenv.makeClassDoc(c, comment, tree, env.toplevel.lineMap);
@@ -92,6 +94,7 @@
     }
 
     /** Don't complain about a duplicate class. */
+    @Override
     protected void duplicateClass(DiagnosticPosition pos, ClassSymbol c) {}
 
 }
--- a/langtools/src/share/classes/com/sun/tools/javadoc/JavadocMemberEnter.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javadoc/JavadocMemberEnter.java	Wed Aug 17 22:47:12 2011 -0700
@@ -38,7 +38,7 @@
  *  done by javac.
  *  @author Neal Gafter
  */
-class JavadocMemberEnter extends MemberEnter {
+public class JavadocMemberEnter extends MemberEnter {
     public static JavadocMemberEnter instance0(Context context) {
         MemberEnter instance = context.get(memberEnterKey);
         if (instance == null)
--- a/langtools/src/share/classes/com/sun/tools/javadoc/PackageDocImpl.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javadoc/PackageDocImpl.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -96,7 +96,7 @@
     /**
      * Do lazy initialization of "documentation" string.
      */
-    String documentation() {
+    protected String documentation() {
         if (documentation != null)
             return documentation;
         if (docPath != null) {
--- a/langtools/src/share/classes/com/sun/tools/javadoc/ParamTagImpl.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javadoc/ParamTagImpl.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,11 @@
     private final String parameterComment;
     private final boolean isTypeParameter;
 
+    /**
+     * Cached inline tags.
+     */
+    private Tag[] inlineTags;
+
     ParamTagImpl(DocImpl holder, String name, String text) {
         super(holder, name, text);
         String[] sa = divideAtWhite();
@@ -71,6 +76,7 @@
     /**
      * Return the kind of this tag.
      */
+    @Override
     public String kind() {
         return "@param";
     }
@@ -85,6 +91,7 @@
     /**
      * convert this object to a string.
      */
+    @Override
     public String toString() {
         return name + ":" + text;
     }
@@ -97,7 +104,11 @@
      * @see TagImpl#inlineTagImpls()
      * @see ThrowsTagImpl#inlineTagImpls()
      */
+    @Override
     public Tag[] inlineTags() {
-        return Comment.getInlineTags(holder, parameterComment);
+        if (inlineTags == null) {
+            inlineTags = Comment.getInlineTags(holder, parameterComment);
+        }
+        return inlineTags;
     }
 }
--- a/langtools/src/share/classes/com/sun/tools/javadoc/SerialFieldTagImpl.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javadoc/SerialFieldTagImpl.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -80,6 +80,9 @@
      */
     private void parseSerialFieldString() {
         int len = text.length();
+        if (len == 0) {
+            return;
+        }
 
         // if no white space found
         /* Skip white space. */
--- a/langtools/src/share/classes/com/sun/tools/javadoc/ThrowsTagImpl.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javadoc/ThrowsTagImpl.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,6 +43,11 @@
     private final String exceptionName;
     private final String exceptionComment;
 
+    /**
+     * Cached inline tags.
+     */
+    private Tag[] inlineTags;
+
     ThrowsTagImpl(DocImpl holder, String name, String text) {
         super(holder, name, text);
         String[] sa = divideAtWhite();
@@ -93,6 +98,7 @@
      * Return the kind of this tag.  Always "@throws" for instances
      * of ThrowsTagImpl.
      */
+    @Override
     public String kind() {
         return "@throws";
     }
@@ -105,7 +111,11 @@
      * @see TagImpl#inlineTagImpls()
      * @see ParamTagImpl#inlineTagImpls()
      */
+    @Override
     public Tag[] inlineTags() {
-        return Comment.getInlineTags(holder, exceptionComment());
+        if (inlineTags == null) {
+            inlineTags = Comment.getInlineTags(holder, exceptionComment());
+        }
+        return inlineTags;
     }
 }
--- a/langtools/src/share/classes/com/sun/tools/javah/JavahTask.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javah/JavahTask.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
 import javax.lang.model.type.TypeMirror;
 import javax.lang.model.type.TypeVisitor;
 import javax.lang.model.util.ElementFilter;
-import javax.lang.model.util.SimpleTypeVisitor7;
+import javax.lang.model.util.SimpleTypeVisitor8;
 import javax.lang.model.util.Types;
 
 import javax.tools.Diagnostic;
@@ -753,7 +753,7 @@
         }
 
         private TypeVisitor<Void,Types> checkMethodParametersVisitor =
-                new SimpleTypeVisitor7<Void,Types>() {
+                new SimpleTypeVisitor8<Void,Types>() {
             @Override
             public Void visitArray(ArrayType t, Types types) {
                 visit(t.getComponentType(), types);
--- a/langtools/src/share/classes/com/sun/tools/javah/LLNI.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javah/LLNI.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
 import javax.lang.model.type.TypeMirror;
 import javax.lang.model.type.TypeVisitor;
 import javax.lang.model.util.ElementFilter;
-import javax.lang.model.util.SimpleTypeVisitor7;
+import javax.lang.model.util.SimpleTypeVisitor8;
 
 /*
  * <p><b>This is NOT part of any supported API.
@@ -628,7 +628,7 @@
     }
 
     protected final boolean isLongOrDouble(TypeMirror t) {
-        TypeVisitor<Boolean,Void> v = new SimpleTypeVisitor7<Boolean,Void>() {
+        TypeVisitor<Boolean,Void> v = new SimpleTypeVisitor8<Boolean,Void>() {
             public Boolean defaultAction(TypeMirror t, Void p){
                 return false;
             }
--- a/langtools/src/share/classes/com/sun/tools/javah/TypeSignature.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/com/sun/tools/javah/TypeSignature.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
 import javax.lang.model.type.TypeVariable;
 import javax.lang.model.type.TypeVisitor;
 import javax.lang.model.util.Elements;
-import javax.lang.model.util.SimpleTypeVisitor7;
+import javax.lang.model.util.SimpleTypeVisitor8;
 
 /**
  * Returns internal type signature.
@@ -245,7 +245,7 @@
 
 
     String qualifiedTypeName(TypeMirror type) {
-        TypeVisitor<Name, Void> v = new SimpleTypeVisitor7<Name, Void>() {
+        TypeVisitor<Name, Void> v = new SimpleTypeVisitor8<Name, Void>() {
             @Override
             public Name visitArray(ArrayType t, Void p) {
                 return t.getComponentType().accept(this, p);
--- a/langtools/src/share/classes/javax/lang/model/SourceVersion.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/SourceVersion.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -124,7 +124,15 @@
      *
      * @since 1.7
      */
-    RELEASE_7;
+    RELEASE_7,
+
+    /**
+     * The version recognized by the Java Platform, Standard Edition
+     * 8.
+     *
+     * @since 1.8
+     */
+    RELEASE_8;
 
     // Note that when adding constants for newer releases, the
     // behavior of latest() and latestSupported() must be updated too.
@@ -135,7 +143,7 @@
      * @return the latest source version that can be modeled
      */
     public static SourceVersion latest() {
-        return RELEASE_7;
+        return RELEASE_8;
     }
 
     private static final SourceVersion latestSupported = getLatestSupported();
@@ -143,9 +151,12 @@
     private static SourceVersion getLatestSupported() {
         try {
             String specVersion = System.getProperty("java.specification.version");
-            if ("1.7".equals(specVersion))
+
+            if ("1.8".equals(specVersion))
+                return RELEASE_8;
+            else if("1.7".equals(specVersion))
                 return RELEASE_7;
-            else if ("1.6".equals(specVersion))
+            else if("1.6".equals(specVersion))
                 return RELEASE_6;
         } catch (SecurityException se) {}
 
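
With RELEASE_8 added, latest() reports the newest version the library can model, while latestSupported() still depends on the java.specification.version system property as shown above. A small probe, assuming it runs on a matching JDK; the class name VersionProbe is hypothetical:

    import javax.lang.model.SourceVersion;

    class VersionProbe {
        public static void main(String[] args) {
            // latest(): newest source version this javax.lang.model library models (RELEASE_8 here).
            // latestSupported(): newest version the running platform reports supporting.
            System.out.println("latest:          " + SourceVersion.latest());
            System.out.println("latestSupported: " + SourceVersion.latestSupported());
        }
    }
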
--- a/langtools/src/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitor6.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitor6.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,7 @@
  * @author Peter von der Ah&eacute;
  *
  * @see AbstractAnnotationValueVisitor7
+ * @see AbstractAnnotationValueVisitor8
  * @since 1.6
  */
 @SupportedSourceVersion(RELEASE_6)
--- a/langtools/src/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitor7.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitor7.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,6 +59,7 @@
  * @param <P> the type of the additional parameter to this visitor's methods.
  *
  * @see AbstractAnnotationValueVisitor6
+ * @see AbstractAnnotationValueVisitor8
  * @since 1.7
  */
 @SupportedSourceVersion(RELEASE_7)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/src/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitor8.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package javax.lang.model.util;
+
+import java.util.List;
+import javax.lang.model.element.*;
+
+import javax.lang.model.type.TypeMirror;
+import static javax.lang.model.SourceVersion.*;
+import javax.lang.model.SourceVersion;
+import javax.annotation.processing.SupportedSourceVersion;
+
+/**
+ * A skeletal visitor for annotation values with default behavior
+ * appropriate for the {@link SourceVersion#RELEASE_8 RELEASE_8}
+ * source version.
+ *
+ * <p> <b>WARNING:</b> The {@code AnnotationValueVisitor} interface
+ * implemented by this class may have methods added to it in the
+ * future to accommodate new, currently unknown, language structures
+ * added to future versions of the Java&trade; programming language.
+ * Therefore, methods whose names begin with {@code "visit"} may be
+ * added to this class in the future; to avoid incompatibilities,
+ * classes which extend this class should not declare any instance
+ * methods with names beginning with {@code "visit"}.
+ *
+ * <p>When such a new visit method is added, the default
+ * implementation in this class will be to call the {@link
+ * #visitUnknown visitUnknown} method.  A new abstract annotation
+ * value visitor class will also be introduced to correspond to the
+ * new language level; this visitor will have different default
+ * behavior for the visit method in question.  When the new visitor is
+ * introduced, all or portions of this visitor may be deprecated.
+ *
+ * @param <R> the return type of this visitor's methods
+ * @param <P> the type of the additional parameter to this visitor's methods.
+ *
+ * @see AbstractAnnotationValueVisitor6
+ * @see AbstractAnnotationValueVisitor7
+ * @since 1.8
+ */
+@SupportedSourceVersion(RELEASE_8)
+public abstract class AbstractAnnotationValueVisitor8<R, P> extends AbstractAnnotationValueVisitor7<R, P> {
+
+    /**
+     * Constructor for concrete subclasses to call.
+     */
+    protected AbstractAnnotationValueVisitor8() {
+        super();
+    }
+}
--- a/langtools/src/share/classes/javax/lang/model/util/AbstractElementVisitor6.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/AbstractElementVisitor6.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,6 +67,7 @@
  * @author Peter von der Ah&eacute;
  *
  * @see AbstractElementVisitor7
+ * @see AbstractElementVisitor8
  * @since 1.6
  */
 @SupportedSourceVersion(RELEASE_6)
--- a/langtools/src/share/classes/javax/lang/model/util/AbstractElementVisitor7.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/AbstractElementVisitor7.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,6 +62,7 @@
  *            additional parameter.
  *
  * @see AbstractElementVisitor6
+ * @see AbstractElementVisitor8
  * @since 1.7
  */
 @SupportedSourceVersion(RELEASE_7)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/src/share/classes/javax/lang/model/util/AbstractElementVisitor8.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package javax.lang.model.util;
+
+import javax.lang.model.element.*;
+import javax.annotation.processing.SupportedSourceVersion;
+import javax.lang.model.element.*;
+import static javax.lang.model.element.ElementKind.*;
+import static javax.lang.model.SourceVersion.*;
+import javax.lang.model.SourceVersion;
+
+
+/**
+ * A skeletal visitor of program elements with default behavior
+ * appropriate for the {@link SourceVersion#RELEASE_8 RELEASE_8}
+ * source version.
+ *
+ * <p> <b>WARNING:</b> The {@code ElementVisitor} interface
+ * implemented by this class may have methods added to it in the
+ * future to accommodate new, currently unknown, language structures
+ * added to future versions of the Java&trade; programming language.
+ * Therefore, methods whose names begin with {@code "visit"} may be
+ * added to this class in the future; to avoid incompatibilities,
+ * classes which extend this class should not declare any instance
+ * methods with names beginning with {@code "visit"}.
+ *
+ * <p>When such a new visit method is added, the default
+ * implementation in this class will be to call the {@link
+ * #visitUnknown visitUnknown} method.  A new abstract element visitor
+ * class will also be introduced to correspond to the new language
+ * level; this visitor will have different default behavior for the
+ * visit method in question.  When the new visitor is introduced, all
+ * or portions of this visitor may be deprecated.
+ *
+ * @param <R> the return type of this visitor's methods.  Use {@link
+ *            Void} for visitors that do not need to return results.
+ * @param <P> the type of the additional parameter to this visitor's
+ *            methods.  Use {@code Void} for visitors that do not need an
+ *            additional parameter.
+ *
+ * @see AbstractElementVisitor6
+ * @see AbstractElementVisitor7
+ * @since 1.8
+ */
+@SupportedSourceVersion(RELEASE_8)
+public abstract class AbstractElementVisitor8<R, P> extends AbstractElementVisitor7<R, P> {
+    /**
+     * Constructor for concrete subclasses to call.
+     */
+    protected AbstractElementVisitor8(){
+        super();
+    }
+}
--- a/langtools/src/share/classes/javax/lang/model/util/AbstractTypeVisitor6.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/AbstractTypeVisitor6.java	Wed Aug 17 22:47:12 2011 -0700
@@ -60,6 +60,7 @@
  * @author Peter von der Ah&eacute;
  *
  * @see AbstractTypeVisitor7
+ * @see AbstractTypeVisitor8
  * @since 1.6
  */
 public abstract class AbstractTypeVisitor6<R, P> implements TypeVisitor<R, P> {
--- a/langtools/src/share/classes/javax/lang/model/util/AbstractTypeVisitor7.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/AbstractTypeVisitor7.java	Wed Aug 17 22:47:12 2011 -0700
@@ -56,6 +56,7 @@
  *            additional parameter.
  *
  * @see AbstractTypeVisitor6
+ * @see AbstractTypeVisitor8
  * @since 1.7
  */
 public abstract class AbstractTypeVisitor7<R, P> extends AbstractTypeVisitor6<R, P> {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/src/share/classes/javax/lang/model/util/AbstractTypeVisitor8.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package javax.lang.model.util;
+
+import javax.lang.model.type.*;
+
+/**
+ * A skeletal visitor of types with default behavior appropriate for
+ * the {@link javax.lang.model.SourceVersion#RELEASE_8 RELEASE_8}
+ * source version.
+ *
+ * <p> <b>WARNING:</b> The {@code TypeVisitor} interface implemented
+ * by this class may have methods added to it in the future to
+ * accommodate new, currently unknown, language structures added to
+ * future versions of the Java&trade; programming language.
+ * Therefore, methods whose names begin with {@code "visit"} may be
+ * added to this class in the future; to avoid incompatibilities,
+ * classes which extend this class should not declare any instance
+ * methods with names beginning with {@code "visit"}.
+ *
+ * <p>When such a new visit method is added, the default
+ * implementation in this class will be to call the {@link
+ * #visitUnknown visitUnknown} method.  A new abstract type visitor
+ * class will also be introduced to correspond to the new language
+ * level; this visitor will have different default behavior for the
+ * visit method in question.  When the new visitor is introduced, all
+ * or portions of this visitor may be deprecated.
+ *
+ * @param <R> the return type of this visitor's methods.  Use {@link
+ *            Void} for visitors that do not need to return results.
+ * @param <P> the type of the additional parameter to this visitor's
+ *            methods.  Use {@code Void} for visitors that do not need an
+ *            additional parameter.
+ *
+ * @see AbstractTypeVisitor6
+ * @see AbstractTypeVisitor7
+ * @since 1.8
+ */
+public abstract class AbstractTypeVisitor8<R, P> extends AbstractTypeVisitor7<R, P> {
+    /**
+     * Constructor for concrete subclasses to call.
+     */
+    protected AbstractTypeVisitor8() {
+        super();
+    }
+}
--- a/langtools/src/share/classes/javax/lang/model/util/ElementKindVisitor6.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/ElementKindVisitor6.java	Wed Aug 17 22:47:12 2011 -0700
@@ -78,6 +78,7 @@
  * @author Peter von der Ah&eacute;
  *
  * @see ElementKindVisitor7
+ * @see ElementKindVisitor8
  * @since 1.6
  */
 @SupportedSourceVersion(RELEASE_6)
--- a/langtools/src/share/classes/javax/lang/model/util/ElementKindVisitor7.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/ElementKindVisitor7.java	Wed Aug 17 22:47:12 2011 -0700
@@ -73,6 +73,7 @@
  *            additional parameter.
  *
  * @see ElementKindVisitor6
+ * @see ElementKindVisitor8
  * @since 1.7
  */
 @SupportedSourceVersion(RELEASE_7)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/src/share/classes/javax/lang/model/util/ElementKindVisitor8.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package javax.lang.model.util;
+
+import javax.lang.model.element.*;
+import static javax.lang.model.element.ElementKind.*;
+import javax.annotation.processing.SupportedSourceVersion;
+import static javax.lang.model.SourceVersion.*;
+import javax.lang.model.SourceVersion;
+
+/**
+ * A visitor of program elements based on their {@linkplain
+ * ElementKind kind} with default behavior appropriate for the {@link
+ * SourceVersion#RELEASE_8 RELEASE_8} source version.  For {@linkplain
+ * Element elements} <tt><i>XYZ</i></tt> that may have more than one
+ * kind, the <tt>visit<i>XYZ</i></tt> methods in this class delegate
+ * to the <tt>visit<i>XYZKind</i></tt> method corresponding to the
+ * first argument's kind.  The <tt>visit<i>XYZKind</i></tt> methods
+ * call {@link #defaultAction defaultAction}, passing their arguments
+ * to {@code defaultAction}'s corresponding parameters.
+ *
+ * <p> Methods in this class may be overridden subject to their
+ * general contract.  Note that annotating methods in concrete
+ * subclasses with {@link java.lang.Override @Override} will help
+ * ensure that methods are overridden as intended.
+ *
+ * <p> <b>WARNING:</b> The {@code ElementVisitor} interface
+ * implemented by this class may have methods added to it or the
+ * {@code ElementKind} {@code enum} used in this case may have
+ * constants added to it in the future to accommodate new, currently
+ * unknown, language structures added to future versions of the
+ * Java&trade; programming language.  Therefore, methods whose names
+ * begin with {@code "visit"} may be added to this class in the
+ * future; to avoid incompatibilities, classes which extend this class
+ * should not declare any instance methods with names beginning with
+ * {@code "visit"}.
+ *
+ * <p>When such a new visit method is added, the default
+ * implementation in this class will be to call the {@link
+ * #visitUnknown visitUnknown} method.  A new abstract element kind
+ * visitor class will also be introduced to correspond to the new
+ * language level; this visitor will have different default behavior
+ * for the visit method in question.  When the new visitor is
+ * introduced, all or portions of this visitor may be deprecated.
+ *
+ * @param <R> the return type of this visitor's methods.  Use {@link
+ *            Void} for visitors that do not need to return results.
+ * @param <P> the type of the additional parameter to this visitor's
+ *            methods.  Use {@code Void} for visitors that do not need an
+ *            additional parameter.
+ *
+ * @see ElementKindVisitor6
+ * @see ElementKindVisitor7
+ * @since 1.8
+ */
+@SupportedSourceVersion(RELEASE_8)
+public class ElementKindVisitor8<R, P> extends ElementKindVisitor7<R, P> {
+    /**
+     * Constructor for concrete subclasses; uses {@code null} for the
+     * default value.
+     */
+    protected ElementKindVisitor8() {
+        super(null);
+    }
+
+    /**
+     * Constructor for concrete subclasses; uses the argument for the
+     * default value.
+     *
+     * @param defaultValue the value to assign to {@link #DEFAULT_VALUE}
+     */
+    protected ElementKindVisitor8(R defaultValue) {
+        super(defaultValue);
+    }
+}
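
As the class comment above describes, ElementKindVisitor8 dispatches each visitXYZ call to a kind-specific visitXYZKind method, with everything else funneled to defaultAction. A hedged sketch of a subclass that separates constructors from ordinary methods; the class name MemberCounter is hypothetical:

    import javax.lang.model.element.Element;
    import javax.lang.model.element.ExecutableElement;
    import javax.lang.model.util.ElementKindVisitor8;

    // Counts constructors and methods separately via kind-based dispatch;
    // all other kinds fall through to defaultAction.
    class MemberCounter extends ElementKindVisitor8<Void, Void> {
        int constructors;
        int methods;

        @Override
        public Void visitExecutableAsConstructor(ExecutableElement e, Void p) {
            constructors++;
            return null;
        }

        @Override
        public Void visitExecutableAsMethod(ExecutableElement e, Void p) {
            methods++;
            return null;
        }

        @Override
        protected Void defaultAction(Element e, Void p) {
            return null;
        }
    }
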
--- a/langtools/src/share/classes/javax/lang/model/util/ElementScanner6.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/ElementScanner6.java	Wed Aug 17 22:47:12 2011 -0700
@@ -90,6 +90,7 @@
  * @author Peter von der Ah&eacute;
  *
  * @see ElementScanner7
+ * @see ElementScanner8
  * @since 1.6
  */
 @SupportedSourceVersion(RELEASE_6)
--- a/langtools/src/share/classes/javax/lang/model/util/ElementScanner7.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/ElementScanner7.java	Wed Aug 17 22:47:12 2011 -0700
@@ -86,6 +86,7 @@
  *            additional parameter.
  *
  * @see ElementScanner6
+ * @see ElementScanner8
  * @since 1.7
  */
 @SupportedSourceVersion(RELEASE_7)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/src/share/classes/javax/lang/model/util/ElementScanner8.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package javax.lang.model.util;
+
+import javax.lang.model.element.*;
+import javax.annotation.processing.SupportedSourceVersion;
+import static javax.lang.model.element.ElementKind.*;
+import javax.lang.model.SourceVersion;
+import static javax.lang.model.SourceVersion.*;
+
+
+/**
+ * A scanning visitor of program elements with default behavior
+ * appropriate for the {@link SourceVersion#RELEASE_8 RELEASE_8}
+ * source version.  The <tt>visit<i>XYZ</i></tt> methods in this
+ * class scan their component elements by calling {@code scan} on
+ * their {@linkplain Element#getEnclosedElements enclosed elements},
+ * {@linkplain ExecutableElement#getParameters parameters}, etc., as
+ * indicated in the individual method specifications.  A subclass can
+ * control the order elements are visited by overriding the
+ * <tt>visit<i>XYZ</i></tt> methods.  Note that clients of a scanner
+ * may get the desired behavior by invoking {@code v.scan(e, p)} rather
+ * than {@code v.visit(e, p)} on the root objects of interest.
+ *
+ * <p>When a subclass overrides a <tt>visit<i>XYZ</i></tt> method, the
+ * new method can cause the enclosed elements to be scanned in the
+ * default way by calling <tt>super.visit<i>XYZ</i></tt>.  In this
+ * fashion, the concrete visitor can control the ordering of traversal
+ * over the component elements with respect to the additional
+ * processing; for example, consistently calling
+ * <tt>super.visit<i>XYZ</i></tt> at the start of the overridden
+ * methods will yield a preorder traversal, etc.  If the component
+ * elements should be traversed in some other order, instead of
+ * calling <tt>super.visit<i>XYZ</i></tt>, an overriding visit method
+ * should call {@code scan} with the elements in the desired order.
+ *
+ * <p> Methods in this class may be overridden subject to their
+ * general contract.  Note that annotating methods in concrete
+ * subclasses with {@link java.lang.Override @Override} will help
+ * ensure that methods are overridden as intended.
+ *
+ * <p> <b>WARNING:</b> The {@code ElementVisitor} interface
+ * implemented by this class may have methods added to it in the
+ * future to accommodate new, currently unknown, language structures
+ * added to future versions of the Java&trade; programming language.
+ * Therefore, methods whose names begin with {@code "visit"} may be
+ * added to this class in the future; to avoid incompatibilities,
+ * classes which extend this class should not declare any instance
+ * methods with names beginning with {@code "visit"}.
+ *
+ * <p>When such a new visit method is added, the default
+ * implementation in this class will be to call the {@link
+ * #visitUnknown visitUnknown} method.  A new element scanner visitor
+ * class will also be introduced to correspond to the new language
+ * level; this visitor will have different default behavior for the
+ * visit method in question.  When the new visitor is introduced, all
+ * or portions of this visitor may be deprecated.
+ *
+ * @param <R> the return type of this visitor's methods.  Use {@link
+ *            Void} for visitors that do not need to return results.
+ * @param <P> the type of the additional parameter to this visitor's
+ *            methods.  Use {@code Void} for visitors that do not need an
+ *            additional parameter.
+ *
+ * @see ElementScanner6
+ * @see ElementScanner7
+ * @since 1.8
+ */
+@SupportedSourceVersion(RELEASE_8)
+public class ElementScanner8<R, P> extends ElementScanner7<R, P> {
+    /**
+     * Constructor for concrete subclasses; uses {@code null} for the
+     * default value.
+     */
+    protected ElementScanner8(){
+        super(null);
+    }
+
+    /**
+     * Constructor for concrete subclasses; uses the argument for the
+     * default value.
+     */
+    protected ElementScanner8(R defaultValue){
+        super(defaultValue);
+    }
+}
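
Following the scanning contract described in the class comment above, a subclass typically overrides a visitXYZ method, records what it needs, and calls super to keep the traversal going, while clients invoke scan(...) on the root elements rather than visit(...). A minimal sketch; the class name ExecutableCollector is hypothetical:

    import java.util.Set;
    import javax.lang.model.element.ExecutableElement;
    import javax.lang.model.util.ElementScanner8;

    // Collects every executable (method, constructor, initializer) reachable
    // from the scanned roots; super.visitExecutable continues the scan into
    // the element's parameters as specified by ElementScanner6.
    class ExecutableCollector extends ElementScanner8<Set<ExecutableElement>, Set<ExecutableElement>> {
        @Override
        public Set<ExecutableElement> visitExecutable(ExecutableElement e, Set<ExecutableElement> found) {
            found.add(e);
            return super.visitExecutable(e, found);
        }
    }

    // Usage sketch:
    //     Set<ExecutableElement> found =
    //         new ExecutableCollector().scan(someTypeElement, new LinkedHashSet<ExecutableElement>());
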
--- a/langtools/src/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitor6.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitor6.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -71,6 +71,7 @@
  * @author Peter von der Ah&eacute;
  *
  * @see SimpleAnnotationValueVisitor7
+ * @see SimpleAnnotationValueVisitor8
  * @since 1.6
  */
 @SupportedSourceVersion(RELEASE_6)
--- a/langtools/src/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitor7.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitor7.java	Wed Aug 17 22:47:12 2011 -0700
@@ -66,6 +66,7 @@
  * @param <P> the type of the additional parameter to this visitor's methods.
  *
  * @see SimpleAnnotationValueVisitor6
+ * @see SimpleAnnotationValueVisitor8
  * @since 1.7
  */
 @SupportedSourceVersion(RELEASE_7)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/src/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitor8.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package javax.lang.model.util;
+
+import java.util.List;
+import javax.lang.model.element.*;
+
+import javax.lang.model.type.TypeMirror;
+import static javax.lang.model.SourceVersion.*;
+import javax.lang.model.SourceVersion;
+import javax.annotation.processing.SupportedSourceVersion;
+
+/**
+ * A simple visitor for annotation values with default behavior
+ * appropriate for the {@link SourceVersion#RELEASE_8 RELEASE_8}
+ * source version.  Visit methods call {@link #defaultAction
+ * defaultAction} passing their arguments to {@code defaultAction}'s
+ * corresponding parameters.
+ *
+ * <p> Methods in this class may be overridden subject to their
+ * general contract.  Note that annotating methods in concrete
+ * subclasses with {@link java.lang.Override @Override} will help
+ * ensure that methods are overridden as intended.
+ *
+ * <p> <b>WARNING:</b> The {@code AnnotationValueVisitor} interface
+ * implemented by this class may have methods added to it in the
+ * future to accommodate new, currently unknown, language structures
+ * added to future versions of the Java&trade; programming language.
+ * Therefore, methods whose names begin with {@code "visit"} may be
+ * added to this class in the future; to avoid incompatibilities,
+ * classes which extend this class should not declare any instance
+ * methods with names beginning with {@code "visit"}.
+ *
+ * <p>When such a new visit method is added, the default
+ * implementation in this class will be to call the {@link
+ * #visitUnknown visitUnknown} method.  A new simple annotation
+ * value visitor class will also be introduced to correspond to the
+ * new language level; this visitor will have different default
+ * behavior for the visit method in question.  When the new visitor is
+ * introduced, all or portions of this visitor may be deprecated.
+ *
+ * @param <R> the return type of this visitor's methods
+ * @param <P> the type of the additional parameter to this visitor's methods.
+ *
+ * @see SimpleAnnotationValueVisitor6
+ * @see SimpleAnnotationValueVisitor7
+ * @since 1.8
+ */
+@SupportedSourceVersion(RELEASE_8)
+public class SimpleAnnotationValueVisitor8<R, P> extends SimpleAnnotationValueVisitor7<R, P> {
+    /**
+     * Constructor for concrete subclasses; uses {@code null} for the
+     * default value.
+     */
+    protected SimpleAnnotationValueVisitor8() {
+        super(null);
+    }
+
+    /**
+     * Constructor for concrete subclasses; uses the argument for the
+     * default value.
+     *
+     * @param defaultValue the value to assign to {@link #DEFAULT_VALUE}
+     */
+    protected SimpleAnnotationValueVisitor8(R defaultValue) {
+        super(defaultValue);
+    }
+}
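A minimal sketch of how a concrete subclass is typically written (editorial, not part of the changeset itself; the class name StringValueExtractor is an illustrative assumption): only the value kind of interest is overridden, and every other kind falls through to defaultAction, returning the default value supplied to the constructor.

import javax.lang.model.util.SimpleAnnotationValueVisitor8;

// Returns the String payload of an AnnotationValue, or "" for any other kind.
class StringValueExtractor extends SimpleAnnotationValueVisitor8<String, Void> {
    StringValueExtractor() {
        super("");  // becomes DEFAULT_VALUE, returned by defaultAction
    }

    @Override
    public String visitString(String s, Void p) {
        return s;
    }
}

// Typical use:
//   String s = annotationValue.accept(new StringValueExtractor(), null);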
--- a/langtools/src/share/classes/javax/lang/model/util/SimpleElementVisitor6.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/SimpleElementVisitor6.java	Wed Aug 17 22:47:12 2011 -0700
@@ -76,6 +76,7 @@
  * @author Peter von der Ah&eacute;
  *
  * @see SimpleElementVisitor7
+ * @see SimpleElementVisitor8
  * @since 1.6
  */
 @SupportedSourceVersion(RELEASE_6)
--- a/langtools/src/share/classes/javax/lang/model/util/SimpleElementVisitor7.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/SimpleElementVisitor7.java	Wed Aug 17 22:47:12 2011 -0700
@@ -69,6 +69,7 @@
  *              for visitors that do not need an additional parameter.
  *
  * @see SimpleElementVisitor6
+ * @see SimpleElementVisitor8
  * @since 1.7
  */
 @SupportedSourceVersion(RELEASE_7)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/src/share/classes/javax/lang/model/util/SimpleElementVisitor8.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package javax.lang.model.util;
+
+import javax.lang.model.element.*;
+import javax.annotation.processing.SupportedSourceVersion;
+import static javax.lang.model.element.ElementKind.*;
+import javax.lang.model.SourceVersion;
+import static javax.lang.model.SourceVersion.*;
+
+/**
+ * A simple visitor of program elements with default behavior
+ * appropriate for the {@link SourceVersion#RELEASE_8 RELEASE_8}
+ * source version.
+ *
+ * Visit methods corresponding to {@code RELEASE_8} and earlier
+ * language constructs call {@link #defaultAction defaultAction},
+ * passing their arguments to {@code defaultAction}'s corresponding
+ * parameters.
+ *
+ * <p> Methods in this class may be overridden subject to their
+ * general contract.  Note that annotating methods in concrete
+ * subclasses with {@link java.lang.Override @Override} will help
+ * ensure that methods are overridden as intended.
+ *
+ * <p> <b>WARNING:</b> The {@code ElementVisitor} interface
+ * implemented by this class may have methods added to it in the
+ * future to accommodate new, currently unknown, language structures
+ * added to future versions of the Java&trade; programming language.
+ * Therefore, methods whose names begin with {@code "visit"} may be
+ * added to this class in the future; to avoid incompatibilities,
+ * classes which extend this class should not declare any instance
+ * methods with names beginning with {@code "visit"}.
+ *
+ * <p>When such a new visit method is added, the default
+ * implementation in this class will be to call the {@link
+ * #visitUnknown visitUnknown} method.  A new simple element visitor
+ * class will also be introduced to correspond to the new language
+ * level; this visitor will have different default behavior for the
+ * visit method in question.  When the new visitor is introduced, all
+ * or portions of this visitor may be deprecated.
+ *
+ * @param <R> the return type of this visitor's methods.  Use {@code Void}
+ *             for visitors that do not need to return results.
+ * @param <P> the type of the additional parameter to this visitor's methods.  Use {@code Void}
+ *              for visitors that do not need an additional parameter.
+ *
+ * @see SimpleElementVisitor6
+ * @see SimpleElementVisitor7
+ * @since 1.8
+ */
+@SupportedSourceVersion(RELEASE_8)
+public class SimpleElementVisitor8<R, P> extends SimpleElementVisitor7<R, P> {
+    /**
+     * Constructor for concrete subclasses; uses {@code null} for the
+     * default value.
+     */
+    protected SimpleElementVisitor8() {
+        super(null);
+    }
+
+    /**
+     * Constructor for concrete subclasses; uses the argument for the
+     * default value.
+     *
+     * @param defaultValue the value to assign to {@link #DEFAULT_VALUE}
+     */
+    protected SimpleElementVisitor8(R defaultValue) {
+        super(defaultValue);
+    }
+}
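A minimal sketch of the usage pattern (editorial, not part of the changeset itself; EnclosingPackageNamer is an illustrative name): one visit method is overridden and all other element kinds fall through to the default value.

import javax.lang.model.element.PackageElement;
import javax.lang.model.util.SimpleElementVisitor8;

// Returns the qualified name of a package element, "" for anything else.
class EnclosingPackageNamer extends SimpleElementVisitor8<String, Void> {
    EnclosingPackageNamer() {
        super("");  // default result for unhandled element kinds
    }

    @Override
    public String visitPackage(PackageElement e, Void p) {
        return e.getQualifiedName().toString();
    }
}

// Typical use:
//   String pkg = element.accept(new EnclosingPackageNamer(), null);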
--- a/langtools/src/share/classes/javax/lang/model/util/SimpleTypeVisitor6.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/SimpleTypeVisitor6.java	Wed Aug 17 22:47:12 2011 -0700
@@ -75,6 +75,7 @@
  * @author Peter von der Ah&eacute;
  *
  * @see SimpleTypeVisitor7
+ * @see SimpleTypeVisitor8
  * @since 1.6
  */
 @SupportedSourceVersion(RELEASE_6)
--- a/langtools/src/share/classes/javax/lang/model/util/SimpleTypeVisitor7.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/SimpleTypeVisitor7.java	Wed Aug 17 22:47:12 2011 -0700
@@ -68,6 +68,7 @@
  *            additional parameter.
  *
  * @see SimpleTypeVisitor6
+ * @see SimpleTypeVisitor8
  * @since 1.7
  */
 @SupportedSourceVersion(RELEASE_7)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/src/share/classes/javax/lang/model/util/SimpleTypeVisitor8.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package javax.lang.model.util;
+
+import javax.lang.model.type.*;
+import javax.annotation.processing.SupportedSourceVersion;
+import javax.lang.model.SourceVersion;
+import static javax.lang.model.SourceVersion.*;
+
+/**
+ * A simple visitor of types with default behavior appropriate for the
+ * {@link SourceVersion#RELEASE_8 RELEASE_8} source version.
+ *
+ * Visit methods corresponding to {@code RELEASE_8} and earlier
+ * language constructs call {@link #defaultAction defaultAction},
+ * passing their arguments to {@code defaultAction}'s corresponding
+ * parameters.
+ *
+ * <p> Methods in this class may be overridden subject to their
+ * general contract.  Note that annotating methods in concrete
+ * subclasses with {@link java.lang.Override @Override} will help
+ * ensure that methods are overridden as intended.
+ *
+ * <p> <b>WARNING:</b> The {@code TypeVisitor} interface implemented
+ * by this class may have methods added to it in the future to
+ * accommodate new, currently unknown, language structures added to
+ * future versions of the Java&trade; programming language.
+ * Therefore, methods whose names begin with {@code "visit"} may be
+ * added to this class in the future; to avoid incompatibilities,
+ * classes which extend this class should not declare any instance
+ * methods with names beginning with {@code "visit"}.
+ *
+ * <p>When such a new visit method is added, the default
+ * implementation in this class will be to call the {@link
+ * #visitUnknown visitUnknown} method.  A new simple type visitor
+ * class will also be introduced to correspond to the new language
+ * level; this visitor will have different default behavior for the
+ * visit method in question.  When the new visitor is introduced, all
+ * or portions of this visitor may be deprecated.
+ *
+ * @param <R> the return type of this visitor's methods.  Use {@link
+ *            Void} for visitors that do not need to return results.
+ * @param <P> the type of the additional parameter to this visitor's
+ *            methods.  Use {@code Void} for visitors that do not need an
+ *            additional parameter.
+ *
+ * @see SimpleTypeVisitor6
+ * @see SimpleTypeVisitor7
+ * @since 1.8
+ */
+@SupportedSourceVersion(RELEASE_8)
+public class SimpleTypeVisitor8<R, P> extends SimpleTypeVisitor7<R, P> {
+    /**
+     * Constructor for concrete subclasses; uses {@code null} for the
+     * default value.
+     */
+    protected SimpleTypeVisitor8() {
+        super(null);
+    }
+
+    /**
+     * Constructor for concrete subclasses; uses the argument for the
+     * default value.
+     *
+     * @param defaultValue the value to assign to {@link #DEFAULT_VALUE}
+     */
+    protected SimpleTypeVisitor8(R defaultValue) {
+        super(defaultValue);
+    }
+}
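A minimal sketch (editorial, not part of the changeset itself; DeclaredTypeElement is an illustrative name) of the common pattern of mapping a TypeMirror to a result for only one type kind:

import javax.lang.model.element.Element;
import javax.lang.model.type.DeclaredType;
import javax.lang.model.util.SimpleTypeVisitor8;

// Maps a declared type to the element that declares it; other kinds map to null.
class DeclaredTypeElement extends SimpleTypeVisitor8<Element, Void> {
    DeclaredTypeElement() {
        super(null);  // default result for non-declared types
    }

    @Override
    public Element visitDeclared(DeclaredType t, Void p) {
        return t.asElement();
    }
}

// Typical use:
//   Element e = typeMirror.accept(new DeclaredTypeElement(), null);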
--- a/langtools/src/share/classes/javax/lang/model/util/TypeKindVisitor6.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/TypeKindVisitor6.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,6 +76,7 @@
  * @author Peter von der Ah&eacute;
  *
  * @see TypeKindVisitor7
+ * @see TypeKindVisitor8
  * @since 1.6
  */
 @SupportedSourceVersion(RELEASE_6)
--- a/langtools/src/share/classes/javax/lang/model/util/TypeKindVisitor7.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/classes/javax/lang/model/util/TypeKindVisitor7.java	Wed Aug 17 22:47:12 2011 -0700
@@ -71,6 +71,7 @@
  *            additional parameter.
  *
  * @see TypeKindVisitor6
+ * @see TypeKindVisitor8
  * @since 1.7
  */
 @SupportedSourceVersion(RELEASE_7)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/src/share/classes/javax/lang/model/util/TypeKindVisitor8.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package javax.lang.model.util;
+
+import javax.lang.model.type.*;
+import javax.annotation.processing.SupportedSourceVersion;
+import static javax.lang.model.element.ElementKind.*;
+import static javax.lang.model.SourceVersion.*;
+import javax.lang.model.SourceVersion;
+
+/**
+ * A visitor of types based on their {@linkplain TypeKind kind} with
+ * default behavior appropriate for the {@link SourceVersion#RELEASE_8
+ * RELEASE_8} source version.  For {@linkplain
+ * TypeMirror types} <tt><i>XYZ</i></tt> that may have more than one
+ * kind, the <tt>visit<i>XYZ</i></tt> methods in this class delegate
+ * to the <tt>visit<i>XYZKind</i></tt> method corresponding to the
+ * first argument's kind.  The <tt>visit<i>XYZKind</i></tt> methods
+ * call {@link #defaultAction defaultAction}, passing their arguments
+ * to {@code defaultAction}'s corresponding parameters.
+ *
+ * <p> Methods in this class may be overridden subject to their
+ * general contract.  Note that annotating methods in concrete
+ * subclasses with {@link java.lang.Override @Override} will help
+ * ensure that methods are overridden as intended.
+ *
+ * <p> <b>WARNING:</b> The {@code TypeVisitor} interface implemented
+ * by this class may have methods added to it in the future to
+ * accommodate new, currently unknown, language structures added to
+ * future versions of the Java&trade; programming language.
+ * Therefore, methods whose names begin with {@code "visit"} may be
+ * added to this class in the future; to avoid incompatibilities,
+ * classes which extend this class should not declare any instance
+ * methods with names beginning with {@code "visit"}.
+ *
+ * <p>When such a new visit method is added, the default
+ * implementation in this class will be to call the {@link
+ * #visitUnknown visitUnknown} method.  A new type kind visitor class
+ * will also be introduced to correspond to the new language level;
+ * this visitor will have different default behavior for the visit
+ * method in question.  When the new visitor is introduced, all or
+ * portions of this visitor may be deprecated.
+ *
+ * @param <R> the return type of this visitor's methods.  Use {@link
+ *            Void} for visitors that do not need to return results.
+ * @param <P> the type of the additional parameter to this visitor's
+ *            methods.  Use {@code Void} for visitors that do not need an
+ *            additional parameter.
+ *
+ * @see TypeKindVisitor6
+ * @see TypeKindVisitor7
+ * @since 1.8
+ */
+@SupportedSourceVersion(RELEASE_8)
+public class TypeKindVisitor8<R, P> extends TypeKindVisitor7<R, P> {
+    /**
+     * Constructor for concrete subclasses to call; uses {@code null}
+     * for the default value.
+     */
+    protected TypeKindVisitor8() {
+        super(null);
+    }
+
+    /**
+     * Constructor for concrete subclasses to call; uses the argument
+     * for the default value.
+     *
+     * @param defaultValue the value to assign to {@link #DEFAULT_VALUE}
+     */
+    protected TypeKindVisitor8(R defaultValue) {
+        super(defaultValue);
+    }
+}
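A minimal sketch (editorial, not part of the changeset itself; PrimitiveNamer is an illustrative name): unlike the Simple* visitors, the per-kind hooks such as visitPrimitiveAsInt are what a subclass normally overrides.

import javax.lang.model.type.PrimitiveType;
import javax.lang.model.util.TypeKindVisitor8;

// Names two primitive kinds; everything else falls back to the default value.
class PrimitiveNamer extends TypeKindVisitor8<String, Void> {
    PrimitiveNamer() {
        super("other");
    }

    @Override
    public String visitPrimitiveAsInt(PrimitiveType t, Void p) {
        return "int";
    }

    @Override
    public String visitPrimitiveAsDouble(PrimitiveType t, Void p) {
        return "double";
    }
}

// Typical use:
//   String name = typeMirror.accept(new PrimitiveNamer(), null);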
--- a/langtools/src/share/sample/javac/processing/src/CheckNamesProcessor.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/src/share/sample/javac/processing/src/CheckNamesProcessor.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -138,7 +138,7 @@
     public SourceVersion getSupportedSourceVersion() {
         /*
          * Return latest source version instead of a fixed version
-         * like RELEASE_7.  To return a fixed version, this class
+         * like RELEASE_8.  To return a fixed version, this class
          * could be annotated with a SupportedSourceVersion
          * annotation.
          *
@@ -192,7 +192,7 @@
         /**
          * Visitor to implement name checks.
          */
-        private class NameCheckScanner extends ElementScanner7<Void, Void> {
+        private class NameCheckScanner extends ElementScanner8<Void, Void> {
             // The visitor could be enhanced to return true/false if
             // there were warnings reported or a count of the number
             // of warnings.  This could be facilitated by using
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/com/sun/javadoc/T6735320/SerialFieldTest.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+import java.io.ObjectStreamField;
+import java.io.Serializable;
+
+public class SerialFieldTest implements Serializable {
+    /**
+     * @serialField
+     */
+    private static final ObjectStreamField[] serialPersistentFields = {
+        new ObjectStreamField("i", int.class),
+    };
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/com/sun/javadoc/T6735320/T6735320.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 6735320
+ * @summary javadoc throws exception if serialField value is missing
+ * @library  ../lib/
+ * @build    JavadocTester T6735320
+ * @run main T6735320
+ */
+public class T6735320 extends JavadocTester {
+
+    private static final String BUG_ID = "6735320";
+    private static final String[] ARGS = new String[]{
+        "-d", BUG_ID + ".out",
+        SRC_DIR + FS + "SerialFieldTest.java"
+    };
+
+    public String getBugId() {
+        return BUG_ID;
+    }
+
+    public String getBugName() {
+        return getClass().getName();
+    }
+
+    public static void main(String... args) {
+        T6735320 tester = new T6735320();
+        if (tester.runJavadoc(ARGS) != 0) {
+            throw new AssertionError("non-zero return code from javadoc");
+        }
+        if (tester.getErrorOutput().contains("StringIndexOutOfBoundsException")) {
+            throw new AssertionError("javadoc threw StringIndexOutOfBoundsException");
+        }
+    }
+}
--- a/langtools/test/com/sun/javadoc/lib/JavadocTester.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/com/sun/javadoc/lib/JavadocTester.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -197,8 +197,13 @@
         initOutputBuffers();
 
         ByteArrayOutputStream stdout = new ByteArrayOutputStream();
-        PrintStream prev = System.out;
+        PrintStream prevOut = System.out;
         System.setOut(new PrintStream(stdout));
+
+        ByteArrayOutputStream stderr = new ByteArrayOutputStream();
+        PrintStream prevErr = System.err;
+        System.setErr(new PrintStream(stderr));
+
         int returnCode = com.sun.tools.javadoc.Main.execute(
                 getBugName(),
                 new PrintWriter(errors, true),
@@ -207,8 +212,11 @@
                 docletClass,
                 getClass().getClassLoader(),
                 args);
-        System.setOut(prev);
+        System.setOut(prevOut);
         standardOut = new StringBuffer(stdout.toString());
+        System.setErr(prevErr);
+        errors.write(NL + stderr.toString());
+
         printJavadocOutput();
         return returnCode;
     }
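The hunk above mirrors for System.err what the tester already did for System.out: swap in a buffer-backed stream, run javadoc, then restore the original stream and append the captured text to the error log. A stand-alone sketch of that capture-and-restore pattern (editorial, not part of the changeset itself; names are illustrative):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

class StderrCapture {
    // Runs task with System.err redirected to a buffer and returns what it wrote.
    static String captureStderr(Runnable task) {
        ByteArrayOutputStream stderr = new ByteArrayOutputStream();
        PrintStream prevErr = System.err;
        System.setErr(new PrintStream(stderr));
        try {
            task.run();
        } finally {
            System.setErr(prevErr);  // always restore the original stream
        }
        return stderr.toString();
    }
}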
--- a/langtools/test/com/sun/javadoc/testLinkTaglet/TestLinkTaglet.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/com/sun/javadoc/testLinkTaglet/TestLinkTaglet.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,9 +23,9 @@
 
 /*
  * @test
- * @bug      4732864 6280605
+ * @bug      4732864 6280605 7064544
  * @summary  Make sure that you can link from one member to another using
- *           non-qualified name.
+ *           a non-qualified name; furthermore, ensure the right one is linked.
  * @author   jamieh
  * @library  ../lib/
  * @build    JavadocTester
@@ -36,7 +36,7 @@
 public class TestLinkTaglet extends JavadocTester {
 
     //Test information.
-    private static final String BUG_ID = "4732864-6280605";
+    private static final String BUG_ID = "4732864-6280605-7064544";
 
     //Javadoc arguments.
     private static final String[] ARGS = new String[] {
--- a/langtools/test/com/sun/javadoc/testLinkTaglet/pkg/C.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/com/sun/javadoc/testLinkTaglet/pkg/C.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,11 @@
 public class C {
 
     public InnerC MEMBER = new InnerC();
+    /**
+     *  A red herring inner class to confuse the matching, thus to
+     *  ensure the right one is linked.
+     */
+    public class RedHerringInnerC {}
 
     /**
      * Link to member in outer class: {@link #MEMBER} <br/>
--- a/langtools/test/tools/javac/6330997/T6330997.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/6330997/T6330997.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,12 +23,12 @@
 
 /**
  * @test
- * @bug     6330997
+ * @bug     6330997 7025789
  * @summary javac should accept class files with major version of the next release
  * @author  Wei Tao
  * @clean T1 T2
- * @compile -target 7 T1.java
- * @compile -target 7 T2.java
+ * @compile -target 8 T1.java
+ * @compile -target 8 T2.java
  * @run main/othervm T6330997
  */
 
--- a/langtools/test/tools/javac/6402516/CheckLocalElements.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/6402516/CheckLocalElements.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -95,7 +95,7 @@
         return encl == null ? "" : encl.accept(qualNameVisitor, null);
     }
 
-    private ElementVisitor<String,Void> qualNameVisitor = new SimpleElementVisitor7<String,Void>() {
+    private ElementVisitor<String,Void> qualNameVisitor = new SimpleElementVisitor8<String,Void>() {
         protected String defaultAction(Element e, Void ignore) {
             return "";
         }
--- a/langtools/test/tools/javac/StringsInSwitch/StringSwitches.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/StringsInSwitch/StringSwitches.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug 6827009
+ * @bug 6827009 7071246
  * @summary Positive tests for strings in switch.
  * @author  Joseph D. Darcy
  */
@@ -36,6 +36,7 @@
         failures += testPileup();
         failures += testSwitchingTwoWays();
         failures += testNamedBreak();
+        failures += testExtraParens();
 
         if (failures > 0) {
             throw new RuntimeException();
@@ -260,4 +261,19 @@
         result |= (1<<5);
         return result;
     }
+
+    private static int testExtraParens() {
+        int failures = 1;
+        String s = "first";
+
+        switch(s) {
+        case (("first")):
+            failures = 0;
+            break;
+        case ("second"):
+            throw new RuntimeException("Should not be reached.");
+        }
+
+        return failures;
+    }
 }
--- a/langtools/test/tools/javac/T6358166.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/T6358166.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,7 @@
 
         Main compilerMain = new Main("javac", new PrintWriter(System.err, true));
         compilerMain.setOptions(Options.instance(context));
-        compilerMain.filenames = new ListBuffer<File>();
+        compilerMain.filenames = new LinkedHashSet<File>();
         compilerMain.processArgs(args);
 
         JavaCompiler c = JavaCompiler.instance(context);
--- a/langtools/test/tools/javac/T6358168.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/T6358168.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,7 +72,7 @@
 
         Main compilerMain = new Main("javac", new PrintWriter(System.err, true));
         compilerMain.setOptions(Options.instance(context));
-        compilerMain.filenames = new ListBuffer<File>();
+        compilerMain.filenames = new LinkedHashSet<File>();
         compilerMain.processArgs(new String[] { "-d", "." });
 
         JavaCompiler compiler = JavaCompiler.instance(context);
@@ -91,7 +91,7 @@
 
         Main compilerMain = new Main("javac", new PrintWriter(System.err, true));
         compilerMain.setOptions(Options.instance(context));
-        compilerMain.filenames = new ListBuffer<File>();
+        compilerMain.filenames = new LinkedHashSet<File>();
         compilerMain.processArgs(new String[] {
                                      "-XprintRounds",
                                      "-processorpath", testClasses,
--- a/langtools/test/tools/javac/api/T6395981.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/api/T6395981.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,13 +23,13 @@
 
 /*
  * @test
- * @bug     6395981 6458819
+ * @bug     6395981 6458819 7025784
  * @summary JavaCompilerTool and Tool must specify version of JLS and JVMS
  * @author  Peter von der Ah\u00e9
  * @run main/fail T6395981
  * @run main/fail T6395981 RELEASE_3 RELEASE_5 RELEASE_6
  * @run main/fail T6395981 RELEASE_0 RELEASE_1 RELEASE_2 RELEASE_3 RELEASE_4 RELEASE_5 RELEASE_6
- * @run main T6395981 RELEASE_3 RELEASE_4 RELEASE_5 RELEASE_6 RELEASE_7
+ * @run main T6395981 RELEASE_3 RELEASE_4 RELEASE_5 RELEASE_6 RELEASE_7 RELEASE_8
  */
 
 import java.util.EnumSet;
--- a/langtools/test/tools/javac/api/TestOperators.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/api/TestOperators.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,8 @@
  * @bug     6338064 6346249 6340951 6392177
  * @summary Tree API: can't determine kind of operator
  * @author  Peter von der Ah\u00e9
- * @compile TestOperators.java
+ * @library ../lib
+ * @build JavacTestingAbstractProcessor TestOperators
  * @compile -processor TestOperators -proc:only TestOperators.java
  */
 
@@ -46,7 +47,7 @@
 }
 
 @SupportedAnnotationTypes("TestMe")
-public class TestOperators extends AbstractProcessor {
+public class TestOperators extends JavacTestingAbstractProcessor {
 
     @TestMe(POSTFIX_INCREMENT)
     public int test_POSTFIX_INCREMENT(int i) {
@@ -299,7 +300,7 @@
         final Trees trees = Trees.instance(processingEnv);
         final Messager log = processingEnv.getMessager();
         final Elements elements = processingEnv.getElementUtils();
-        class Scan extends ElementScanner7<Void,Void> {
+        class Scan extends ElementScanner<Void,Void> {
             @Override
             public Void visitExecutable(ExecutableElement e, Void p) {
                 Object debug = e; // info for exception handler
@@ -343,5 +344,4 @@
         }
         return true;
     }
-
 }
--- a/langtools/test/tools/javac/diags/examples.not-yet.txt	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/diags/examples.not-yet.txt	Wed Aug 17 22:47:12 2011 -0700
@@ -31,7 +31,6 @@
 compiler.err.proc.cant.create.loader                    # security exception from service loader
 compiler.err.proc.no.service                            # JavacProcessingEnvironment: no service loader available
 compiler.err.proc.processor.bad.option.name             # cannot happen? masked by javac.err.invalid.A.key
-compiler.err.proc.processor.constructor.error
 compiler.err.proc.service.problem                       # JavacProcessingEnvironment: catch Throwable from service loader
 compiler.err.signature.doesnt.match.intf                # UNUSED
 compiler.err.signature.doesnt.match.supertype           # UNUSED
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/diags/examples/CannotCreateArrayWithDiamond.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// key: compiler.err.cannot.create.array.with.diamond
+
+class CannotCreateArrayWithDiamond {
+    Object[] array = new Object<>[3];
+}
--- a/langtools/test/tools/javac/enum/6350057/T6350057.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/enum/6350057/T6350057.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,10 +23,11 @@
 
 /*
  * @test
- * @bug 6350057
+ * @bug 6350057 7025809
  * @summary Test that parameters on implicit enum methods have the right kind
  * @author  Joseph D. Darcy
- * @compile T6350057.java
+ * @library ../../lib
+ * @build   JavacTestingAbstractProcessor T6350057
  * @compile -processor T6350057 -proc:only TestEnum.java
  */
 
@@ -38,9 +39,8 @@
 import javax.lang.model.util.*;
 import static javax.tools.Diagnostic.Kind.*;
 
-@SupportedAnnotationTypes("*")
-public class T6350057 extends AbstractProcessor {
-    static class LocalVarAllergy extends ElementKindVisitor6<Boolean, Void> {
+public class T6350057 extends JavacTestingAbstractProcessor {
+    static class LocalVarAllergy extends ElementKindVisitor<Boolean, Void> {
         @Override
         public Boolean visitTypeAsEnum(TypeElement e, Void v) {
             System.out.println("visitTypeAsEnum: " + e.getSimpleName().toString());
--- a/langtools/test/tools/javac/enum/6424358/T6424358.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/enum/6424358/T6424358.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,10 +23,11 @@
 
 /*
  * @test
- * @bug     6424358
+ * @bug     6424358 7025809
  * @summary Synthesized static enum method values() is final
  * @author  Peter von der Ah\u00e9
- * @compile T6424358.java
+ * @library ../../lib
+ * @build   JavacTestingAbstractProcessor T6424358
  * @compile -processor T6424358 -proc:only T6424358.java
  */
 
@@ -39,8 +40,7 @@
 
 @interface TestMe {}
 
-@SupportedAnnotationTypes("*")
-public class T6424358 extends AbstractProcessor {
+public class T6424358 extends JavacTestingAbstractProcessor {
     @TestMe enum Test { FOO; }
 
     public boolean process(Set<? extends TypeElement> annotations,
@@ -48,7 +48,7 @@
         final Messager log = processingEnv.getMessager();
         final Elements elements = processingEnv.getElementUtils();
         final TypeElement testMe = elements.getTypeElement("TestMe");
-        class Scan extends ElementScanner7<Void,Void> {
+        class Scan extends ElementScanner<Void,Void> {
             @Override
             public Void visitExecutable(ExecutableElement e, Void p) {
                 System.err.println("Looking at " + e);
@@ -65,9 +65,4 @@
             scan.scan(e);
         return true;
     }
-
-    @Override
-    public SourceVersion getSupportedSourceVersion() {
-        return SourceVersion.latest();
-    }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/failover/FailOver15.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,19 @@
+/*
+ * @test /nodynamiccopyright/
+ * @bug 6970584 7060926
+ * @summary Attr.PostAttrAnalyzer misses a case
+ *
+ * @compile/fail/ref=FailOver15.out -XDrawDiagnostics -XDshouldStopPolicy=FLOW -XDdev FailOver15.java
+ */
+
+class Test {
+    void m() {
+        new UnknownClass<String, Void>() {
+            public String getString() {
+                String s = "";
+                s += "more";
+                return s;
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/failover/FailOver15.out	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,3 @@
+FailOver15.java:17:10: compiler.err.expected: ';'
+FailOver15.java:11:13: compiler.err.cant.resolve.location: kindname.class, UnknownClass, , , (compiler.misc.location: kindname.class, Test, null)
+2 errors
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/generics/diamond/7046778/DiamondAndInnerClassTest.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 7046778
+ * @summary Project Coin: problem with diamond and member inner classes
+ */
+
+import com.sun.source.util.JavacTask;
+import java.net.URI;
+import java.util.Arrays;
+import javax.tools.Diagnostic;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaFileObject;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.StandardJavaFileManager;
+import javax.tools.ToolProvider;
+
+public class DiamondAndInnerClassTest {
+
+    static int checkCount = 0;
+
+    enum TypeArgumentKind {
+        NONE(""),
+        STRING("<String>"),
+        INTEGER("<Integer>"),
+        DIAMOND("<>");
+
+        String typeargStr;
+
+        private TypeArgumentKind(String typeargStr) {
+            this.typeargStr = typeargStr;
+        }
+
+        boolean compatible(TypeArgumentKind that) {
+            switch (this) {
+                case NONE: return true;
+                case STRING: return that != INTEGER;
+                case INTEGER: return that != STRING;
+                default: throw new AssertionError("Unexpected decl kind: " + this);
+            }
+        }
+
+        boolean compatible(ArgumentKind that) {
+            switch (this) {
+                case NONE: return true;
+                case STRING: return that == ArgumentKind.STRING;
+                case INTEGER: return that == ArgumentKind.INTEGER;
+                default: throw new AssertionError("Unexpected decl kind: " + this);
+            }
+        }
+    }
+
+    enum ArgumentKind {
+        OBJECT("(Object)null"),
+        STRING("(String)null"),
+        INTEGER("(Integer)null");
+
+        String argStr;
+
+        private ArgumentKind(String argStr) {
+            this.argStr = argStr;
+        }
+    }
+
+    enum TypeQualifierArity {
+        ONE(1, "A1#TA1"),
+        TWO(2, "A1#TA1.A2#TA2"),
+        THREE(3, "A1#TA1.A2#TA2.A3#TA3");
+
+        int n;
+        String qualifierStr;
+
+        private TypeQualifierArity(int n, String qualifierStr) {
+            this.n = n;
+            this.qualifierStr = qualifierStr;
+        }
+
+        String getType(TypeArgumentKind... typeArgumentKinds) {
+            String res = qualifierStr;
+            for (int i = 1 ; i <= typeArgumentKinds.length ; i++) {
+                res = res.replace("#TA" + i, typeArgumentKinds[i-1].typeargStr);
+            }
+            return res;
+        }
+
+        boolean matches(InnerClassDeclArity innerClassDeclArity) {
+            return n == innerClassDeclArity.n;
+        }
+    }
+
+    enum InnerClassDeclArity {
+        ONE(1, "class A1<X> { A1(X x1) { } #B }"),
+        TWO(2, "class A1<X1> { class A2<X2> { A2(X1 x1, X2 x2) { }  #B } }"),
+        THREE(3, "class A1<X1> { class A2<X2> { class A3<X3> { A3(X1 x1, X2 x2, X3 x3) { } #B } } }");
+
+        int n;
+        String classDeclStr;
+
+        private InnerClassDeclArity(int n, String classDeclStr) {
+            this.n = n;
+            this.classDeclStr = classDeclStr;
+        }
+    }
+
+    enum ArgumentListArity {
+        ONE(1, "(#A1)"),
+        TWO(2, "(#A1,#A2)"),
+        THREE(3, "(#A1,#A2,#A3)");
+
+        int n;
+        String argListStr;
+
+        private ArgumentListArity(int n, String argListStr) {
+            this.n = n;
+            this.argListStr = argListStr;
+        }
+
+        String getArgs(ArgumentKind... argumentKinds) {
+            String res = argListStr;
+            for (int i = 1 ; i <= argumentKinds.length ; i++) {
+                res = res.replace("#A" + i, argumentKinds[i-1].argStr);
+            }
+            return res;
+        }
+
+        boolean matches(InnerClassDeclArity innerClassDeclArity) {
+            return n == innerClassDeclArity.n;
+        }
+    }
+
+    public static void main(String... args) throws Exception {
+
+        //create default shared JavaCompiler - reused across multiple compilations
+        JavaCompiler comp = ToolProvider.getSystemJavaCompiler();
+        StandardJavaFileManager fm = comp.getStandardFileManager(null, null, null);
+
+        for (InnerClassDeclArity innerClassDeclArity : InnerClassDeclArity.values()) {
+            for (TypeQualifierArity declType : TypeQualifierArity.values()) {
+                if (!declType.matches(innerClassDeclArity)) continue;
+                for (TypeQualifierArity newClassType : TypeQualifierArity.values()) {
+                    if (!newClassType.matches(innerClassDeclArity)) continue;
+                    for (ArgumentListArity argList : ArgumentListArity.values()) {
+                        if (!argList.matches(innerClassDeclArity)) continue;
+                        for (TypeArgumentKind taDecl1 : TypeArgumentKind.values()) {
+                            boolean isDeclRaw = taDecl1 == TypeArgumentKind.NONE;
+                            //no diamond on decl site
+                            if (taDecl1 == TypeArgumentKind.DIAMOND) continue;
+                            for (TypeArgumentKind taSite1 : TypeArgumentKind.values()) {
+                                boolean isSiteRaw = taSite1 == TypeArgumentKind.NONE;
+                                //diamond only allowed on the last type qualifier
+                                if (taSite1 == TypeArgumentKind.DIAMOND &&
+                                        innerClassDeclArity != InnerClassDeclArity.ONE) continue;
+                                for (ArgumentKind arg1 : ArgumentKind.values()) {
+                                    if (innerClassDeclArity == InnerClassDeclArity.ONE) {
+                                        new DiamondAndInnerClassTest(innerClassDeclArity, declType, newClassType,
+                                                argList, new TypeArgumentKind[] {taDecl1},
+                                                new TypeArgumentKind[] {taSite1}, new ArgumentKind[] {arg1}).run(comp, fm);
+                                        continue;
+                                    }
+                                    for (TypeArgumentKind taDecl2 : TypeArgumentKind.values()) {
+                                        //no rare types
+                                        if (isDeclRaw != (taDecl2 == TypeArgumentKind.NONE)) continue;
+                                        //no diamond on decl site
+                                        if (taDecl2 == TypeArgumentKind.DIAMOND) continue;
+                                        for (TypeArgumentKind taSite2 : TypeArgumentKind.values()) {
+                                            //no rare types
+                                            if (isSiteRaw != (taSite2 == TypeArgumentKind.NONE)) continue;
+                                            //diamond only allowed on the last type qualifier
+                                            if (taSite2 == TypeArgumentKind.DIAMOND &&
+                                                    innerClassDeclArity != InnerClassDeclArity.TWO) continue;
+                                            for (ArgumentKind arg2 : ArgumentKind.values()) {
+                                                if (innerClassDeclArity == InnerClassDeclArity.TWO) {
+                                                    new DiamondAndInnerClassTest(innerClassDeclArity, declType, newClassType,
+                                                            argList, new TypeArgumentKind[] {taDecl1, taDecl2},
+                                                            new TypeArgumentKind[] {taSite1, taSite2},
+                                                            new ArgumentKind[] {arg1, arg2}).run(comp, fm);
+                                                    continue;
+                                                }
+                                                for (TypeArgumentKind taDecl3 : TypeArgumentKind.values()) {
+                                                    //no rare types
+                                                    if (isDeclRaw != (taDecl3 == TypeArgumentKind.NONE)) continue;
+                                                    //no diamond on decl site
+                                                    if (taDecl3 == TypeArgumentKind.DIAMOND) continue;
+                                                    for (TypeArgumentKind taSite3 : TypeArgumentKind.values()) {
+                                                        //no rare types
+                                                        if (isSiteRaw != (taSite3 == TypeArgumentKind.NONE)) continue;
+                                                        //diamond only allowed on the last type qualifier
+                                                        if (taSite3 == TypeArgumentKind.DIAMOND &&
+                                                                innerClassDeclArity != InnerClassDeclArity.THREE) continue;
+                                                        for (ArgumentKind arg3 : ArgumentKind.values()) {
+                                                            if (innerClassDeclArity == InnerClassDeclArity.THREE) {
+                                                                new DiamondAndInnerClassTest(innerClassDeclArity, declType, newClassType,
+                                                                        argList, new TypeArgumentKind[] {taDecl1, taDecl2, taDecl3},
+                                                                        new TypeArgumentKind[] {taSite1, taSite2, taSite3},
+                                                                        new ArgumentKind[] {arg1, arg2, arg3}).run(comp, fm);
+                                                                continue;
+                                                            }
+                                                        }
+                                                    }
+                                                }
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        System.out.println("Total check executed: " + checkCount);
+    }
+
+    InnerClassDeclArity innerClassDeclArity;
+    TypeQualifierArity declType;
+    TypeQualifierArity siteType;
+    ArgumentListArity argList;
+    TypeArgumentKind[] declTypeArgumentKinds;
+    TypeArgumentKind[] siteTypeArgumentKinds;
+    ArgumentKind[] argumentKinds;
+    JavaSource source;
+    DiagnosticChecker diagChecker;
+
+    DiamondAndInnerClassTest(InnerClassDeclArity innerClassDeclArity,
+            TypeQualifierArity declType, TypeQualifierArity siteType, ArgumentListArity argList,
+            TypeArgumentKind[] declTypeArgumentKinds, TypeArgumentKind[] siteTypeArgumentKinds,
+            ArgumentKind[] argumentKinds) {
+        this.innerClassDeclArity = innerClassDeclArity;
+        this.declType = declType;
+        this.siteType = siteType;
+        this.argList = argList;
+        this.declTypeArgumentKinds = declTypeArgumentKinds;
+        this.siteTypeArgumentKinds = siteTypeArgumentKinds;
+        this.argumentKinds = argumentKinds;
+        this.source = new JavaSource();
+        this.diagChecker = new DiagnosticChecker();
+    }
+
+    class JavaSource extends SimpleJavaFileObject {
+
+        String bodyTemplate = "#D res = new #S#AL;";
+
+        String source;
+
+        public JavaSource() {
+            super(URI.create("myfo:/Test.java"), JavaFileObject.Kind.SOURCE);
+            source = innerClassDeclArity.classDeclStr.replace("#B", bodyTemplate)
+                             .replace("#D", declType.getType(declTypeArgumentKinds))
+                             .replace("#S", siteType.getType(siteTypeArgumentKinds))
+                             .replace("#AL", argList.getArgs(argumentKinds));
+        }
+
+        @Override
+        public CharSequence getCharContent(boolean ignoreEncodingErrors) {
+            return source;
+        }
+    }
+
+    void run(JavaCompiler tool, StandardJavaFileManager fm) throws Exception {
+        JavacTask ct = (JavacTask)tool.getTask(null, fm, diagChecker,
+                null, null, Arrays.asList(source));
+        try {
+            ct.analyze();
+        } catch (Throwable ex) {
+            throw new AssertionError("Error thron when compiling the following code:\n" + source.getCharContent(true));
+        }
+        check();
+    }
+
+    void check() {
+        checkCount++;
+
+        boolean errorExpected = false;
+
+        TypeArgumentKind[] expectedArgKinds = new TypeArgumentKind[innerClassDeclArity.n];
+
+        for (int i = 0 ; i < innerClassDeclArity.n ; i++) {
+            if (!declTypeArgumentKinds[i].compatible(siteTypeArgumentKinds[i])) {
+                errorExpected = true;
+                break;
+            }
+            expectedArgKinds[i] = siteTypeArgumentKinds[i] == TypeArgumentKind.DIAMOND ?
+                declTypeArgumentKinds[i] : siteTypeArgumentKinds[i];
+        }
+
+        if (!errorExpected) {
+            for (int i = 0 ; i < innerClassDeclArity.n ; i++) {
+                //System.out.println("check " + expectedArgKinds[i] + " against " + argumentKinds[i]);
+                if (!expectedArgKinds[i].compatible(argumentKinds[i])) {
+                    errorExpected = true;
+                    break;
+                }
+            }
+        }
+
+        if (errorExpected != diagChecker.errorFound) {
+            throw new Error("invalid diagnostics for source:\n" +
+                source.getCharContent(true) +
+                "\nFound error: " + diagChecker.errorFound +
+                "\nExpected error: " + errorExpected);
+        }
+    }
+
+    static class DiagnosticChecker implements javax.tools.DiagnosticListener<JavaFileObject> {
+
+        boolean errorFound;
+
+        public void report(Diagnostic<? extends JavaFileObject> diagnostic) {
+            if (diagnostic.getKind() == Diagnostic.Kind.ERROR) {
+                errorFound = true;
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/generics/diamond/7057297/T7057297.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,29 @@
+/*
+ * @test /nodynamiccopyright/
+ * @bug 7057297
+ *
+ * @summary Project Coin: diamond erroneously accepts in array initializer expressions
+ * @compile/fail/ref=T7057297.out T7057297.java -XDrawDiagnostics
+ *
+ */
+
+class T7205797<X> {
+
+    class Inner<Y> {}
+
+    T7205797<String>[] o1 = new T7205797<>[1]; //error
+    T7205797<String>[] o2 = new T7205797<>[1][1]; //error
+    T7205797<String>[] o3 = new T7205797<>[1][1][1]; //error
+
+    T7205797<String>[] o4 = new T7205797<>[] { }; //error
+    T7205797<String>[] o5 = new T7205797<>[][] { }; //error
+    T7205797<String>[] o6 = new T7205797<>[][][] { }; //error
+
+    T7205797<String>.Inner<String>[] o1 = new T7205797<String>.Inner<>[1]; //error
+    T7205797<String>.Inner<String>[] o2 = new T7205797<String>.Inner<>[1][1]; //error
+    T7205797<String>.Inner<String>[] o3 = new T7205797<String>.Inner<>[1][1][1]; //error
+
+    T7205797<String>.Inner<String>[] o4 = new T7205797<String>.Inner<>[] { }; //error
+    T7205797<String>.Inner<String>[] o5 = new T7205797<String>.Inner<>[][] { }; //error
+    T7205797<String>.Inner<String>[] o6 = new T7205797<String>.Inner<>[][][] { }; //error
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/generics/diamond/7057297/T7057297.out	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,13 @@
+T7057297.java:14:41: compiler.err.cannot.create.array.with.diamond
+T7057297.java:15:41: compiler.err.cannot.create.array.with.diamond
+T7057297.java:16:41: compiler.err.cannot.create.array.with.diamond
+T7057297.java:18:41: compiler.err.cannot.create.array.with.diamond
+T7057297.java:19:41: compiler.err.cannot.create.array.with.diamond
+T7057297.java:20:41: compiler.err.cannot.create.array.with.diamond
+T7057297.java:22:69: compiler.err.cannot.create.array.with.diamond
+T7057297.java:23:69: compiler.err.cannot.create.array.with.diamond
+T7057297.java:24:69: compiler.err.cannot.create.array.with.diamond
+T7057297.java:26:69: compiler.err.cannot.create.array.with.diamond
+T7057297.java:27:69: compiler.err.cannot.create.array.with.diamond
+T7057297.java:28:69: compiler.err.cannot.create.array.with.diamond
+12 errors
--- a/langtools/test/tools/javac/generics/diamond/neg/Neg09.out	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/generics/diamond/neg/Neg09.out	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
-Neg09.java:17:34: compiler.err.cant.apply.diamond.1: Neg09.Member, (compiler.misc.diamond.and.anon.class: Neg09.Member)
-Neg09.java:18:34: compiler.err.cant.apply.diamond.1: Neg09.Nested, (compiler.misc.diamond.and.anon.class: Neg09.Nested)
-Neg09.java:22:39: compiler.err.cant.apply.diamond.1: Neg09.Member, (compiler.misc.diamond.and.anon.class: Neg09.Member)
-Neg09.java:23:40: compiler.err.cant.apply.diamond.1: Neg09.Nested, (compiler.misc.diamond.and.anon.class: Neg09.Nested)
+Neg09.java:17:34: compiler.err.cant.apply.diamond.1: Neg09.Member<X>, (compiler.misc.diamond.and.anon.class: Neg09.Member<X>)
+Neg09.java:18:34: compiler.err.cant.apply.diamond.1: Neg09.Nested<X>, (compiler.misc.diamond.and.anon.class: Neg09.Nested<X>)
+Neg09.java:22:39: compiler.err.cant.apply.diamond.1: Neg09.Member<X>, (compiler.misc.diamond.and.anon.class: Neg09.Member<X>)
+Neg09.java:23:40: compiler.err.cant.apply.diamond.1: Neg09.Nested<X>, (compiler.misc.diamond.and.anon.class: Neg09.Nested<X>)
 4 errors
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/generics/rawOverride/7062745/GenericOverrideTest.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 7062745
+ * @summary  Regression: difference in overload resolution when two methods are maximally specific
+ */
+
+import com.sun.source.util.JavacTask;
+import java.net.URI;
+import java.util.Arrays;
+import javax.tools.Diagnostic;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaFileObject;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.StandardJavaFileManager;
+import javax.tools.ToolProvider;
+
+public class GenericOverrideTest {
+
+    static int checkCount = 0;
+
+    enum SignatureKind {
+        NON_GENERIC(""),
+        GENERIC("<X>");
+
+        String paramStr;
+
+        private SignatureKind(String paramStr) {
+            this.paramStr = paramStr;
+        }
+    }
+
+    enum ReturnTypeKind {
+        LIST("List"),
+        ARRAYLIST("ArrayList");
+
+        String retStr;
+
+        private ReturnTypeKind(String retStr) {
+            this.retStr = retStr;
+        }
+
+        boolean moreSpecificThan(ReturnTypeKind that) {
+            switch (this) {
+                case LIST:
+                    return that == this;
+                case ARRAYLIST:
+                    return that == LIST || that == ARRAYLIST;
+                default: throw new AssertionError("Unexpected ret kind: " + this);
+            }
+        }
+    }
+
+    enum TypeArgumentKind {
+        NONE(""),
+        UNBOUND("<?>"),
+        INTEGER("<Integer>"),
+        NUMBER("<Number>"),
+        TYPEVAR("<X>");
+
+        String typeargStr;
+
+        private TypeArgumentKind(String typeargStr) {
+            this.typeargStr = typeargStr;
+        }
+
+        boolean compatibleWith(SignatureKind sig) {
+            switch (this) {
+                case TYPEVAR: return sig != SignatureKind.NON_GENERIC;
+                default: return true;
+            }
+        }
+
+        boolean moreSpecificThan(TypeArgumentKind that, boolean strict) {
+            switch (this) {
+                case NONE:
+                    return that == this || !strict;
+                case UNBOUND:
+                    return that == this || that == NONE;
+                case INTEGER:
+                case NUMBER:
+                case TYPEVAR:
+                    return that == this || that == NONE || that == UNBOUND;
+                default: throw new AssertionError("Unexpected typearg kind: " + this);
+            }
+        }
+
+        boolean assignableTo(TypeArgumentKind that, SignatureKind sig) {
+            switch (this) {
+                case NONE:
+                    //this case needs a workaround because javac's impl of 15.12.2.8 is too strict
+                    //ideally should be just 'return true' (see 7067746)
+                    return sig == SignatureKind.NON_GENERIC || that == NONE;
+                case UNBOUND:
+                    return that == this || that == NONE;
+                case INTEGER:
+                case NUMBER:
+                    return that == this || that == NONE || that == UNBOUND;
+                case TYPEVAR:
+                    return true;
+                default: throw new AssertionError("Unexpected typearg kind: " + this);
+            }
+        }
+    }
+
+    public static void main(String... args) throws Exception {
+
+        //create default shared JavaCompiler - reused across multiple compilations
+        JavaCompiler comp = ToolProvider.getSystemJavaCompiler();
+        StandardJavaFileManager fm = comp.getStandardFileManager(null, null, null);
+
+        for (SignatureKind sig1 : SignatureKind.values()) {
+            for (ReturnTypeKind rt1 : ReturnTypeKind.values()) {
+                for (TypeArgumentKind ta1 : TypeArgumentKind.values()) {
+                    if (!ta1.compatibleWith(sig1)) continue;
+                    for (SignatureKind sig2 : SignatureKind.values()) {
+                        for (ReturnTypeKind rt2 : ReturnTypeKind.values()) {
+                            for (TypeArgumentKind ta2 : TypeArgumentKind.values()) {
+                                if (!ta2.compatibleWith(sig2)) continue;
+                                for (ReturnTypeKind rt3 : ReturnTypeKind.values()) {
+                                    for (TypeArgumentKind ta3 : TypeArgumentKind.values()) {
+                                        if (!ta3.compatibleWith(SignatureKind.NON_GENERIC)) continue;
+                                        new GenericOverrideTest(sig1, rt1, ta1, sig2, rt2, ta2, rt3, ta3).run(comp, fm);
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        System.out.println("Total check executed: " + checkCount);
+    }
+
+    SignatureKind sig1, sig2;
+    ReturnTypeKind rt1, rt2, rt3;
+    TypeArgumentKind ta1, ta2, ta3;
+    JavaSource source;
+    DiagnosticChecker diagChecker;
+
+    GenericOverrideTest(SignatureKind sig1, ReturnTypeKind rt1, TypeArgumentKind ta1,
+            SignatureKind sig2, ReturnTypeKind rt2, TypeArgumentKind ta2, ReturnTypeKind rt3, TypeArgumentKind ta3) {
+        this.sig1 = sig1;
+        this.sig2 = sig2;
+        this.rt1 = rt1;
+        this.rt2 = rt2;
+        this.rt3 = rt3;
+        this.ta1 = ta1;
+        this.ta2 = ta2;
+        this.ta3 = ta3;
+        this.source = new JavaSource();
+        this.diagChecker = new DiagnosticChecker();
+    }
+
+    class JavaSource extends SimpleJavaFileObject {
+
+        String template = "import java.util.*;\n" +
+                          "interface A { #S1 #R1#TA1 m(); }\n" +
+                          "interface B { #S2 #R2#TA2 m(); }\n" +
+                          "interface AB extends A, B {}\n" +
+                          "class Test {\n" +
+                          "  void test(AB ab) { #R3#TA3 n = ab.m(); }\n" +
+                          "}";
+
+        String source;
+
+        public JavaSource() {
+            super(URI.create("myfo:/Test.java"), JavaFileObject.Kind.SOURCE);
+            source = template.replace("#S1", sig1.paramStr).
+                    replace("#S2", sig2.paramStr).
+                    replace("#R1", rt1.retStr).
+                    replace("#R2", rt2.retStr).
+                    replace("#R3", rt3.retStr).
+                    replace("#TA1", ta1.typeargStr).
+                    replace("#TA2", ta2.typeargStr).
+                    replace("#TA3", ta3.typeargStr);
+        }
+
+        @Override
+        public CharSequence getCharContent(boolean ignoreEncodingErrors) {
+            return source;
+        }
+    }
+
+    void run(JavaCompiler tool, StandardJavaFileManager fm) throws Exception {
+        JavacTask ct = (JavacTask)tool.getTask(null, fm, diagChecker,
+                null, null, Arrays.asList(source));
+        try {
+            ct.analyze();
+        } catch (Throwable ex) {
+            throw new AssertionError("Error thrown when compiling the following code:\n" + source.getCharContent(true));
+        }
+        check();
+    }
+
+    void check() {
+        checkCount++;
+
+        boolean errorExpected = false;
+        int mostSpecific = 0;
+
+        //first check that either |R1| <: |R2| or |R2| <: |R1|
+        if (rt1 != rt2) {
+            if (!rt1.moreSpecificThan(rt2) &&
+                    !rt2.moreSpecificThan(rt1)) {
+                errorExpected = true;
+            } else {
+                mostSpecific = rt1.moreSpecificThan(rt2) ? 1 : 2;
+            }
+        }
+
+        //check that either TA1 <= TA2 or TA2 <= TA1 (unless most specific return found above is raw)
+        if (!errorExpected) {
+            if (ta1 != ta2) {
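+                //use the strict ordering when it can rank the two type arguments; otherwise fall back to the
+                //relaxed ordering, in which a raw (NONE) argument is also considered more specific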
+                boolean useStrictCheck = ta1.moreSpecificThan(ta2, true) || ta2.moreSpecificThan(ta1, true);
+                if (!ta1.moreSpecificThan(ta2, useStrictCheck) &&
+                        !ta2.moreSpecificThan(ta1, useStrictCheck)) {
+                    errorExpected = true;
+                } else {
+                    int mostSpecific2 = ta1.moreSpecificThan(ta2, useStrictCheck) ? 1 : 2;
+                    if (mostSpecific != 0 && mostSpecific2 != mostSpecific) {
+                        errorExpected = mostSpecific == 1 ? ta1 != TypeArgumentKind.NONE : ta2 != TypeArgumentKind.NONE;
+                    } else {
+                        mostSpecific = mostSpecific2;
+                    }
+                }
+            } else if (mostSpecific == 0) {
+                //when no signature is better than the other, an arbitrary choice
+                //must be made - javac always picks the second signature
+                mostSpecific = 2;
+            }
+        }
+
+        //finally, check that most specific return type is compatible with expected type
+        if (!errorExpected) {
+            ReturnTypeKind msrt = mostSpecific == 1 ? rt1 : rt2;
+            TypeArgumentKind msta = mostSpecific == 1 ? ta1 : ta2;
+            SignatureKind mssig = mostSpecific == 1 ? sig1 : sig2;
+
+            if (!msrt.moreSpecificThan(rt3) ||
+                    !msta.assignableTo(ta3, mssig)) {
+                errorExpected = true;
+            }
+        }
+
+        if (errorExpected != diagChecker.errorFound) {
+            throw new Error("invalid diagnostics for source:\n" +
+                source.getCharContent(true) +
+                "\nFound error: " + diagChecker.errorFound +
+                "\nExpected error: " + errorExpected);
+        }
+    }
+
+    static class DiagnosticChecker implements javax.tools.DiagnosticListener<JavaFileObject> {
+
+        boolean errorFound;
+
+        public void report(Diagnostic<? extends JavaFileObject> diagnostic) {
+            if (diagnostic.getKind() == Diagnostic.Kind.ERROR) {
+                errorFound = true;
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/generics/rawOverride/7062745/T7062745neg.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,18 @@
+/*
+ * @test /nodynamiccopyright/
+ * @bug     7062745
+ * @summary  Regression: difference in overload resolution when two methods are maximally specific
+ * @compile/fail/ref=T7062745neg.out -XDrawDiagnostics T7062745neg.java
+ */
+
+import java.util.*;
+
+class T7062745neg {
+    interface A { List<Number> getList(); }
+    interface B { ArrayList getList(); }
+    interface AB extends A, B {}
+
+    void test(AB ab) {
+        Number n = ab.getList().get(1);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/generics/rawOverride/7062745/T7062745neg.out	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,2 @@
+T7062745neg.java:16:36: compiler.err.prob.found.req: (compiler.misc.incompatible.types), java.lang.Object, java.lang.Number
+1 error
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/generics/rawOverride/7062745/T7062745pos.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 7062745
+ * @summary  Regression: difference in overload resolution when two methods are maximally specific
+ *
+ * @compile T7062745pos.java
+ */
+
+import java.util.*;
+
+class T7062745pos {
+    interface A { List<Number> getList(); }
+    interface B { List getList(); }
+    interface AB extends A, B {}
+
+    void test(AB ab) {
+        Number n = ab.getList().get(1);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/lib/CompileFail.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.io.*;
+import java.util.*;
+
+/*
+ * Utility class to emulate jtreg @compile/fail, but also checking the specific
+ * exit code, given as the first arg.
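+ * For example, "CompileFail CMDERR -A Foo.java" expects javac to exit with EXIT_CMDERR.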
+ */
+public class CompileFail {
+    public static void main(String... args) {
+        if (args.length < 2)
+            throw new IllegalArgumentException("insufficient args");
+        int expected_rc = getReturnCode(args[0]);
+
+        List<String> javacArgs = new ArrayList<>();
+        javacArgs.addAll(Arrays.asList(
+            "-bootclasspath", System.getProperty("sun.boot.class.path"),
+            "-d", "."
+        ));
+
+        File testSrc = new File(System.getProperty("test.src"));
+        for (int i = 1; i < args.length; i++) { // skip first arg
+            String arg = args[i];
+            if (arg.endsWith(".java"))
+                javacArgs.add(new File(testSrc, arg).getPath());
+            else
+                javacArgs.add(arg);
+        }
+
+        int rc = com.sun.tools.javac.Main.compile(
+            javacArgs.toArray(new String[javacArgs.size()]));
+
+        if (rc != expected_rc)
+            throw new Error("unexpected exit code: " + rc
+                        + ", expected: " + expected_rc);
+    }
+
+    static int getReturnCode(String name) {
+        switch (name) {
+            case "OK":
+                return EXIT_OK;
+
+            case "ERROR":
+                return EXIT_ERROR;
+
+            case "CMDERR":
+                return EXIT_CMDERR;
+
+            case "SYSERR":
+                return EXIT_SYSERR;
+
+            case "ABNORMAL":
+                return EXIT_ABNORMAL;
+
+            default:
+                throw new IllegalArgumentException(name);
+        }
+    }
+
+    // The following is cut-n-paste from com.sun.tools.javac.main.Main
+    static final int
+        EXIT_OK = 0,        // Compilation completed with no errors.
+        EXIT_ERROR = 1,     // Completed but reported errors.
+        EXIT_CMDERR = 2,    // Bad command-line arguments
+        EXIT_SYSERR = 3,    // System error or resource exhaustion.
+        EXIT_ABNORMAL = 4;  // Compiler terminated abnormally
+}
--- a/langtools/test/tools/javac/lib/JavacTestingAbstractProcessor.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/lib/JavacTestingAbstractProcessor.java	Wed Aug 17 22:47:12 2011 -0700
@@ -25,6 +25,7 @@
 import javax.annotation.processing.*;
 import javax.lang.model.SourceVersion;
 import javax.lang.model.util.*;
+import static javax.lang.model.SourceVersion.*;
 
 /**
  * An abstract annotation processor tailored to javac regression testing.
@@ -95,4 +96,164 @@
         messager  = processingEnv.getMessager();
         options   = processingEnv.getOptions();
     }
+
+    /*
+     * The set of visitors below will directly extend the most recent
+     * corresponding platform visitor type.
+     */
+
+    @SupportedSourceVersion(RELEASE_8)
+    public static abstract class AbstractAnnotationValueVisitor<R, P> extends AbstractAnnotationValueVisitor8<R, P> {
+
+        /**
+         * Constructor for concrete subclasses to call.
+         */
+        protected AbstractAnnotationValueVisitor() {
+            super();
+        }
+    }
+
+    @SupportedSourceVersion(RELEASE_8)
+    public static abstract class AbstractElementVisitor<R, P> extends AbstractElementVisitor8<R, P> {
+        /**
+         * Constructor for concrete subclasses to call.
+         */
+        protected AbstractElementVisitor(){
+            super();
+        }
+    }
+
+    @SupportedSourceVersion(RELEASE_8)
+    public static abstract class AbstractTypeVisitor<R, P> extends AbstractTypeVisitor8<R, P> {
+        /**
+         * Constructor for concrete subclasses to call.
+         */
+        protected AbstractTypeVisitor() {
+            super();
+        }
+    }
+
+    @SupportedSourceVersion(RELEASE_8)
+    public static class ElementKindVisitor<R, P> extends ElementKindVisitor8<R, P> {
+        /**
+         * Constructor for concrete subclasses; uses {@code null} for the
+         * default value.
+         */
+        protected ElementKindVisitor() {
+            super(null);
+        }
+
+        /**
+         * Constructor for concrete subclasses; uses the argument for the
+         * default value.
+         *
+         * @param defaultValue the value to assign to {@link #DEFAULT_VALUE}
+         */
+        protected ElementKindVisitor(R defaultValue) {
+            super(defaultValue);
+        }
+    }
+
+    @SupportedSourceVersion(RELEASE_8)
+    public static class ElementScanner<R, P> extends ElementScanner8<R, P> {
+        /**
+         * Constructor for concrete subclasses; uses {@code null} for the
+         * default value.
+         */
+        protected ElementScanner(){
+            super(null);
+        }
+
+        /**
+         * Constructor for concrete subclasses; uses the argument for the
+         * default value.
+         */
+        protected ElementScanner(R defaultValue){
+            super(defaultValue);
+        }
+    }
+
+    @SupportedSourceVersion(RELEASE_8)
+    public static class SimpleAnnotationValueVisitor<R, P> extends SimpleAnnotationValueVisitor8<R, P> {
+        /**
+         * Constructor for concrete subclasses; uses {@code null} for the
+         * default value.
+         */
+        protected SimpleAnnotationValueVisitor() {
+            super(null);
+        }
+
+        /**
+         * Constructor for concrete subclasses; uses the argument for the
+         * default value.
+         *
+         * @param defaultValue the value to assign to {@link #DEFAULT_VALUE}
+         */
+        protected SimpleAnnotationValueVisitor(R defaultValue) {
+            super(defaultValue);
+        }
+    }
+
+    @SupportedSourceVersion(RELEASE_8)
+    public static class SimpleElementVisitor<R, P> extends SimpleElementVisitor8<R, P> {
+        /**
+         * Constructor for concrete subclasses; uses {@code null} for the
+         * default value.
+         */
+        protected SimpleElementVisitor(){
+            super(null);
+        }
+
+        /**
+         * Constructor for concrete subclasses; uses the argument for the
+         * default value.
+         *
+         * @param defaultValue the value to assign to {@link #DEFAULT_VALUE}
+         */
+        protected SimpleElementVisitor(R defaultValue){
+            super(defaultValue);
+        }
+    }
+
+    @SupportedSourceVersion(RELEASE_8)
+    public static class SimpleTypeVisitor<R, P> extends SimpleTypeVisitor8<R, P> {
+        /**
+         * Constructor for concrete subclasses; uses {@code null} for the
+         * default value.
+         */
+        protected SimpleTypeVisitor(){
+            super(null);
+        }
+
+        /**
+         * Constructor for concrete subclasses; uses the argument for the
+         * default value.
+         *
+         * @param defaultValue the value to assign to {@link #DEFAULT_VALUE}
+         */
+        protected SimpleTypeVisitor(R defaultValue){
+            super(defaultValue);
+        }
+    }
+
+    @SupportedSourceVersion(RELEASE_8)
+    public static class TypeKindVisitor<R, P> extends TypeKindVisitor8<R, P> {
+        /**
+         * Constructor for concrete subclasses to call; uses {@code null}
+         * for the default value.
+         */
+        protected TypeKindVisitor() {
+            super(null);
+        }
+
+        /**
+         * Constructor for concrete subclasses to call; uses the argument
+         * for the default value.
+         *
+         * @param defaultValue the value to assign to {@link #DEFAULT_VALUE}
+         */
+        protected TypeKindVisitor(R defaultValue) {
+            super(defaultValue);
+        }
+    }
 }
--- a/langtools/test/tools/javac/multicatch/model/ModelChecker.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/multicatch/model/ModelChecker.java	Wed Aug 17 22:47:12 2011 -0700
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug 6993963
+ * @bug 6993963 7025809
  * @summary Project Coin: Use precise exception analysis for effectively final catch parameters
  * @library ../../lib
  * @build JavacTestingAbstractProcessor ModelChecker
@@ -107,7 +107,7 @@
             ; // Expected
         }
 
-        UnionType unionType = new SimpleTypeVisitor7<UnionType, Void>(){
+        UnionType unionType = new SimpleTypeVisitor<UnionType, Void>(){
             @Override
             protected UnionType defaultAction(TypeMirror e, Void p) {return null;}
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/parser/StringFoldingTest.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 7068902
+ * @summary verify that string folding can be enabled or disabled
+ */
+
+import com.sun.source.tree.CompilationUnitTree;
+import com.sun.source.util.JavacTask;
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaFileObject;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.ToolProvider;
+
+public class StringFoldingTest {
+    final JavaCompiler tool;
+    final JavaSource source;
+
+    public StringFoldingTest() {
+        tool = ToolProvider.getSystemJavaCompiler();
+        source = new JavaSource();
+    }
+
+    static class JavaSource extends SimpleJavaFileObject {
+
+        final static String source =
+                "class C {String X=\"F\" + \"O\" + \"L\" + \"D\" + \"E\" + \"D\";}";
+
+        JavaSource() {
+            super(URI.create("myfo:/C.java"), JavaFileObject.Kind.SOURCE);
+        }
+
+        @Override
+        public CharSequence getCharContent(boolean ignoreEncodingErrors) {
+            return source;
+        }
+    }
+
+    public static void main(String... args) throws IOException {
+        StringFoldingTest t = new StringFoldingTest();
+        t.run(false);
+        t.run(true);
+    }
+
+    void run(boolean disableStringFolding) throws IOException {
+        List<String> argsList = new ArrayList<String>();
+        if (disableStringFolding) {
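+            //disable string folding so the concatenation stays unfolded in the parsed tree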
+            argsList.add("-XDallowStringFolding=false");
+        }
+        JavacTask ct = (JavacTask)tool.getTask(null, null, null,
+                argsList,
+                null,
+                Arrays.asList(source));
+        Iterable<? extends CompilationUnitTree> trees = ct.parse();
+        String text = trees.toString();
+        System.out.println(text);
+
+        if (disableStringFolding) {
+            if (text.contains("FOLDED")) {
+                throw new AssertionError("Expected string folding");
+            }
+        } else {
+            if (!text.contains("FOLDED")) {
+                throw new AssertionError("Expected no string folding");
+            }
+        }
+    }
+}
--- a/langtools/test/tools/javac/processing/errors/TestOptionSyntaxErrors.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/processing/errors/TestOptionSyntaxErrors.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,14 +27,14 @@
  * @summary Test that annotation processor options with illegal syntax are rejected
  * @author  Joseph D. Darcy
  * @library ../../lib
- * @build JavacTestingAbstractProcessor
+ * @build JavacTestingAbstractProcessor CompileFail
  * @compile TestOptionSyntaxErrors.java
- * @compile/fail -A TestOptionSyntaxErrors.java
- * @compile/fail -A8adOption TestOptionSyntaxErrors.java
- * @compile/fail -A8adOption=1worseOption TestOptionSyntaxErrors.java
- * @compile/fail -processor TestOptionSyntaxErrors -proc:only -A TestOptionSyntaxErrors.java
- * @compile/fail -processor TestOptionSyntaxErrors -proc:only -A8adOption TestOptionSyntaxErrors.java
- * @compile/fail -processor TestOptionSyntaxErrors -proc:only -A8adOption=1worseOption TestOptionSyntaxErrors.java
+ * @run main CompileFail CMDERR -A TestOptionSyntaxErrors.java
+ * @run main CompileFail CMDERR -A8adOption TestOptionSyntaxErrors.java
+ * @run main CompileFail CMDERR -A8adOption=1worseOption TestOptionSyntaxErrors.java
+ * @run main CompileFail CMDERR -processor TestOptionSyntaxErrors -proc:only -A TestOptionSyntaxErrors.java
+ * @run main CompileFail CMDERR -processor TestOptionSyntaxErrors -proc:only -A8adOption TestOptionSyntaxErrors.java
+ * @run main CompileFail CMDERR -processor TestOptionSyntaxErrors -proc:only -A8adOption=1worseOption TestOptionSyntaxErrors.java
  */
 
 import java.util.Set;
--- a/langtools/test/tools/javac/processing/errors/TestReturnCode.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/processing/errors/TestReturnCode.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,25 +27,25 @@
  * @summary Test that an erroneous return code results from raising an error.
  * @author  Joseph D. Darcy
  * @library ../../lib
- * @build JavacTestingAbstractProcessor
+ * @build JavacTestingAbstractProcessor CompileFail
  * @compile TestReturnCode.java
  *
- * @compile      -processor TestReturnCode -proc:only                                                                   Foo.java
- * @compile/fail -processor TestReturnCode -proc:only                                                    -AErrorOnFirst Foo.java
- * @compile/fail -processor TestReturnCode -proc:only                                      -AErrorOnLast                Foo.java
- * @compile/fail -processor TestReturnCode -proc:only                                      -AErrorOnLast -AErrorOnFirst Foo.java
- * @compile/fail -processor TestReturnCode -proc:only                   -AExceptionOnFirst                              Foo.java
- * @compile/fail -processor TestReturnCode -proc:only                   -AExceptionOnFirst               -AErrorOnFirst Foo.java
- * @compile/fail -processor TestReturnCode -proc:only                   -AExceptionOnFirst -AErrorOnLast                Foo.java
- * @compile/fail -processor TestReturnCode -proc:only                   -AExceptionOnFirst -AErrorOnLast -AErrorOnFirst Foo.java
- * @compile/fail -processor TestReturnCode -proc:only -AExceptionOnLast                                                 Foo.java
- * @compile/fail -processor TestReturnCode -proc:only -AExceptionOnLast                                  -AErrorOnFirst Foo.java
- * @compile/fail -processor TestReturnCode -proc:only -AExceptionOnLast                    -AErrorOnLast                Foo.java
- * @compile/fail -processor TestReturnCode -proc:only -AExceptionOnLast                    -AErrorOnLast -AErrorOnFirst Foo.java
- * @compile/fail -processor TestReturnCode -proc:only -AExceptionOnLast -AExceptionOnFirst                              Foo.java
- * @compile/fail -processor TestReturnCode -proc:only -AExceptionOnLast -AExceptionOnFirst               -AErrorOnFirst Foo.java
- * @compile/fail -processor TestReturnCode -proc:only -AExceptionOnLast -AExceptionOnFirst -AErrorOnLast                Foo.java
- * @compile/fail -processor TestReturnCode -proc:only -AExceptionOnLast -AExceptionOnFirst -AErrorOnLast -AErrorOnFirst Foo.java
+ * @compile                     -processor TestReturnCode -proc:only                                                                   Foo.java
+ * @run main CompileFail ERROR  -processor TestReturnCode -proc:only                                                    -AErrorOnFirst Foo.java
+ * @run main CompileFail ERROR  -processor TestReturnCode -proc:only                                      -AErrorOnLast                Foo.java
+ * @run main CompileFail ERROR  -processor TestReturnCode -proc:only                                      -AErrorOnLast -AErrorOnFirst Foo.java
+ * @run main CompileFail SYSERR -processor TestReturnCode -proc:only                   -AExceptionOnFirst                              Foo.java
+ * @run main CompileFail SYSERR -processor TestReturnCode -proc:only                   -AExceptionOnFirst               -AErrorOnFirst Foo.java
+ * @run main CompileFail SYSERR -processor TestReturnCode -proc:only                   -AExceptionOnFirst -AErrorOnLast                Foo.java
+ * @run main CompileFail SYSERR -processor TestReturnCode -proc:only                   -AExceptionOnFirst -AErrorOnLast -AErrorOnFirst Foo.java
+ * @run main CompileFail SYSERR -processor TestReturnCode -proc:only -AExceptionOnLast                                                 Foo.java
+ * @run main CompileFail SYSERR -processor TestReturnCode -proc:only -AExceptionOnLast                                  -AErrorOnFirst Foo.java
+ * @run main CompileFail SYSERR -processor TestReturnCode -proc:only -AExceptionOnLast                    -AErrorOnLast                Foo.java
+ * @run main CompileFail SYSERR -processor TestReturnCode -proc:only -AExceptionOnLast                    -AErrorOnLast -AErrorOnFirst Foo.java
+ * @run main CompileFail SYSERR -processor TestReturnCode -proc:only -AExceptionOnLast -AExceptionOnFirst                              Foo.java
+ * @run main CompileFail SYSERR -processor TestReturnCode -proc:only -AExceptionOnLast -AExceptionOnFirst               -AErrorOnFirst Foo.java
+ * @run main CompileFail SYSERR -processor TestReturnCode -proc:only -AExceptionOnLast -AExceptionOnFirst -AErrorOnLast                Foo.java
+ * @run main CompileFail SYSERR -processor TestReturnCode -proc:only -AExceptionOnLast -AExceptionOnFirst -AErrorOnLast -AErrorOnFirst Foo.java
  */
 
 import java.util.Set;
--- a/langtools/test/tools/javac/processing/model/6194785/T6194785.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/processing/model/6194785/T6194785.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -21,7 +21,7 @@
  * questions.
  */
 
-/**
+/*
  * @test
  * @bug     6194785
  * @summary ParameterDeclaration.getSimpleName does not return actual name from class files
@@ -40,9 +40,8 @@
 
 public class T6194785 extends JavacTestingAbstractProcessor {
     public boolean process(Set<? extends TypeElement> annotations,
-                           RoundEnvironment roundEnvironment)
-    {
-        class Scan extends ElementScanner7<Void,Void> {
+                           RoundEnvironment roundEnvironment) {
+        class Scan extends ElementScanner<Void,Void> {
             @Override
             public Void visitExecutable(ExecutableElement e, Void ignored) {
                 for (VariableElement p : e.getParameters())
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/processing/model/TestSourceVersion.java	Wed Aug 17 22:47:12 2011 -0700
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 7025809
+ * @summary Test latest and latestSupported
+ * @author  Joseph D. Darcy
+ */
+
+import javax.lang.model.SourceVersion;
+import static javax.lang.model.SourceVersion.*;
+
+/**
+ * Verify latest[Supported] behavior.
+ */
+public class TestSourceVersion {
+    public static void main(String... args) {
+        if (SourceVersion.latest() != RELEASE_8 ||
+            SourceVersion.latestSupported() != RELEASE_8)
+            throw new RuntimeException("Unexpected release value(s) found:\n" +
+                                       "latest:\t" + SourceVersion.latest() + "\n" +
+                                       "latestSupported:\t" + SourceVersion.latestSupported());
+    }
+}
--- a/langtools/test/tools/javac/processing/model/TestSymtabItems.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/processing/model/TestSymtabItems.java	Wed Aug 17 22:47:12 2011 -0700
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug 7021183
+ * @bug 7021183 7025809
  * @summary 269: assertion failure getting enclosing element of an undefined name
  */
 
@@ -37,9 +37,7 @@
 import javax.lang.model.element.VariableElement;
 import javax.lang.model.type.TypeMirror;
 import javax.lang.model.type.UnknownTypeException;
-import javax.lang.model.util.ElementScanner7;
-import javax.lang.model.util.SimpleTypeVisitor7;
-import javax.lang.model.util.Types;
+import javax.lang.model.util.*;
 
 import com.sun.tools.javac.code.Symbol.ClassSymbol;
 import com.sun.tools.javac.code.Symtab;
@@ -112,7 +110,7 @@
 
     int errors;
 
-    class ElemPrinter extends ElementScanner7<Void, Void> {
+    class ElemPrinter extends ElementScanner8<Void, Void> {
         @Override
         public Void visitPackage(PackageElement e, Void p) {
             show("package", e);
--- a/langtools/test/tools/javac/processing/model/element/TestMissingElement/TestMissingElement.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/processing/model/element/TestMissingElement/TestMissingElement.java	Wed Aug 17 22:47:12 2011 -0700
@@ -24,7 +24,7 @@
 
 /*
  * @test
- * @bug 6639645 7026414
+ * @bug 6639645 7026414 7025809
  * @summary Modeling type implementing missing interfaces
  * @library ../../../../lib
  * @build JavacTestingAbstractProcessor TestMissingElement
@@ -104,7 +104,7 @@
     private String asString(TypeMirror t) {
         if (t == null)
             return "[typ:null]";
-        return t.accept(new SimpleTypeVisitor7<String, Void>() {
+        return t.accept(new SimpleTypeVisitor<String, Void>() {
             @Override
             public String defaultAction(TypeMirror t, Void ignore) {
                 return "[typ:" + t.toString() + "]";
@@ -135,7 +135,7 @@
     private String asString(Element e) {
         if (e == null)
             return "[elt:null]";
-        return e.accept(new SimpleElementVisitor7<String, Void>() {
+        return e.accept(new SimpleElementVisitor<String, Void>() {
             @Override
             public String defaultAction(Element e, Void ignore) {
                 return "[elt:" + e.getKind() + " " + e.toString() + "]";
--- a/langtools/test/tools/javac/processing/model/element/TestResourceVariable.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/processing/model/element/TestResourceVariable.java	Wed Aug 17 22:47:12 2011 -0700
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug  6911256 6964740 6967842 6961571
+ * @bug  6911256 6964740 6967842 6961571 7025809
  * @summary Test that the resource variable kind is appropriately set
  * @author  Joseph D. Darcy
  * @library ../../../lib
@@ -44,8 +44,8 @@
 
 /**
  * Using the tree API, retrieve element representations of the
- * resource of an ARM block and verify their kind tags are set
- * appropriately.
+ * resource of a try-with-resources statement and verify their kind
+ * tags are set appropriately.
  */
 public class TestResourceVariable extends JavacTestingAbstractProcessor implements AutoCloseable {
     int resourceVariableCount = 0;
@@ -82,7 +82,7 @@
 
     /**
      * Verify that a resource variable modeled as an element behaves
-     * as expected under 6 and 7 specific visitors.
+     * as expected under the version 6 and the latest version-specific visitors.
      */
     private static void testResourceVariable(Element element) {
         ElementVisitor visitor6 = new ElementKindVisitor6<Void, Void>() {};
@@ -94,7 +94,8 @@
             ; // Expected.
         }
 
-        ElementKindVisitor7 visitor7 = new ElementKindVisitor7<Object, Void>() {
+        ElementKindVisitor visitorLatest =
+            new ElementKindVisitor<Object, Void>() {
             @Override
             public Object visitVariableAsResourceVariable(VariableElement e,
                                                           Void p) {
@@ -102,7 +103,7 @@
             }
         };
 
-        if (visitor7.visit(element) == null) {
+        if (visitorLatest.visit(element) == null) {
             throw new RuntimeException("Null result of resource variable visitation.");
         }
     }
--- a/langtools/test/tools/javac/processing/model/type/NoTypes.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/processing/model/type/NoTypes.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug     6418666 6423973 6453386
+ * @bug     6418666 6423973 6453386 7025809
  * @summary Test the NoTypes: VOID, PACKAGE, NONE
  * @author  Scott Seligman
  * @library ../../../lib
@@ -75,7 +75,7 @@
         verifyKind(NONE, types.getNoType(NONE));
 
         // The return type of a constructor or void method is VOID.
-        class Scanner extends ElementScanner7<Void, Void> {
+        class Scanner extends ElementScanner<Void, Void> {
             @Override
             public Void visitExecutable(ExecutableElement e, Void p) {
                 verifyKind(VOID, e.getReturnType());
@@ -89,11 +89,11 @@
     }
 
     /**
-     * Verify that a NoType instance is of a particular kind,
-     * and that TypeKindVisitor7 properly dispatches on it.
+     * Verify that a NoType instance is of a particular kind, and that
+     * the latest TypeKindVisitor properly dispatches on it.
      */
     private void verifyKind(TypeKind kind, TypeMirror type) {
-        class Vis extends TypeKindVisitor7<TypeKind, Void> {
+        class Vis extends TypeKindVisitor<TypeKind, Void> {
             @Override
             public TypeKind visitNoTypeAsVoid(NoType t, Void p) {
                 return VOID;
@@ -111,9 +111,7 @@
             throw new AssertionError();
     }
 
-
     // Fodder for the tests
-
     interface I {
     }
 
--- a/langtools/test/tools/javac/processing/model/type/TestUnionType.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/processing/model/type/TestUnionType.java	Wed Aug 17 22:47:12 2011 -0700
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug     7029150
+ * @bug     7029150 7025809
  * @summary Test support for union types
  * @library ../../../lib
  */
@@ -39,7 +39,6 @@
 import com.sun.source.tree.*;
 import com.sun.source.util.*;
 
-
 public class TestUnionType extends JavacTestingAbstractProcessor {
     enum TestKind {
         SingleType("E1", "E1",
@@ -194,7 +193,7 @@
         }
     }
 
-    class TypePrinter extends SimpleTypeVisitor7<String, Void> {
+    class TypePrinter extends SimpleTypeVisitor<String, Void> {
         @Override
         protected String defaultAction(TypeMirror tm, Void ignore) {
             return String.valueOf(tm.getKind());
--- a/langtools/test/tools/javac/processing/model/util/deprecation/TestDeprecation.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/processing/model/util/deprecation/TestDeprecation.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,7 +68,7 @@
         return true;
     }
 
-    private class DeprecationChecker extends ElementScanner7<Boolean,Void> {
+    private class DeprecationChecker extends ElementScanner<Boolean,Void> {
         private Elements elementUtils;
         private boolean failure;
         DeprecationChecker() {
--- a/langtools/test/tools/javac/processing/warnings/TestSourceVersionWarnings.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/processing/warnings/TestSourceVersionWarnings.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug 6376083 6376084 6458819
+ * @bug 6376083 6376084 6458819 7025784 7025786 7025789
  * @summary Test that warnings about source versions are output as expected.
  * @author  Joseph D. Darcy
  * @compile TestSourceVersionWarnings.java
@@ -35,7 +35,8 @@
  * @compile/ref=gold_sv_warn_5_6.out   -XDrawDiagnostics -processor TestSourceVersionWarnings -proc:only -ASourceVersion=RELEASE_5 -source 1.6 -Xlint:-options HelloWorld.java
  * @compile/ref=gold_sv_none.out       -XDrawDiagnostics -processor TestSourceVersionWarnings -proc:only -ASourceVersion=RELEASE_6 -source 1.6 -Xlint:-options HelloWorld.java
  * @compile/ref=gold_unsp_warn.out     -XDrawDiagnostics -processor TestSourceVersionWarnings -proc:only -ASourceVersion=RELEASE_6 -source 1.6 -Xlint:-options -Aunsupported HelloWorld.java
- * @compile/ref=gold_sv_none.out       -XDrawDiagnostics -processor TestSourceVersionWarnings -proc:only -ASourceVersion=RELEASE_7 -source 1.7                 HelloWorld.java
+ * @compile/ref=gold_sv_none.out       -XDrawDiagnostics -processor TestSourceVersionWarnings -proc:only -ASourceVersion=RELEASE_7 -source 1.7 -Xlint:-options HelloWorld.java
+ * @compile/ref=gold_sv_none.out       -XDrawDiagnostics -processor TestSourceVersionWarnings -proc:only -ASourceVersion=RELEASE_8 -source 1.8                 HelloWorld.java
  */
 
 import java.util.Set;
@@ -51,7 +52,8 @@
 /**
  * This processor returns the supported source level as indicated by
  * the "SourceLevel" option; therefore, don't use
- * JavacTestingAbstractProcessor which returns the latest source level.
+ * JavacTestingAbstractProcessor which returns the latest source
+ * level.
  */
 @SupportedAnnotationTypes("*")
 @SupportedOptions("SourceVersion")
--- a/langtools/test/tools/javac/quid/T6999438.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/quid/T6999438.java	Wed Aug 17 22:47:12 2011 -0700
@@ -1,7 +1,7 @@
 /* @test /nodynamiccopyright/
  * @bug 6999438
  * @summary remove support for exotic identifiers from JDK 7
- * @compile/fail/ref=T6999438.out -XDrawDiagnostics -source 7 T6999438.java
+ * @compile/fail/ref=T6999438.out -XDrawDiagnostics T6999438.java
  */
 
 class Test {
--- a/langtools/test/tools/javac/util/T6597678.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/util/T6597678.java	Wed Aug 17 22:47:12 2011 -0700
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @bug 6597678
+ * @bug 6597678 6449184
  * @summary Ensure Messages propogated between rounds
  * @library ../lib
  * @build JavacTestingAbstractProcessor T6597678
@@ -42,26 +42,28 @@
 import com.sun.tools.javac.util.Context;
 import com.sun.tools.javac.util.JavacMessages;
 
+@SupportedOptions("WriterString")
 public class T6597678 extends JavacTestingAbstractProcessor {
     public static void main(String... args) throws Exception {
         new T6597678().run();
     }
 
-
     void run() throws Exception {
         String myName = T6597678.class.getSimpleName();
         File testSrc = new File(System.getProperty("test.src"));
         File file = new File(testSrc, myName + ".java");
 
-        compile(
+        StringWriter sw = new StringWriter();
+        PrintWriter pw = new PrintWriter(sw);
+
+        compile(sw, pw,
             "-proc:only",
             "-processor", myName,
+            "-AWriterString=" + pw.toString(),
             file.getPath());
     }
 
-    void compile(String... args) throws Exception {
-        StringWriter sw = new StringWriter();
-        PrintWriter pw = new PrintWriter(sw);
+    void compile(StringWriter sw, PrintWriter pw, String... args) throws Exception {
         int rc = com.sun.tools.javac.Main.compile(args, pw);
         pw.close();
         String out = sw.toString();
@@ -76,6 +78,7 @@
     @Override
     public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) {
         Context context = ((JavacProcessingEnvironment) processingEnv).getContext();
+        PrintWriter out = ((JavacProcessingEnvironment) processingEnv).getWriter();
         Locale locale = context.get(Locale.class);
         JavacMessages messages = context.get(JavacMessages.messagesKey);
 
@@ -83,9 +86,13 @@
         if (round == 1) {
             initialLocale = locale;
             initialMessages = messages;
+            initialWriter = out;
+
+            checkEqual("writerString", out.toString().intern(), options.get("WriterString").intern());
         } else {
             checkEqual("locale", locale, initialLocale);
             checkEqual("messages", messages, initialMessages);
+            checkEqual("writer", out, initialWriter);
         }
 
         return true;
@@ -102,4 +109,5 @@
     int round = 0;
     Locale initialLocale;
     JavacMessages initialMessages;
+    PrintWriter initialWriter;
 }
--- a/langtools/test/tools/javac/versions/check.sh	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/versions/check.sh	Wed Aug 17 22:47:12 2011 -0700
@@ -22,7 +22,7 @@
 #
 
 # @test
-# @bug 4981566 5028634 5094412 6304984
+# @bug 4981566 5028634 5094412 6304984 7025786 7025789
 # @summary Check interpretation of -target and -source options
 # @build CheckClassFileVersion
 # @run shell check.sh 
@@ -65,9 +65,14 @@
 check 51.0 -source 6
 check 51.0 -source 1.7
 check 51.0 -source 7
-check 51.0 -target 1.7
-check 51.0 -target 7
+check 51.0 -source 7 -target 1.7
+check 51.0 -source 7 -target 7
 
+# Update when class file version is revved
+check 51.0 -source 1.8
+check 51.0 -source 8
+check 51.0 -target 1.8
+check 51.0 -target 8
 
 # Check source versions
 
@@ -96,6 +101,7 @@
 checksrc15() { pass $* $TC/X.java; pass $* $TC/Y.java; }
 checksrc16() { checksrc15 $* ; }
 checksrc17() { checksrc15 $* ; }
+checksrc18() { checksrc15 $* ; }
 
 checksrc14 -source 1.4
 checksrc14 -source 1.4 -target 1.5
@@ -108,16 +114,24 @@
 checksrc16 -source 1.6 -target 1.6
 checksrc16 -source 6 -target 6
 
-checksrc17
-checksrc17 -target 1.7
-checksrc17 -target 7
 checksrc17 -source 1.7
 checksrc17 -source 7
 checksrc17 -source 1.7 -target 1.7
 checksrc17 -source 7 -target 7
 
+checksrc18
+checksrc18 -target 1.8
+checksrc18 -target 8
+checksrc18 -source 1.8
+checksrc18 -source 8
+checksrc18 -source 1.8 -target 1.8
+checksrc18 -source 8 -target 8
+
 fail -source 1.5 -target 1.4 $TC/X.java
 fail -source 1.6 -target 1.4 $TC/X.java
 fail -source 6   -target 1.4 $TC/X.java
 fail -source 1.6 -target 1.5 $TC/X.java
 fail -source 6   -target 1.5 $TC/X.java
+fail -source 7   -target 1.6 $TC/X.java
+fail -source 8   -target 1.6 $TC/X.java
+fail -source 8   -target 1.7 $TC/X.java
--- a/langtools/test/tools/javac/warnings/Serial.java	Wed Aug 17 15:18:16 2011 -0700
+++ b/langtools/test/tools/javac/warnings/Serial.java	Wed Aug 17 22:47:12 2011 -0700
@@ -29,7 +29,6 @@
  * @compile -Xlint:all Serial.java
  * @compile -Werror Serial.java
  * @compile/fail -Werror -Xlint:serial Serial.java
- * @compile/fail -Werror -Xlint:all,-path T4994049/ Serial.java
  */
 
 import java.io.Serializable;
--- a/make/jprt.properties	Wed Aug 17 15:18:16 2011 -0700
+++ b/make/jprt.properties	Wed Aug 17 22:47:12 2011 -0700
@@ -25,22 +25,12 @@
 
 # Properties for jprt
 
-# At submit time, the release supplied will be in jprt.submit.release
-#    and will be one of the official release names defined in jprt.
-#    jprt supports property value expansion using ${property.name} syntax.
-
-# This tells jprt what default release we want to build
-jprt.tools.default.release=${jprt.submit.release}
+# Release to build
+jprt.tools.default.release=jdk8
 
 # The different build flavors we want, we override here so we just get these 2
 jprt.build.flavors=product,fastdebug
 
-# Define the Windows we want (temporary)
-jprt.my.windows.i586.jdk7b107=windows_i586_5.0
-jprt.my.windows.i586.jdk7temp=windows_i586_5.0
-jprt.my.windows.i586.jdk7=windows_i586_5.1
-jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
-
 # Standard list of jprt build targets for this source tree
 jprt.build.targets= 						\
     solaris_sparc_5.10-{product|fastdebug}, 			\
@@ -49,7 +39,7 @@
     solaris_x64_5.10-{product|fastdebug}, 			\
     linux_i586_2.6-{product|fastdebug}, 			\
     linux_x64_2.6-{product|fastdebug}, 				\
-    ${jprt.my.windows.i586}-{product|fastdebug}, 		\
+    windows_i586_5.1-{product|fastdebug}, 			\
     windows_x64_5.2-{product|fastdebug}
 
 # User can select the test set with jprt submit "-testset name" option
@@ -64,7 +54,7 @@
     solaris_x64_5.10-product-c2-jvm98, 				\
     linux_i586_2.6-product-{c1|c2}-jvm98, 			\
     linux_x64_2.6-product-c2-jvm98, 				\
-    ${jprt.my.windows.i586}-product-c1-jvm98, 			\
+    windows_i586_5.1-product-c1-jvm98, 				\
     windows_x64_5.2-product-c2-jvm98, 				\
     								\
     solaris_sparc_5.10-product-c1-scimark, 			\
@@ -73,7 +63,7 @@
     solaris_x64_5.10-product-c2-scimark, 			\
     linux_i586_2.6-product-{c1|c2}-scimark, 			\
     linux_x64_2.6-product-c2-scimark, 				\
-    ${jprt.my.windows.i586}-product-c1-scimark, 		\
+    windows_i586_5.1-product-c1-scimark, 			\
     windows_x64_5.2-product-c2-scimark
 
 # Default jdk test targets in test/Makefile (no fastdebug & limited c2 testing)
@@ -85,7 +75,7 @@
     solaris_x64_5.10-product-c2-langtools_jtreg, 		\
     linux_i586_2.6-product-{c1|c2}-langtools_jtreg, 		\
     linux_x64_2.6-product-c2-langtools_jtreg, 			\
-    ${jprt.my.windows.i586}-product-c1-langtools_jtreg, 	\
+    windows_i586_5.1-product-c1-langtools_jtreg, 		\
     windows_x64_5.2-product-c2-langtools_jtreg, 		\
     								\
     solaris_sparc_5.10-product-c1-jdk_beans1, 			\
@@ -94,7 +84,7 @@
     solaris_x64_5.10-product-c2-jdk_beans1, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_beans1, 			\
     linux_x64_2.6-product-c2-jdk_beans1, 			\
-    ${jprt.my.windows.i586}-product-c1-jdk_beans1, 		\
+    windows_i586_5.1-product-c1-jdk_beans1, 			\
     windows_x64_5.2-product-c2-jdk_beans1, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_io, 			\
@@ -103,7 +93,7 @@
     solaris_x64_5.10-product-c2-jdk_io, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_io, 			\
     linux_x64_2.6-product-c2-jdk_io, 				\
-    ${jprt.my.windows.i586}-product-c1-jdk_io, 			\
+    windows_i586_5.1-product-c1-jdk_io, 			\
     windows_x64_5.2-product-c2-jdk_io, 				\
     								\
     solaris_sparc_5.10-product-c1-jdk_lang, 			\
@@ -112,7 +102,7 @@
     solaris_x64_5.10-product-c2-jdk_lang, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_lang, 			\
     linux_x64_2.6-product-c2-jdk_lang, 				\
-    ${jprt.my.windows.i586}-product-c1-jdk_lang, 		\
+    windows_i586_5.1-product-c1-jdk_lang, 			\
     windows_x64_5.2-product-c2-jdk_lang, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_math, 			\
@@ -121,7 +111,7 @@
     solaris_x64_5.10-product-c2-jdk_math, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_math, 			\
     linux_x64_2.6-product-c2-jdk_math, 				\
-    ${jprt.my.windows.i586}-product-c1-jdk_math, 		\
+    windows_i586_5.1-product-c1-jdk_math, 			\
     windows_x64_5.2-product-c2-jdk_math, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_misc, 			\
@@ -130,7 +120,7 @@
     solaris_x64_5.10-product-c2-jdk_misc, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_misc, 			\
     linux_x64_2.6-product-c2-jdk_misc, 				\
-    ${jprt.my.windows.i586}-product-c1-jdk_misc, 		\
+    windows_i586_5.1-product-c1-jdk_misc, 			\
     windows_x64_5.2-product-c2-jdk_misc, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_net, 			\
@@ -139,7 +129,7 @@
     solaris_x64_5.10-product-c2-jdk_net, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_net, 			\
     linux_x64_2.6-product-c2-jdk_net, 				\
-    ${jprt.my.windows.i586}-product-c1-jdk_net, 		\
+    windows_i586_5.1-product-c1-jdk_net, 			\
     windows_x64_5.2-product-c2-jdk_net, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_nio1, 			\
@@ -148,7 +138,7 @@
     solaris_x64_5.10-product-c2-jdk_nio1, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_nio1, 			\
     linux_x64_2.6-product-c2-jdk_nio1, 				\
-    ${jprt.my.windows.i586}-product-c1-jdk_nio1, 		\
+    windows_i586_5.1-product-c1-jdk_nio1, 			\
     windows_x64_5.2-product-c2-jdk_nio1, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_nio2, 			\
@@ -157,7 +147,7 @@
     solaris_x64_5.10-product-c2-jdk_nio2, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_nio2, 			\
     linux_x64_2.6-product-c2-jdk_nio2, 				\
-    ${jprt.my.windows.i586}-product-c1-jdk_nio2, 		\
+    windows_i586_5.1-product-c1-jdk_nio2, 			\
     windows_x64_5.2-product-c2-jdk_nio2, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_nio3, 			\
@@ -166,7 +156,7 @@
     solaris_x64_5.10-product-c2-jdk_nio3, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_nio3, 			\
     linux_x64_2.6-product-c2-jdk_nio3, 				\
-    ${jprt.my.windows.i586}-product-c1-jdk_nio3, 		\
+    windows_i586_5.1-product-c1-jdk_nio3, 			\
     windows_x64_5.2-product-c2-jdk_nio3, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_security1, 		\
@@ -175,7 +165,7 @@
     solaris_x64_5.10-product-c2-jdk_security1, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_security1, 		\
     linux_x64_2.6-product-c2-jdk_security1, 			\
-    ${jprt.my.windows.i586}-product-c1-jdk_security1, 		\
+    windows_i586_5.1-product-c1-jdk_security1, 			\
     windows_x64_5.2-product-c2-jdk_security1, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_text, 			\
@@ -184,7 +174,7 @@
     solaris_x64_5.10-product-c2-jdk_text, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_text, 			\
     linux_x64_2.6-product-c2-jdk_text, 				\
-    ${jprt.my.windows.i586}-product-c1-jdk_text, 		\
+    windows_i586_5.1-product-c1-jdk_text, 			\
     windows_x64_5.2-product-c2-jdk_text, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_tools1, 			\
@@ -193,7 +183,7 @@
     solaris_x64_5.10-product-c2-jdk_tools1, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_tools1, 			\
     linux_x64_2.6-product-c2-jdk_tools1, 			\
-    ${jprt.my.windows.i586}-product-c1-jdk_tools1, 		\
+    windows_i586_5.1-product-c1-jdk_tools1, 			\
     windows_x64_5.2-product-c2-jdk_tools1, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_util, 			\
@@ -202,7 +192,7 @@
     solaris_x64_5.10-product-c2-jdk_util, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_util, 			\
     linux_x64_2.6-product-c2-jdk_util, 				\
-    ${jprt.my.windows.i586}-product-c1-jdk_util, 		\
+    windows_i586_5.1-product-c1-jdk_util, 			\
     windows_x64_5.2-product-c2-jdk_util
 
 # All vm test targets (but still no fastdebug & limited c2 testing)
@@ -216,7 +206,7 @@
     solaris_x64_5.10-product-c2-runThese, 			\
     linux_i586_2.6-product-{c1|c2}-runThese, 			\
     linux_x64_2.6-product-c2-runThese, 				\
-    ${jprt.my.windows.i586}-product-c1-runThese, 		\
+    windows_i586_5.1-product-c1-runThese, 			\
     windows_x64_5.2-product-c2-runThese, 			\
     								\
     solaris_sparc_5.10-product-c1-jbb_default, 			\
@@ -225,7 +215,7 @@
     solaris_x64_5.10-product-c2-jbb_default, 			\
     linux_i586_2.6-product-{c1|c2}-jbb_default, 		\
     linux_x64_2.6-product-c2-jbb_default, 			\
-    ${jprt.my.windows.i586}-product-c1-jbb_default, 		\
+    windows_i586_5.1-product-c1-jbb_default, 			\
     windows_x64_5.2-product-c2-jbb_default
 
 # All jdk test targets (but still no fastdebug & limited c2 testing)
@@ -239,7 +229,7 @@
     solaris_x64_5.10-product-c2-jdk_awt, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_awt, 			\
     linux_x64_2.6-product-c2-jdk_awt, 				\
-    ${jprt.my.windows.i586}-product-c1-jdk_awt, 		\
+    windows_i586_5.1-product-c1-jdk_awt, 			\
     windows_x64_5.2-product-c2-jdk_awt, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_beans2, 			\
@@ -248,7 +238,7 @@
     solaris_x64_5.10-product-c2-jdk_beans2, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_beans2, 			\
     linux_x64_2.6-product-c2-jdk_beans2, 			\
-    ${jprt.my.windows.i586}-product-c1-jdk_beans2, 		\
+    windows_i586_5.1-product-c1-jdk_beans2, 			\
     windows_x64_5.2-product-c2-jdk_beans2, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_beans3, 			\
@@ -257,7 +247,7 @@
     solaris_x64_5.10-product-c2-jdk_beans3, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_beans3, 			\
     linux_x64_2.6-product-c2-jdk_beans3, 			\
-    ${jprt.my.windows.i586}-product-c1-jdk_beans3, 		\
+    windows_i586_5.1-product-c1-jdk_beans3, 			\
     windows_x64_5.2-product-c2-jdk_beans3, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_management1, 		\
@@ -266,7 +256,7 @@
     solaris_x64_5.10-product-c2-jdk_management1, 		\
     linux_i586_2.6-product-{c1|c2}-jdk_management1, 		\
     linux_x64_2.6-product-c2-jdk_management1, 			\
-    ${jprt.my.windows.i586}-product-c1-jdk_management1, 	\
+    windows_i586_5.1-product-c1-jdk_management1, 		\
     windows_x64_5.2-product-c2-jdk_management1, 		\
     								\
     solaris_sparc_5.10-product-c1-jdk_management2, 		\
@@ -275,7 +265,7 @@
     solaris_x64_5.10-product-c2-jdk_management2, 		\
     linux_i586_2.6-product-{c1|c2}-jdk_management2, 		\
     linux_x64_2.6-product-c2-jdk_management2, 			\
-    ${jprt.my.windows.i586}-product-c1-jdk_management2, 	\
+    windows_i586_5.1-product-c1-jdk_management2, 		\
     windows_x64_5.2-product-c2-jdk_management2, 		\
     								\
     solaris_sparc_5.10-product-c1-jdk_rmi, 			\
@@ -284,7 +274,7 @@
     solaris_x64_5.10-product-c2-jdk_rmi, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_rmi, 			\
     linux_x64_2.6-product-c2-jdk_rmi, 				\
-    ${jprt.my.windows.i586}-product-c1-jdk_rmi, 		\
+    windows_i586_5.1-product-c1-jdk_rmi, 			\
     windows_x64_5.2-product-c2-jdk_rmi, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_security2, 		\
@@ -293,7 +283,7 @@
     solaris_x64_5.10-product-c2-jdk_security2, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_security2, 		\
     linux_x64_2.6-product-c2-jdk_security2, 			\
-    ${jprt.my.windows.i586}-product-c1-jdk_security2, 		\
+    windows_i586_5.1-product-c1-jdk_security2, 			\
     windows_x64_5.2-product-c2-jdk_security2, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_security3, 		\
@@ -302,7 +292,7 @@
     solaris_x64_5.10-product-c2-jdk_security3, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_security3, 		\
     linux_x64_2.6-product-c2-jdk_security3, 			\
-    ${jprt.my.windows.i586}-product-c1-jdk_security3, 		\
+    windows_i586_5.1-product-c1-jdk_security3, 			\
     windows_x64_5.2-product-c2-jdk_security3, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_swing, 			\
@@ -311,7 +301,7 @@
     solaris_x64_5.10-product-c2-jdk_swing, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_swing, 			\
     linux_x64_2.6-product-c2-jdk_swing, 			\
-    ${jprt.my.windows.i586}-product-c1-jdk_swing, 		\
+    windows_i586_5.1-product-c1-jdk_swing, 			\
     windows_x64_5.2-product-c2-jdk_swing, 			\
     								\
     solaris_sparc_5.10-product-c1-jdk_tools2, 			\
@@ -320,7 +310,7 @@
     solaris_x64_5.10-product-c2-jdk_tools2, 			\
     linux_i586_2.6-product-{c1|c2}-jdk_tools2, 			\
     linux_x64_2.6-product-c2-jdk_tools2, 			\
-    ${jprt.my.windows.i586}-product-c1-jdk_tools2, 		\
+    windows_i586_5.1-product-c1-jdk_tools2, 			\
     windows_x64_5.2-product-c2-jdk_tools2
 
 # JCK test targets in test/Makefile (no fastdebug & limited c2, windows broken)
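
With the release now fixed at jdk8, the ${jprt.my.windows.i586.*} indirection (which picked windows_i586_5.0 or 5.1 per release via the ${property.name} expansion noted in the removed comments) is replaced throughout by the literal windows_i586_5.1. The sketch below illustrates that style of nested property expansion in general terms; it is not jprt's actual resolver, and the class name ExpandProps is illustrative.

    import java.util.Properties;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Illustrative only: resolve ${name} references innermost-first until
    // the value stops changing, so ${a.${b}} works in two passes.
    public class ExpandProps {
        private static final Pattern INNERMOST = Pattern.compile("\\$\\{([^${}]+)\\}");

        static String expand(String value, Properties props) {
            String prev;
            do {
                prev = value;
                Matcher m = INNERMOST.matcher(value);
                StringBuffer sb = new StringBuffer();
                while (m.find()) {
                    // Unknown names are left as-is, which ends the loop.
                    String repl = props.getProperty(m.group(1), m.group());
                    m.appendReplacement(sb, Matcher.quoteReplacement(repl));
                }
                m.appendTail(sb);
                value = sb.toString();
            } while (!value.equals(prev));
            return value;
        }

        public static void main(String[] args) {
            Properties p = new Properties();
            p.setProperty("jprt.tools.default.release", "jdk7");
            p.setProperty("jprt.my.windows.i586.jdk7", "windows_i586_5.1");
            System.out.println(
                expand("${jprt.my.windows.i586.${jprt.tools.default.release}}", p));
        }
    }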
--- a/make/sanity-rules.gmk	Wed Aug 17 15:18:16 2011 -0700
+++ b/make/sanity-rules.gmk	Wed Aug 17 22:47:12 2011 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,7 @@
 
 sanity:: post-sanity
 
-source-sanity : pre-sanity alt_bootdir bootdir post-sanity
+source-sanity : pre-sanity post-sanity
 
 $(ERROR_FILE) $(WARNING_FILE) $(MESSAGE_FILE):
 	@$(prep-target)
@@ -224,45 +224,6 @@
 endif
 
 ######################################################
-# if specified, ALT_BOOTDIR must point to non-relative path if set
-######################################################
-alt_bootdir:
-ifdef ALT_BOOTDIR
-	@if [ `$(ECHO) $(subst \,/,$(ALT_BOOTDIR)) | $(EGREP) -c '^([A-Za-z]:)?/'` -ne 1 ]; then \
-	  $(ECHO) "ERROR: ALT_BOOTDIR must be an Absolute Path Name, \n" \
-	    "      not a Relative Path Name. \n" \
-	    "      The current value of ALT_BOOTDIR is \n" \
-	    "          $(ALT_BOOTDIR) \n" \
-	    "      Please fix this and continue your build. \n" \
-	    "" >> $(ERROR_FILE) ; \
-	fi
-endif
-
-######################################################
-# BOOTDIR must point to a valid JDK.
-######################################################
-JAVA_VERSION := $(shell $(BOOTDIR)/bin/java$(EXE_SUFFIX) -version 2>&1  | $(NAWK) -F'"' '{ print $$2 }')
-JAVA_VERSION_OK := $(shell $(ECHO) $(JAVA_VERSION) | $(EGREP) -c '^1.[5-6].[0-9]')
-bootdir:
-	@if [ -x $(BOOTDIR)/bin/java$(EXE_SUFFIX) ]; then \
-	  if [ $(JAVA_VERSION_OK) -ne 1 ]; then \
-	    $(ECHO) "ERROR: Your BOOTDIR environment variable does not point \n" \
-	      "      to a valid JDK for bootstrapping this build. \n" \
-	      "      A JDK $(JDK_MKTG_VERSION) source generation process must be \n" \
-	      "      bootstrapped using JDK $(PREVIOUS_JDK_VERSION) fcs (or later). \n" \
-	      "      Apparently, your bootstrap JDK is version $(JAVA_VERSION) \n" \
-	      "      Please update your ALT_BOOTDIR setting and start your build again. \n" \
-	      "" >> $(ERROR_FILE) ; \
-	  fi \
-	else \
-	  $(ECHO) "ERROR: BOOTDIR does not point to a valid Java 2 SDK \n" \
-	    "      Check that you have access to \n" \
-	    "          $(BOOTDIR)/bin/java$(EXE_SUFFIX) \n" \
-	    "      and/or check your value of ALT_BOOTDIR. \n" \
-	    "" >> $(ERROR_FILE) ; \
-	fi
-
-######################################################
 # dump out the variable settings...
 ######################################################
 ifneq ($(PLATFORM), windows)
@@ -350,4 +311,4 @@
 .PHONY: sanity settings pre-sanity insane \
 	post-sanity post-sanity-hotspot post-sanity-jdk \
 	post-sanity-install post-sanity-deploy \
-	alt_bootdir bootdir environment
+	environment
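
The removed bootdir rule probed $(BOOTDIR)/bin/java -version, pulled the quoted version string with awk, and accepted only 1.5.x or 1.6.x bootstrap JDKs. A rough sketch of the same probe in Java terms follows; the class name CheckBootJdk is illustrative and this is not part of the build, which no longer performs the check.

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.util.regex.Pattern;

    // Illustrative only: run "<bootdir>/bin/java -version", take the text
    // between the first pair of quotes, and accept 1.5.* or 1.6.*.
    public class CheckBootJdk {
        public static void main(String[] args)
                throws IOException, InterruptedException {
            Process p = new ProcessBuilder(args[0] + "/bin/java", "-version")
                    .redirectErrorStream(true)   // -version prints to stderr
                    .start();
            String version = null;
            try (BufferedReader r = new BufferedReader(
                    new InputStreamReader(p.getInputStream()))) {
                String line = r.readLine();      // e.g. java version "1.6.0_26"
                if (line != null) {
                    String[] parts = line.split("\"");
                    if (parts.length > 1)
                        version = parts[1];
                }
            }
            p.waitFor();
            boolean ok = version != null
                    && Pattern.matches("1\\.[5-6]\\.[0-9].*", version);
            System.out.println(version + (ok ? ": OK" : ": not a usable bootstrap JDK"));
        }
    }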