Merge
author bharadwaj
Thu, 04 Apr 2013 17:01:34 -0700
changeset 16631 c6860a0ebc99
parent 16610 899daa32f6a0 (current diff)
parent 16630 3dea566541da (diff)
child 16632 7d026deaf1db
Merge
hotspot/make/bsd/build.sh
hotspot/make/linux/build.sh
hotspot/make/solaris/build.sh
hotspot/src/os/windows/vm/os_windows.cpp
hotspot/src/share/vm/classfile/systemDictionary.cpp
hotspot/src/share/vm/classfile/systemDictionary.hpp
hotspot/src/share/vm/oops/method.cpp
hotspot/src/share/vm/runtime/globals.hpp
--- a/hotspot/make/bsd/build.sh	Thu Apr 04 19:07:32 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,95 +0,0 @@
-#! /bin/sh
-#
-# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#  
-#
-
-# Make sure the variable JAVA_HOME is set before running this script.
-
-set -u
-
-
-if [ $# != 2 ]; then 
-    echo "Usage : $0 Build_Options Location"
-    echo "Build Options : debug or optimized or basicdebug or basic or clean"
-    echo "Location : specify any workspace which has gamma sources"
-    exit 1
-fi
-
-# Just in case:
-case ${JAVA_HOME} in
-/*) true;;
-?*) JAVA_HOME=`( cd $JAVA_HOME; pwd )`;;
-esac
-
-case `uname -m` in
-  i386|i486|i586|i686)
-    mach=i386
-    ;;
-  *)
-    echo "Unsupported machine: " `uname -m`
-    exit 1
-    ;;
-esac
-
-if [ "${JAVA_HOME}" = ""  -o  ! -d "${JAVA_HOME}" -o ! -d ${JAVA_HOME}/jre/lib/${mach} ]; then
-    echo "JAVA_HOME needs to be set to a valid JDK path"
-    echo "ksh : export JAVA_HOME=/net/tetrasparc/export/gobi/JDK1.2_fcs_V/bsd"
-    echo "csh : setenv JAVA_HOME /net/tetrasparc/export/gobi/JDK1.2_fcs_V/bsd"
-    exit 1
-fi
-
-
-LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/`uname -p`:\
-${JAVA_HOME}/jre/lib/`uname -p`/native_threads:${LD_LIBRARY_PATH-.}
-
-# This is necessary as long as we are using the old launcher
-# with the new distribution format:
-CLASSPATH=${JAVA_HOME}/jre/lib/rt.jar:${CLASSPATH-.}
-
-
-for gm in gmake gnumake
-do
-  if [ "${GNUMAKE-}" != "" ]; then break; fi
-  ($gm --version >/dev/null) 2>/dev/null && GNUMAKE=$gm
-done
-: ${GNUMAKE:?'Cannot locate the gnumake program.  Stop.'}
-
-
-echo "### ENVIRONMENT SETTINGS:"
-export JAVA_HOME		; echo "JAVA_HOME=$JAVA_HOME"
-export LD_LIBRARY_PATH		; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
-export CLASSPATH		; echo "CLASSPATH=$CLASSPATH"
-export GNUMAKE			; echo "GNUMAKE=$GNUMAKE"
-echo "###"
-
-Build_Options=$1
-Location=$2
-
-case ${Location} in
-/*) true;;
-?*) Location=`(cd ${Location}; pwd)`;;
-esac
-
-echo \
-${GNUMAKE} -f ${Location}/make/bsd/Makefile $Build_Options GAMMADIR=${Location}
-${GNUMAKE} -f ${Location}/make/bsd/Makefile $Build_Options GAMMADIR=${Location}
--- a/hotspot/make/bsd/makefiles/buildtree.make	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/make/bsd/makefiles/buildtree.make	Thu Apr 04 17:01:34 2013 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -190,6 +190,17 @@
 # literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.  
 gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
 
+# This bit is needed to enable local rebuilds.
+# Unless the makefile itself sets LP64, any environmental
+# setting of LP64 will interfere with the build.
+LP64_SETTING/32 = LP64 = \#empty
+LP64_SETTING/64 = LP64 = 1
+
+DATA_MODE/i486 = 32
+DATA_MODE/amd64 = 64
+
+DATA_MODE = $(DATA_MODE/$(BUILDARCH))
+
 flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
@@ -212,6 +223,7 @@
 	echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \
 	echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \
 	echo "OPENJDK = $(OPENJDK)"; \
+	echo "$(LP64_SETTING/$(DATA_MODE))"; \
 	echo; \
 	echo "# Used for platform dispatching"; \
 	echo "TARGET_DEFINES  = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/make/build.sh	Thu Apr 04 17:01:34 2013 -0700
@@ -0,0 +1,92 @@
+#! /bin/sh
+#
+# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Make sure the variable JAVA_HOME is set before running this script.
+
+set -u
+
+
+if [ $# -lt 1 ]; then 
+    echo "Usage : $0 BuildTarget [LP64=1] [BuildOptions]"
+    echo "               Server VM | Client VM"
+    echo "BuildTarget :  debug     | debug1"
+    echo "               fastdebug | fastdebug1"
+    echo "               jvmg      | jvmg1"
+    echo "               optimized | optimized1"
+    echo "               profiled  | profiled1"
+    echo "               product   | product1"
+    exit 1
+fi
+
+if [ "${JAVA_HOME-}" = ""  -o  ! -d "${JAVA_HOME-}" -o ! -d ${JAVA_HOME-}/jre/lib/ ]; then
+    echo "JAVA_HOME needs to be set to a valid JDK path"
+    echo "JAVA_HOME: ${JAVA_HOME-}"
+    exit 1
+fi
+
+# Just in case:
+JAVA_HOME=`( cd $JAVA_HOME; pwd )`
+
+if [ "${ALT_BOOTDIR-}" = ""  -o  ! -d "${ALT_BOOTDIR-}" -o ! -d ${ALT_BOOTDIR-}/jre/lib/ ]; then
+    ALT_BOOTDIR=${JAVA_HOME}
+fi
+
+# build in current directory by default
+if [ "${ALT_OUTPUTDIR-}" = ""  -o  ! -d "${ALT_OUTPUTDIR-}" ]; then
+    ALT_OUTPUTDIR=`(pwd)`
+fi
+
+HOTSPOT_SRC=`(dirname $0)`/..
+HOTSPOT_SRC=`(cd ${HOTSPOT_SRC}; pwd)`
+
+for gm in gmake gnumake
+do
+  if [ "${GNUMAKE-}" != "" ]; then break; fi
+  ($gm --version >/dev/null) 2>/dev/null && GNUMAKE=$gm
+done
+: ${GNUMAKE:?'Cannot locate the gnumake program.  Stop.'}
+
+# quiet build by default
+Quiet="MAKE_VERBOSE="
+
+# no debug info by default
+NoDebugInfo="ENABLE_FULL_DEBUG_SYMBOLS="
+
+LANG=C
+
+echo "### ENVIRONMENT SETTINGS:"
+export HOTSPOT_SRC		; echo "HOTSPOT_SRC=$HOTSPOT_SRC"
+export JAVA_HOME		; echo "JAVA_HOME=$JAVA_HOME"
+export ALT_BOOTDIR		; echo "ALT_BOOTDIR=$ALT_BOOTDIR"
+export ALT_OUTPUTDIR		; echo "ALT_OUTPUTDIR=$ALT_OUTPUTDIR"
+export GNUMAKE			; echo "GNUMAKE=$GNUMAKE"
+export LANG			; echo "LANG=$LANG"
+echo "###"
+
+BuildOptions="$Quiet $NoDebugInfo $*"
+
+echo \
+${GNUMAKE} -f ${HOTSPOT_SRC}/make/Makefile $BuildOptions GAMMADIR=${HOTSPOT_SRC}
+${GNUMAKE} -f ${HOTSPOT_SRC}/make/Makefile $BuildOptions GAMMADIR=${HOTSPOT_SRC}
--- a/hotspot/make/linux/build.sh	Thu Apr 04 19:07:32 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,98 +0,0 @@
-#! /bin/sh
-#
-# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#  
-#
-
-# Make sure the variable JAVA_HOME is set before running this script.
-
-set -u
-
-
-if [ $# != 2 ]; then 
-    echo "Usage : $0 Build_Options Location"
-    echo "Build Options : debug or optimized or basicdebug or basic or clean"
-    echo "Location : specify any workspace which has gamma sources"
-    exit 1
-fi
-
-# Just in case:
-case ${JAVA_HOME} in
-/*) true;;
-?*) JAVA_HOME=`( cd $JAVA_HOME; pwd )`;;
-esac
-
-case `uname -m` in
-  i386|i486|i586|i686)
-    mach=i386
-    ;;
-  x86_64)
-    mach=amd64
-    ;;
-  *)
-    echo "Unsupported machine: " `uname -m`
-    exit 1
-    ;;
-esac
-
-if [ "${JAVA_HOME}" = ""  -o  ! -d "${JAVA_HOME}" -o ! -d ${JAVA_HOME}/jre/lib/${mach} ]; then
-    echo "JAVA_HOME needs to be set to a valid JDK path"
-    echo "ksh : export JAVA_HOME=/net/tetrasparc/export/gobi/JDK1.2_fcs_V/linux"
-    echo "csh : setenv JAVA_HOME /net/tetrasparc/export/gobi/JDK1.2_fcs_V/linux"
-    exit 1
-fi
-
-
-LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/`uname -p`:\
-${JAVA_HOME}/jre/lib/`uname -p`/native_threads:${LD_LIBRARY_PATH-.}
-
-# This is necessary as long as we are using the old launcher
-# with the new distribution format:
-CLASSPATH=${JAVA_HOME}/jre/lib/rt.jar:${CLASSPATH-.}
-
-
-for gm in gmake gnumake
-do
-  if [ "${GNUMAKE-}" != "" ]; then break; fi
-  ($gm --version >/dev/null) 2>/dev/null && GNUMAKE=$gm
-done
-: ${GNUMAKE:?'Cannot locate the gnumake program.  Stop.'}
-
-
-echo "### ENVIRONMENT SETTINGS:"
-export JAVA_HOME		; echo "JAVA_HOME=$JAVA_HOME"
-export LD_LIBRARY_PATH		; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
-export CLASSPATH		; echo "CLASSPATH=$CLASSPATH"
-export GNUMAKE			; echo "GNUMAKE=$GNUMAKE"
-echo "###"
-
-Build_Options=$1
-Location=$2
-
-case ${Location} in
-/*) true;;
-?*) Location=`(cd ${Location}; pwd)`;;
-esac
-
-echo \
-${GNUMAKE} -f ${Location}/make/linux/Makefile $Build_Options GAMMADIR=${Location}
-${GNUMAKE} -f ${Location}/make/linux/Makefile $Build_Options GAMMADIR=${Location}
--- a/hotspot/make/linux/makefiles/buildtree.make	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/make/linux/makefiles/buildtree.make	Thu Apr 04 17:01:34 2013 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -183,6 +183,19 @@
 # literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.  
 gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
 
+# This bit is needed to enable local rebuilds.
+# Unless the makefile itself sets LP64, any environmental
+# setting of LP64 will interfere with the build.
+LP64_SETTING/32 = LP64 = \#empty
+LP64_SETTING/64 = LP64 = 1
+
+DATA_MODE/i486 = 32
+DATA_MODE/sparc = 32
+DATA_MODE/sparcv9 = 64
+DATA_MODE/amd64 = 64
+
+DATA_MODE = $(DATA_MODE/$(BUILDARCH))
+
 flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
@@ -205,6 +218,7 @@
 	echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \
 	echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \
 	echo "OPENJDK = $(OPENJDK)"; \
+	echo "$(LP64_SETTING/$(DATA_MODE))"; \
 	echo; \
 	echo "# Used for platform dispatching"; \
 	echo "TARGET_DEFINES  = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \
--- a/hotspot/make/solaris/build.sh	Thu Apr 04 19:07:32 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,127 +0,0 @@
-#! /bin/sh
-#
-# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#  
-#
-
-# Make sure the variable JAVA_HOME is set before running this script.
-
-set -u
-
-
-usage() {
-    (
-        echo "Usage : $0 [-sb | -sbfast] config ws_path"
-        echo ""
-        echo "Where:"
-        echo "    -sb     ::= enable source browser info generation for"
-        echo "                all configs during compilation"
-        echo ""
-        echo "    -sbfast ::= enable source browser info generation for"
-        echo "                all configs without compilation"
-        echo ""
-        echo "    config  ::= debug     | debug1     | debugcore"
-        echo "                fastdebug | fastdebug1 | fastdebugcore"
-        echo "                jvmg      | jvmg1      | jvmgcore"
-        echo "                optimized | optimized1 | optimizedcore"
-        echo "                profiled  | profiled1  | profiledcore"
-        echo "                product   | product1   | productcore"
-        echo ""
-        echo "    ws_path ::= path to HotSpot workspace"
-    ) >&2
-    exit 1
-}
-
-# extract possible options
-options=""
-if [ $# -gt 2 ]; then 
-    case "$1" in
-    -sb)
-	options="CFLAGS_BROWSE=-xsb"
-	shift
-	;;
-    -sbfast)
-	options="CFLAGS_BROWSE=-xsbfast"
-	shift
-	;;
-    *)
-	echo "Unknown option: '$1'" >&2
-	usage
-	;;
-    esac
-fi
-
-# should be just two args left at this point
-if [ $# != 2 ]; then 
-    usage
-fi
-
-# Just in case:
-case ${JAVA_HOME} in
-/*) true;;
-?*) JAVA_HOME=`( cd $JAVA_HOME; pwd )`;;
-esac
-
-if [ "${JAVA_HOME}" = ""  -o  ! -d "${JAVA_HOME}" -o ! -d ${JAVA_HOME}/jre/lib/`uname -p` ]; then
-    echo "JAVA_HOME needs to be set to a valid JDK path"
-    echo "ksh : export JAVA_HOME=/net/tetrasparc/export/gobi/JDK1.2_fcs_V/solaris"
-    echo "csh : setenv JAVA_HOME /net/tetrasparc/export/gobi/JDK1.2_fcs_V/solaris"
-    exit 1
-fi
-
-
-LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/`uname -p`:\
-${JAVA_HOME}/jre/lib/`uname -p`/native_threads:${LD_LIBRARY_PATH-.}
-
-# This is necessary as long as we are using the old launcher
-# with the new distribution format:
-CLASSPATH=${JAVA_HOME}/jre/lib/rt.jar:${CLASSPATH-.}
-
-
-for gm in gmake gnumake
-do
-  if [ "${GNUMAKE-}" != "" ]; then break; fi
-  ($gm --version >/dev/null) 2>/dev/null && GNUMAKE=$gm
-done
-: ${GNUMAKE:?'Cannot locate the gnumake program.  Stop.'}
-
-
-echo "### ENVIRONMENT SETTINGS:"
-export JAVA_HOME		; echo "JAVA_HOME=$JAVA_HOME"
-export LD_LIBRARY_PATH		; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
-export CLASSPATH		; echo "CLASSPATH=$CLASSPATH"
-export GNUMAKE			; echo "GNUMAKE=$GNUMAKE"
-echo "###"
-
-config=$1
-ws_path=$2
-
-case ${ws_path} in
-/*) true;;
-?*) ws_path=`(cd ${ws_path}; pwd)`;;
-esac
-
-echo \
-${GNUMAKE} -f ${ws_path}/make/solaris/Makefile \
-    $config GAMMADIR=${ws_path} $options
-${GNUMAKE} -f ${ws_path}/make/solaris/Makefile \
-    $config GAMMADIR=${ws_path} $options
--- a/hotspot/make/solaris/makefiles/buildtree.make	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/make/solaris/makefiles/buildtree.make	Thu Apr 04 17:01:34 2013 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -176,6 +176,19 @@
 # literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.  
 gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
 
+# This bit is needed to enable local rebuilds.
+# Unless the makefile itself sets LP64, any environmental
+# setting of LP64 will interfere with the build.
+LP64_SETTING/32 = LP64 = \#empty
+LP64_SETTING/64 = LP64 = 1
+
+DATA_MODE/i486 = 32
+DATA_MODE/sparc = 32
+DATA_MODE/sparcv9 = 64
+DATA_MODE/amd64 = 64
+
+DATA_MODE = $(DATA_MODE/$(BUILDARCH))
+
 flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
--- a/hotspot/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -51,6 +51,16 @@
 void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
 
+  if (_info->deoptimize_on_exception()) {
+    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    __ call(a, relocInfo::runtime_call_type);
+    __ delayed()->nop();
+    ce->add_call_info_here(_info);
+    ce->verify_oop_map(_info);
+    debug_only(__ should_not_reach_here());
+    return;
+  }
+
   if (_index->is_register()) {
     __ mov(_index->as_register(), G4);
   } else {
@@ -64,11 +74,22 @@
   __ delayed()->nop();
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
-#ifdef ASSERT
-  __ should_not_reach_here();
-#endif
+  debug_only(__ should_not_reach_here());
+}
+
+PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
+  _info = new CodeEmitInfo(info);
 }
 
+void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
+  __ bind(_entry);
+  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+  __ call(a, relocInfo::runtime_call_type);
+  __ delayed()->nop();
+  ce->add_call_info_here(_info);
+  ce->verify_oop_map(_info);
+  debug_only(__ should_not_reach_here());
+}
 
 void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
@@ -99,10 +120,17 @@
 
 
 void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
+  address a;
+  if (_info->deoptimize_on_exception()) {
+    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
+    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+  } else {
+    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+  }
+
   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   __ bind(_entry);
-  __ call(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id),
-          relocInfo::runtime_call_type);
+  __ call(a, relocInfo::runtime_call_type);
   __ delayed()->nop();
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -3361,6 +3361,45 @@
   __ mov(G2_thread, result_reg->as_register());
 }
 
+#ifdef ASSERT
+// emit run-time assertion
+void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
+  assert(op->code() == lir_assert, "must be");
+
+  if (op->in_opr1()->is_valid()) {
+    assert(op->in_opr2()->is_valid(), "both operands must be valid");
+    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
+  } else {
+    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
+    assert(op->condition() == lir_cond_always, "no other conditions allowed");
+  }
+
+  Label ok;
+  if (op->condition() != lir_cond_always) {
+    Assembler::Condition acond;
+    switch (op->condition()) {
+      case lir_cond_equal:        acond = Assembler::equal;                break;
+      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
+      case lir_cond_less:         acond = Assembler::less;                 break;
+      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
+      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
+      case lir_cond_greater:      acond = Assembler::greater;              break;
+      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
+      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
+      default:                         ShouldNotReachHere();
+    };
+    __ br(acond, false, Assembler::pt, ok);
+    __ delayed()->nop();
+  }
+  if (op->halt()) {
+    const char* str = __ code_string(op->msg());
+    __ stop(str);
+  } else {
+    breakpoint();
+  }
+  __ bind(ok);
+}
+#endif
 
 void LIR_Assembler::peephole(LIR_List* lir) {
   LIR_OpList* inst = lir->instructions_list();
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -324,7 +324,7 @@
 
 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
   assert(x->is_pinned(),"");
-  bool needs_range_check = true;
+  bool needs_range_check = x->compute_needs_range_check();
   bool use_length = x->length() != NULL;
   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
   bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
@@ -339,12 +339,9 @@
   array.load_item();
   index.load_nonconstant();
 
-  if (use_length) {
-    needs_range_check = x->compute_needs_range_check();
-    if (needs_range_check) {
-      length.set_instruction(x->length());
-      length.load_item();
-    }
+  if (use_length && needs_range_check) {
+    length.set_instruction(x->length());
+    length.load_item();
   }
   if (needs_store_check) {
     value.load_item();
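
In both the SPARC and x86 LIRGenerator hunks, do_StoreIndexed now asks compute_needs_range_check() up front instead of defaulting to true, so a store the compiler can prove in bounds emits neither the range check nor the array-length load that used to feed it. A minimal sketch of the reordered decision (hypothetical types, not HotSpot code):

// Toy rendering of the change above: decide whether a bounds check is
// needed before loading anything, so stores proven in range pay for
// neither the check nor the array-length load.
struct Store { bool provably_in_range; bool has_length_operand; };

static bool plan_store(const Store& s) {
  bool needs_range_check = !s.provably_in_range;   // was: always true
  if (s.has_length_operand && needs_range_check) {
    // load the array length only when a check will actually be emitted
  }
  return needs_range_check;
}
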
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -987,6 +987,25 @@
       break;
 #endif // INCLUDE_ALL_GCS
 
+    case predicate_failed_trap_id:
+      {
+        __ set_info("predicate_failed_trap", dont_gc_arguments);
+        OopMap* oop_map = save_live_registers(sasm);
+
+        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
+
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, oop_map);
+
+        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
+        assert(deopt_blob != NULL, "deoptimization blob must have been created");
+        restore_live_registers(sasm);
+        __ restore();
+        __ br(Assembler::always, false, Assembler::pt, deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
+        __ delayed()->nop();
+      }
+      break;
+
     default:
       { __ set_info("unimplemented entry", dont_gc_arguments);
         __ save_frame(0);
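
predicate_failed_trap_id is the runtime half of the new C1 loop predication: when a hoisted predicate (range check, null check) fails in compiled code, the stub saves live registers, calls predicate_failed_trap, and branches to the deoptimization blob's unpack_with_reexecution entry, so the failing bytecode is re-executed in the interpreter rather than throwing from the compiled frame. A toy model of that deoptimize-instead-of-throw shape (illustrative only, not HotSpot code):

#include <cstdio>

// The optimized path guards on a predicate hoisted out of the loop; if it
// ever fails, fall back to the straightforward (interpreter-like) path and
// re-execute the access with full checks instead of throwing here.
static int interp_load(const int* a, int n, int i) {
  if (i < 0 || i >= n) { std::puts("range check failed"); return -1; }
  return a[i];
}

static int compiled_load(const int* a, int n, int i) {
  if (i >= 0 && i < n)           // predicate holds: fast path, no per-access check
    return a[i];
  return interp_load(a, n, i);   // "deopt": redo the access in the slow path
}

int main() {
  int a[4] = {1, 2, 3, 4};
  std::printf("%d %d\n", compiled_load(a, 4, 2), compiled_load(a, 4, 9));
}
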
--- a/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -101,6 +101,15 @@
 
 void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
+  if (_info->deoptimize_on_exception()) {
+    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    __ call(RuntimeAddress(a));
+    ce->add_call_info_here(_info);
+    ce->verify_oop_map(_info);
+    debug_only(__ should_not_reach_here());
+    return;
+  }
+
   // pass the array index on stack because all registers must be preserved
   if (_index->is_cpu_register()) {
     ce->store_parameter(_index->as_register(), 0);
@@ -115,9 +124,22 @@
   }
   __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
   ce->add_call_info_here(_info);
+  ce->verify_oop_map(_info);
   debug_only(__ should_not_reach_here());
 }
 
+PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
+  _info = new CodeEmitInfo(info);
+}
+
+void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
+  __ bind(_entry);
+  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+  __ call(RuntimeAddress(a));
+  ce->add_call_info_here(_info);
+  ce->verify_oop_map(_info);
+  debug_only(__ should_not_reach_here());
+}
 
 void DivByZeroStub::emit_code(LIR_Assembler* ce) {
   if (_offset != -1) {
@@ -414,10 +436,19 @@
 
 
 void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
+  address a;
+  if (_info->deoptimize_on_exception()) {
+    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
+    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+  } else {
+    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+  }
+
   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   __ bind(_entry);
-  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id)));
+  __ call(RuntimeAddress(a));
   ce->add_call_info_here(_info);
+  ce->verify_oop_map(_info);
   debug_only(__ should_not_reach_here());
 }
 
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -3755,6 +3755,44 @@
   }
 }
 
+#ifdef ASSERT
+// emit run-time assertion
+void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
+  assert(op->code() == lir_assert, "must be");
+
+  if (op->in_opr1()->is_valid()) {
+    assert(op->in_opr2()->is_valid(), "both operands must be valid");
+    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
+  } else {
+    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
+    assert(op->condition() == lir_cond_always, "no other conditions allowed");
+  }
+
+  Label ok;
+  if (op->condition() != lir_cond_always) {
+    Assembler::Condition acond = Assembler::zero;
+    switch (op->condition()) {
+      case lir_cond_equal:        acond = Assembler::equal;       break;
+      case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
+      case lir_cond_less:         acond = Assembler::less;        break;
+      case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
+      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
+      case lir_cond_greater:      acond = Assembler::greater;     break;
+      case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
+      case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
+      default:                    ShouldNotReachHere();
+    }
+    __ jcc(acond, ok);
+  }
+  if (op->halt()) {
+    const char* str = __ code_string(op->msg());
+    __ stop(str);
+  } else {
+    breakpoint();
+  }
+  __ bind(ok);
+}
+#endif
 
 void LIR_Assembler::membar() {
   // QQQ sparc TSO uses this,
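
emit_assert is the backend for the new lir_assert op: under ASSERT builds C1 can plant run-time checks directly in generated code, comparing two operands and, on failure, either halting with a message (op->halt()) or dropping into a breakpoint. A freestanding sketch of that halt-or-trap choice (hypothetical helper, not the HotSpot API):

#include <csignal>
#include <cstdio>
#include <cstdlib>

// Minimal model of LIR_OpAssert's halt()/breakpoint() choice: check a
// condition at run time and either stop with a message (mirrors __ stop(str))
// or trap into an attached debugger (mirrors breakpoint(); SIGTRAP is POSIX).
static void runtime_assert(bool ok, bool halt, const char* msg) {
  if (ok) return;
  if (halt) { std::fprintf(stderr, "assert failed: %s\n", msg); std::abort(); }
  std::raise(SIGTRAP);
}
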
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -263,7 +263,7 @@
 
 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
   assert(x->is_pinned(),"");
-  bool needs_range_check = true;
+  bool needs_range_check = x->compute_needs_range_check();
   bool use_length = x->length() != NULL;
   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
   bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
@@ -278,12 +278,10 @@
   array.load_item();
   index.load_nonconstant();
 
-  if (use_length) {
-    needs_range_check = x->compute_needs_range_check();
-    if (needs_range_check) {
-      length.set_instruction(x->length());
-      length.load_item();
-    }
+  if (use_length && needs_range_check) {
+    length.set_instruction(x->length());
+    length.load_item();
+
   }
   if (needs_store_check) {
     value.load_item();
--- a/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_LinearScan_x86.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -675,7 +675,8 @@
   switch (op2->code()) {
     case lir_cmp:
     case lir_cmp_fd2i:
-    case lir_ucmp_fd2i: {
+    case lir_ucmp_fd2i:
+    case lir_assert: {
       assert(left->is_fpu_register(), "invalid LIR");
       assert(right->is_fpu_register(), "invalid LIR");
 
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -1807,6 +1807,24 @@
       break;
 #endif // INCLUDE_ALL_GCS
 
+    case predicate_failed_trap_id:
+      {
+        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
+
+        OopMap* map = save_live_registers(sasm, 1);
+
+        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, map);
+        restore_live_registers(sasm);
+        __ leave();
+        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
+        assert(deopt_blob != NULL, "deoptimization blob must have been created");
+
+        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
+      }
+      break;
+
     default:
       { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
         __ movptr(rax, (int)id);
--- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -1299,25 +1299,8 @@
   __ push(rdx);
 #endif // _LP64
 
-  // Either restore the MXCSR register after returning from the JNI Call
-  // or verify that it wasn't changed.
-  if (VM_Version::supports_sse()) {
-    if (RestoreMXCSROnJNICalls) {
-      __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
-    }
-    else if (CheckJNICalls ) {
-      __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
-    }
-  }
-
-#ifndef _LP64
-  // Either restore the x87 floating pointer control word after returning
-  // from the JNI call or verify that it wasn't changed.
-  if (CheckJNICalls) {
-    __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
-  }
-#endif // _LP64
-
+  // Verify or restore cpu control state after JNI call
+  __ restore_cpu_control_state_after_jni();
 
   // change thread state
   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -4765,6 +4765,31 @@
   pop_CPU_state();
 }
 
+void MacroAssembler::restore_cpu_control_state_after_jni() {
+  // Either restore the MXCSR register after returning from the JNI Call
+  // or verify that it wasn't changed (with -Xcheck:jni flag).
+  if (VM_Version::supports_sse()) {
+    if (RestoreMXCSROnJNICalls) {
+      ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
+    } else if (CheckJNICalls) {
+      call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
+    }
+  }
+  if (VM_Version::supports_avx()) {
+    // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
+    vzeroupper();
+  }
+
+#ifndef _LP64
+  // Either restore the x87 floating pointer control word after returning
+  // from the JNI call or verify that it wasn't changed.
+  if (CheckJNICalls) {
+    call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
+  }
+#endif // _LP64
+}
+
+
 void MacroAssembler::load_klass(Register dst, Register src) {
 #ifdef _LP64
   if (UseCompressedKlassPointers) {
@@ -5759,6 +5784,8 @@
     addptr(result, stride2);
     subl(cnt2, stride2);
     jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
+    // clean upper bits of YMM registers
+    vzeroupper();
 
     // compare wide vectors tail
     bind(COMPARE_WIDE_TAIL);
@@ -5772,6 +5799,8 @@
 
     // Identifies the mismatching (higher or lower)16-bytes in the 32-byte vectors.
     bind(VECTOR_NOT_EQUAL);
+    // clean upper bits of YMM registers
+    vzeroupper();
     lea(str1, Address(str1, result, scale));
     lea(str2, Address(str2, result, scale));
     jmp(COMPARE_16_CHARS);
@@ -6028,6 +6057,10 @@
 
   // That's it
   bind(DONE);
+  if (UseAVX >= 2) {
+    // clean upper bits of YMM registers
+    vzeroupper();
+  }
 }
 
 void MacroAssembler::generate_fill(BasicType t, bool aligned,
@@ -6157,6 +6190,10 @@
         vmovdqu(Address(to, 0), xtmp);
         addptr(to, 32);
         subl(count, 8 << shift);
+
+        BIND(L_check_fill_8_bytes);
+        // clean upper bits of YMM registers
+        vzeroupper();
       } else {
         // Fill 32-byte chunks
         pshufd(xtmp, xtmp, 0);
@@ -6180,8 +6217,9 @@
         addptr(to, 32);
         subl(count, 8 << shift);
         jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
+
+        BIND(L_check_fill_8_bytes);
       }
-      BIND(L_check_fill_8_bytes);
       addl(count, 8 << shift);
       jccb(Assembler::zero, L_exit);
       jmpb(L_fill_8_bytes);
@@ -6316,6 +6354,10 @@
     jccb(Assembler::lessEqual, L_copy_16_chars);
 
     bind(L_copy_16_chars_exit);
+    if (UseAVX >= 2) {
+      // clean upper bits of YMM registers
+      vzeroupper();
+    }
     subptr(len, 8);
     jccb(Assembler::greater, L_copy_8_chars_exit);
 
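
The vzeroupper calls threaded through these stubs address the AVX/SSE state-transition penalty: once 256-bit instructions have dirtied the upper halves of the YMM registers, subsequent legacy-SSE code pays a costly state save/restore until the uppers are cleared. The rule the stubs follow, shown with user-level compiler intrinsics rather than MacroAssembler code (a sketch; compile with -mavx):

#include <immintrin.h>

// After a block of 256-bit AVX work, clear the upper YMM halves before any
// legacy-SSE code runs, avoiding the AVX <-> SSE transition penalty.
void scale(float* dst, const float* src, int n, float k) {
  __m256 vk = _mm256_set1_ps(k);
  int i = 0;
  for (; i + 8 <= n; i += 8)
    _mm256_storeu_ps(dst + i, _mm256_mul_ps(_mm256_loadu_ps(src + i), vk));
  _mm256_zeroupper();          // same effect as the emitted vzeroupper
  for (; i < n; ++i)           // scalar/SSE tail now runs penalty-free
    dst[i] = src[i] * k;
}
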
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -582,6 +582,9 @@
   // only if +VerifyFPU
   void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
 
+  // Verify or restore cpu control state after JNI call
+  void restore_cpu_control_state_after_jni();
+
   // prints msg, dumps registers and stops execution
   void stop(const char* msg);
 
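
restore_cpu_control_state_after_jni() centralizes what every JNI call site used to open-code: restore MXCSR (or, under -Xcheck:jni, verify it), clear the YMM uppers when AVX is available, and on 32-bit also check the x87 control word. A sketch of just the MXCSR half using the standard SSE intrinsics (the helper name is hypothetical; 0x1F80 is the architectural default MXCSR value):

#include <xmmintrin.h>

// Sketch of the MXCSR half of restore_cpu_control_state_after_jni(), using
// intrinsics rather than generated assembly. 0x1F80 = all exceptions masked,
// status flags clear.
static const unsigned MXCSR_STD = 0x1F80;

inline void restore_mxcsr_after_jni(bool restore, bool check) {
  if (restore) {
    _mm_setcsr(MXCSR_STD);                      // ldmxcsr from the std value
  } else if (check && _mm_getcsr() != MXCSR_STD) {
    // under -Xcheck:jni, warn that native code changed MXCSR
  }
}
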
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -2065,6 +2065,9 @@
 
   __ call(RuntimeAddress(native_func));
 
+  // Verify or restore cpu control state after JNI call
+  __ restore_cpu_control_state_after_jni();
+
   // WARNING - on Windows Java Natives use pascal calling convention and pop the
   // arguments off of the stack. We could just re-adjust the stack pointer here
   // and continue to do SP relative addressing but we instead switch to FP
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -2315,16 +2315,8 @@
 
   __ call(RuntimeAddress(native_func));
 
-    // Either restore the MXCSR register after returning from the JNI Call
-    // or verify that it wasn't changed.
-    if (RestoreMXCSROnJNICalls) {
-      __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
-
-    }
-    else if (CheckJNICalls ) {
-      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
-    }
-
+  // Verify or restore cpu control state after JNI call
+  __ restore_cpu_control_state_after_jni();
 
   // Unpack native results.
   switch (ret_type) {
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -835,6 +835,11 @@
   __ BIND(L_copy_64_bytes);
     __ subl(qword_count, 8);
     __ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
+
+    if (UseUnalignedLoadStores && (UseAVX >= 2)) {
+      // clean upper bits of YMM registers
+      __ vzeroupper();
+    }
     __ addl(qword_count, 8);
     __ jccb(Assembler::zero, L_exit);
     //
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -1331,6 +1331,10 @@
       }
       __ addptr(qword_count, 4);
       __ BIND(L_end);
+      if (UseAVX >= 2) {
+        // clean upper bits of YMM registers
+        __ vzeroupper();
+      }
     } else {
       // Copy 32-bytes per iteration
       __ BIND(L_loop);
@@ -1404,6 +1408,10 @@
       }
       __ subptr(qword_count, 4);
       __ BIND(L_end);
+      if (UseAVX >= 2) {
+        // clean upper bits of YMM registers
+        __ vzeroupper();
+      }
     } else {
       // Copy 32-bytes per iteration
       __ BIND(L_loop);
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -1080,22 +1080,8 @@
 
   // result potentially in rdx:rax or ST0
 
-  // Either restore the MXCSR register after returning from the JNI Call
-  // or verify that it wasn't changed.
-  if (VM_Version::supports_sse()) {
-    if (RestoreMXCSROnJNICalls) {
-      __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
-    }
-    else if (CheckJNICalls ) {
-      __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
-    }
-  }
-
-  // Either restore the x87 floating pointer control word after returning
-  // from the JNI call or verify that it wasn't changed.
-  if (CheckJNICalls) {
-    __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
-  }
+  // Verify or restore cpu control state after JNI call
+  __ restore_cpu_control_state_after_jni();
 
   // save potential result in ST(0) & rdx:rax
   // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -1079,15 +1079,8 @@
   __ call(rax);
   // result potentially in rax or xmm0
 
-  // Depending on runtime options, either restore the MXCSR
-  // register after returning from the JNI Call or verify that
-  // it wasn't changed during -Xcheck:jni.
-  if (RestoreMXCSROnJNICalls) {
-    __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
-  }
-  else if (CheckJNICalls) {
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
-  }
+  // Verify or restore cpu control state after JNI call
+  __ restore_cpu_control_state_after_jni();
 
   // NOTE: The order of these pushes is known to frame::interpreter_frame_result
   // in order to extract the result of a method call. If the order of these
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Thu Apr 04 17:01:34 2013 -0700
@@ -228,10 +228,16 @@
 static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
 
 // Offset hacking within calls.
-static int pre_call_FPU_size() {
-  if (Compile::current()->in_24_bit_fp_mode())
-    return 6; // fldcw
-  return 0;
+static int pre_call_resets_size() {
+  int size = 0;
+  Compile* C = Compile::current();
+  if (C->in_24_bit_fp_mode()) {
+    size += 6; // fldcw
+  }
+  if (C->max_vector_size() > 16) {
+    size += 3; // vzeroupper
+  }
+  return size;
 }
 
 static int preserve_SP_size() {
@@ -242,21 +248,21 @@
 //       from the start of the call to the point where the return address
 //       will point.
 int MachCallStaticJavaNode::ret_addr_offset() {
-  int offset = 5 + pre_call_FPU_size();  // 5 bytes from start of call to where return address points
+  int offset = 5 + pre_call_resets_size();  // 5 bytes from start of call to where return address points
   if (_method_handle_invoke)
     offset += preserve_SP_size();
   return offset;
 }
 
 int MachCallDynamicJavaNode::ret_addr_offset() {
-  return 10 + pre_call_FPU_size();  // 10 bytes from start of call to where return address points
+  return 10 + pre_call_resets_size();  // 10 bytes from start of call to where return address points
 }
 
 static int sizeof_FFree_Float_Stack_All = -1;
 
 int MachCallRuntimeNode::ret_addr_offset() {
   assert(sizeof_FFree_Float_Stack_All != -1, "must have been emitted already");
-  return sizeof_FFree_Float_Stack_All + 5 + pre_call_FPU_size();
+  return sizeof_FFree_Float_Stack_All + 5 + pre_call_resets_size();
 }
 
 // Indicate if the safepoint node needs the polling page as an input.
@@ -272,7 +278,7 @@
 // The address of the call instruction needs to be 4-byte aligned to
 // ensure that it does not span a cache line so that it can be patched.
 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
-  current_offset += pre_call_FPU_size();  // skip fldcw, if any
+  current_offset += pre_call_resets_size();  // skip fldcw, if any
   current_offset += 1;      // skip call opcode byte
   return round_to(current_offset, alignment_required()) - current_offset;
 }
@@ -280,7 +286,7 @@
 // The address of the call instruction needs to be 4-byte aligned to
 // ensure that it does not span a cache line so that it can be patched.
 int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
-  current_offset += pre_call_FPU_size();  // skip fldcw, if any
+  current_offset += pre_call_resets_size();  // skip fldcw, if any
   current_offset += preserve_SP_size();   // skip mov rbp, rsp
   current_offset += 1;      // skip call opcode byte
   return round_to(current_offset, alignment_required()) - current_offset;
@@ -289,7 +295,7 @@
 // The address of the call instruction needs to be 4-byte aligned to
 // ensure that it does not span a cache line so that it can be patched.
 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
-  current_offset += pre_call_FPU_size();  // skip fldcw, if any
+  current_offset += pre_call_resets_size();  // skip fldcw, if any
   current_offset += 5;      // skip MOV instruction
   current_offset += 1;      // skip call opcode byte
   return round_to(current_offset, alignment_required()) - current_offset;
@@ -583,16 +589,20 @@
   // Remove two words for return addr and rbp,
   framesize -= 2*wordSize;
 
-  if( C->in_24_bit_fp_mode() ) {
+  if (C->max_vector_size() > 16) {
+    st->print("VZEROUPPER");
+    st->cr(); st->print("\t");
+  }
+  if (C->in_24_bit_fp_mode()) {
     st->print("FLDCW  standard control word");
     st->cr(); st->print("\t");
   }
-  if( framesize ) {
+  if (framesize) {
     st->print("ADD    ESP,%d\t# Destroy frame",framesize);
     st->cr(); st->print("\t");
   }
   st->print_cr("POPL   EBP"); st->print("\t");
-  if( do_polling() && C->is_method_compilation() ) {
+  if (do_polling() && C->is_method_compilation()) {
     st->print("TEST   PollPage,EAX\t! Poll Safepoint");
     st->cr(); st->print("\t");
   }
@@ -602,8 +612,14 @@
 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   Compile *C = ra_->C;
 
+  if (C->max_vector_size() > 16) {
+    // Clear upper bits of YMM registers when current compiled code uses
+    // wide vectors to avoid AVX <-> SSE transition penalty during call.
+    MacroAssembler masm(&cbuf);
+    masm.vzeroupper();
+  }
   // If method set FPU control word, restore to standard control word
-  if( C->in_24_bit_fp_mode() ) {
+  if (C->in_24_bit_fp_mode()) {
     MacroAssembler masm(&cbuf);
     masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
   }
@@ -615,12 +631,11 @@
 
   // Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here
 
-  if( framesize >= 128 ) {
+  if (framesize >= 128) {
     emit_opcode(cbuf, 0x81); // add  SP, #framesize
     emit_rm(cbuf, 0x3, 0x00, ESP_enc);
     emit_d32(cbuf, framesize);
-  }
-  else if( framesize ) {
+  } else if (framesize) {
     emit_opcode(cbuf, 0x83); // add  SP, #framesize
     emit_rm(cbuf, 0x3, 0x00, ESP_enc);
     emit_d8(cbuf, framesize);
@@ -628,7 +643,7 @@
 
   emit_opcode(cbuf, 0x58 | EBP_enc);
 
-  if( do_polling() && C->is_method_compilation() ) {
+  if (do_polling() && C->is_method_compilation()) {
     cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0);
     emit_opcode(cbuf,0x85);
     emit_rm(cbuf, 0x0, EAX_enc, 0x5); // EAX
@@ -640,7 +655,8 @@
   Compile *C = ra_->C;
   // If method set FPU control word, restore to standard control word
   int size = C->in_24_bit_fp_mode() ? 6 : 0;
-  if( do_polling() && C->is_method_compilation() ) size += 6;
+  if (C->max_vector_size() > 16) size += 3; // vzeroupper
+  if (do_polling() && C->is_method_compilation()) size += 6;
 
   int framesize = C->frame_slots() << LogBytesPerInt;
   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
@@ -649,7 +665,7 @@
 
   size++; // popl rbp,
 
-  if( framesize >= 128 ) {
+  if (framesize >= 128) {
     size += 6;
   } else {
     size += framesize ? 3 : 0;
@@ -1853,20 +1869,26 @@
   %}
 
 
-  enc_class pre_call_FPU %{
+  enc_class pre_call_resets %{
     // If method sets FPU control word restore it here
     debug_only(int off0 = cbuf.insts_size());
-    if( Compile::current()->in_24_bit_fp_mode() ) {
-      MacroAssembler masm(&cbuf);
-      masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
+    if (ra_->C->in_24_bit_fp_mode()) {
+      MacroAssembler _masm(&cbuf);
+      __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
+    }
+    if (ra_->C->max_vector_size() > 16) {
+      // Clear upper bits of YMM registers when current compiled code uses
+      // wide vectors to avoid AVX <-> SSE transition penalty during call.
+      MacroAssembler _masm(&cbuf);
+      __ vzeroupper();
     }
     debug_only(int off1 = cbuf.insts_size());
-    assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction");
+    assert(off1 - off0 == pre_call_resets_size(), "correct size prediction");
   %}
 
   enc_class post_call_FPU %{
     // If method sets FPU control word do it here also
-    if( Compile::current()->in_24_bit_fp_mode() ) {
+    if (Compile::current()->in_24_bit_fp_mode()) {
       MacroAssembler masm(&cbuf);
       masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
     }
@@ -1877,17 +1899,17 @@
     // who we intended to call.
     cbuf.set_insts_mark();
     $$$emit8$primary;
-    if ( !_method ) {
+    if (!_method) {
       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                      runtime_call_Relocation::spec(), RELOC_IMM32 );
-    } else if(_optimized_virtual) {
+    } else if (_optimized_virtual) {
       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                      opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
     } else {
       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                      static_call_Relocation::spec(), RELOC_IMM32 );
     }
-    if( _method ) {  // Emit stub for static call
+    if (_method) {  // Emit stub for static call
       emit_java_to_interp(cbuf);
     }
   %}
@@ -12828,7 +12850,7 @@
   ins_cost(300);
   format %{ "CALL,static " %}
   opcode(0xE8); /* E8 cd */
-  ins_encode( pre_call_FPU,
+  ins_encode( pre_call_resets,
               Java_Static_Call( meth ),
               call_epilog,
               post_call_FPU );
@@ -12849,7 +12871,7 @@
   ins_cost(300);
   format %{ "CALL,static/MethodHandle " %}
   opcode(0xE8); /* E8 cd */
-  ins_encode( pre_call_FPU,
+  ins_encode( pre_call_resets,
               preserve_SP,
               Java_Static_Call( meth ),
               restore_SP,
@@ -12870,7 +12892,7 @@
   format %{ "MOV    EAX,(oop)-1\n\t"
             "CALL,dynamic" %}
   opcode(0xE8); /* E8 cd */
-  ins_encode( pre_call_FPU,
+  ins_encode( pre_call_resets,
               Java_Dynamic_Call( meth ),
               call_epilog,
               post_call_FPU );
@@ -12887,7 +12909,7 @@
   format %{ "CALL,runtime " %}
   opcode(0xE8); /* E8 cd */
   // Use FFREEs to clear entries in float stack
-  ins_encode( pre_call_FPU,
+  ins_encode( pre_call_resets,
               FFree_Float_Stack_All,
               Java_To_Runtime( meth ),
               post_call_FPU );
@@ -12902,7 +12924,7 @@
   ins_cost(300);
   format %{ "CALL_LEAF,runtime " %}
   opcode(0xE8); /* E8 cd */
-  ins_encode( pre_call_FPU,
+  ins_encode( pre_call_resets,
               FFree_Float_Stack_All,
               Java_To_Runtime( meth ),
               Verify_FPU_For_Leaf, post_call_FPU );
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Thu Apr 04 17:01:34 2013 -0700
@@ -399,6 +399,9 @@
 static int preserve_SP_size() {
   return 3;  // rex.w, op, rm(reg/reg)
 }
+static int clear_avx_size() {
+  return (Compile::current()->max_vector_size() > 16) ? 3 : 0;  // vzeroupper
+}
 
 // !!!!! Special hack to get all types of calls to specify the byte offset
 //       from the start of the call to the point where the return address
@@ -406,6 +409,7 @@
 int MachCallStaticJavaNode::ret_addr_offset()
 {
   int offset = 5; // 5 bytes from start of call to where return address points
+  offset += clear_avx_size();
   if (_method_handle_invoke)
     offset += preserve_SP_size();
   return offset;
@@ -413,11 +417,16 @@
 
 int MachCallDynamicJavaNode::ret_addr_offset()
 {
-  return 15; // 15 bytes from start of call to where return address points
+  int offset = 15; // 15 bytes from start of call to where return address points
+  offset += clear_avx_size();
+  return offset;
 }
 
-// In os_cpu .ad file
-// int MachCallRuntimeNode::ret_addr_offset()
+int MachCallRuntimeNode::ret_addr_offset() {
+  int offset = 13; // movq r10,#addr; callq (r10)
+  offset += clear_avx_size();
+  return offset;
+}
 
 // Indicate if the safepoint node needs the polling page as an input,
 // it does if the polling page is more than disp32 away.
@@ -434,6 +443,7 @@
 // ensure that it does not span a cache line so that it can be patched.
 int CallStaticJavaDirectNode::compute_padding(int current_offset) const
 {
+  current_offset += clear_avx_size(); // skip vzeroupper
   current_offset += 1; // skip call opcode byte
   return round_to(current_offset, alignment_required()) - current_offset;
 }
@@ -443,6 +453,7 @@
 int CallStaticJavaHandleNode::compute_padding(int current_offset) const
 {
   current_offset += preserve_SP_size();   // skip mov rbp, rsp
+  current_offset += clear_avx_size(); // skip vzeroupper
   current_offset += 1; // skip call opcode byte
   return round_to(current_offset, alignment_required()) - current_offset;
 }
@@ -451,6 +462,7 @@
 // ensure that it does not span a cache line so that it can be patched.
 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
 {
+  current_offset += clear_avx_size(); // skip vzeroupper
   current_offset += 11; // skip movq instruction + call opcode byte
   return round_to(current_offset, alignment_required()) - current_offset;
 }
@@ -764,6 +776,11 @@
 void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
   Compile* C = ra_->C;
+  if (C->max_vector_size() > 16) {
+    st->print("vzeroupper");
+    st->cr(); st->print("\t");
+  }
+
   int framesize = C->frame_slots() << LogBytesPerInt;
   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   // Remove word for return adr already pushed
@@ -793,6 +810,13 @@
 void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
 {
   Compile* C = ra_->C;
+  if (C->max_vector_size() > 16) {
+    // Clear upper bits of YMM registers when current compiled code uses
+    // wide vectors to avoid AVX <-> SSE transition penalty during call.
+    MacroAssembler _masm(&cbuf);
+    __ vzeroupper();
+  }
+
   int framesize = C->frame_slots() << LogBytesPerInt;
   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   // Remove word for return adr already pushed
@@ -2008,6 +2032,25 @@
     __ bind(miss);
   %}
 
+  enc_class clear_avx %{
+    debug_only(int off0 = cbuf.insts_size());
+    if (ra_->C->max_vector_size() > 16) {
+      // Clear upper bits of YMM registers when current compiled code uses
+      // wide vectors to avoid AVX <-> SSE transition penalty during call.
+      MacroAssembler _masm(&cbuf);
+      __ vzeroupper();
+    }
+    debug_only(int off1 = cbuf.insts_size());
+    assert(off1 - off0 == clear_avx_size(), "correct size prediction");
+  %}
+
+  enc_class Java_To_Runtime(method meth) %{
+    // No relocation needed
+    MacroAssembler _masm(&cbuf);
+    __ mov64(r10, (int64_t) $meth$$method);
+    __ call(r10);
+  %}
+
   enc_class Java_To_Interpreter(method meth)
   %{
     // CALL Java_To_Interpreter
@@ -11366,7 +11409,7 @@
   ins_cost(300);
   format %{ "call,static " %}
   opcode(0xE8); /* E8 cd */
-  ins_encode(Java_Static_Call(meth), call_epilog);
+  ins_encode(clear_avx, Java_Static_Call(meth), call_epilog);
   ins_pipe(pipe_slow);
   ins_alignment(4);
 %}
@@ -11384,7 +11427,7 @@
   ins_cost(300);
   format %{ "call,static/MethodHandle " %}
   opcode(0xE8); /* E8 cd */
-  ins_encode(preserve_SP,
+  ins_encode(clear_avx, preserve_SP,
              Java_Static_Call(meth),
              restore_SP,
              call_epilog);
@@ -11403,7 +11446,7 @@
   ins_cost(300);
   format %{ "movq    rax, #Universe::non_oop_word()\n\t"
             "call,dynamic " %}
-  ins_encode(Java_Dynamic_Call(meth), call_epilog);
+  ins_encode(clear_avx, Java_Dynamic_Call(meth), call_epilog);
   ins_pipe(pipe_slow);
   ins_alignment(4);
 %}
@@ -11416,8 +11459,7 @@
 
   ins_cost(300);
   format %{ "call,runtime " %}
-  opcode(0xE8); /* E8 cd */
-  ins_encode(Java_To_Runtime(meth));
+  ins_encode(clear_avx, Java_To_Runtime(meth));
   ins_pipe(pipe_slow);
 %}
 
@@ -11429,8 +11471,7 @@
 
   ins_cost(300);
   format %{ "call_leaf,runtime " %}
-  opcode(0xE8); /* E8 cd */
-  ins_encode(Java_To_Runtime(meth));
+  ins_encode(clear_avx, Java_To_Runtime(meth));
   ins_pipe(pipe_slow);
 %}
 
@@ -11442,7 +11483,6 @@
 
   ins_cost(300);
   format %{ "call_leaf_nofp,runtime " %}
-  opcode(0xE8); /* E8 cd */
   ins_encode(Java_To_Runtime(meth));
   ins_pipe(pipe_slow);
 %}
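
With Java_To_Runtime and MachCallRuntimeNode::ret_addr_offset() hoisted out of the three os_cpu AD files into the shared x86_64.ad, the offset must now also account for the optional vzeroupper emitted by the clear_avx encoding. The arithmetic, as a sketch (byte counts taken from the comments above):

// movq r10,#addr is 10 bytes, callq (r10) is 3, giving the original 13;
// vzeroupper (VEX C5 F8 77) adds 3 more when the method used wide vectors.
inline int call_runtime_ret_addr_offset(bool uses_wide_vectors) {
  const int movq_call  = 13;                    // movq r10,#addr; callq (r10)
  const int vzeroupper = uses_wide_vectors ? 3 : 0;
  return movq_call + vzeroupper;
}
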
--- a/hotspot/src/os/posix/launcher/launcher.script	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/os/posix/launcher/launcher.script	Thu Apr 04 17:01:34 2013 -0700
@@ -199,7 +199,7 @@
 	rm -f $GDBSCR
         ;;
     dbx)
-        $DBX -s $MYDIR/.dbxrc $LAUNCHER $JPARAMS
+        $DBX -s $HOME/.dbxrc $LAUNCHER $JPARMS
         ;;
     valgrind)
         echo Warning: Defaulting to 16Mb heap to make Valgrind run faster, use -Xmx for larger heap
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -3773,6 +3773,8 @@
   }
 }
 
+static jint initSock();
+
 // this is called _after_ the global arguments have been parsed
 jint os::init_2(void) {
   // Allocate a single page and mark it as readable for safepoint polling
@@ -3903,6 +3905,10 @@
     if (!success) UseNUMAInterleaving = false;
   }
 
+  if (initSock() != JNI_OK) {
+    return JNI_ERR;
+  }
+
   return JNI_OK;
 }
 
@@ -4899,42 +4905,24 @@
 // We don't build a headless jre for Windows
 bool os::is_headless_jre() { return false; }
 
-
-typedef CRITICAL_SECTION mutex_t;
-#define mutexInit(m)    InitializeCriticalSection(m)
-#define mutexDestroy(m) DeleteCriticalSection(m)
-#define mutexLock(m)    EnterCriticalSection(m)
-#define mutexUnlock(m)  LeaveCriticalSection(m)
-
-static bool sock_initialized = FALSE;
-static mutex_t sockFnTableMutex;
-
-static void initSock() {
+static jint initSock() {
   WSADATA wsadata;
 
   if (!os::WinSock2Dll::WinSock2Available()) {
-    jio_fprintf(stderr, "Could not load Winsock 2 (error: %d)\n",
+    jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
       ::GetLastError());
-    return;
-  }
-  if (sock_initialized == TRUE) return;
-
-  ::mutexInit(&sockFnTableMutex);
-  ::mutexLock(&sockFnTableMutex);
-  if (os::WinSock2Dll::WSAStartup(MAKEWORD(1,1), &wsadata) != 0) {
-      jio_fprintf(stderr, "Could not initialize Winsock\n");
-  }
-  sock_initialized = TRUE;
-  ::mutexUnlock(&sockFnTableMutex);
+    return JNI_ERR;
+  }
+
+  if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
+    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
+      ::GetLastError());
+    return JNI_ERR;
+  }
+  return JNI_OK;
 }
 
 struct hostent* os::get_host_by_name(char* name) {
-  if (!sock_initialized) {
-    initSock();
-  }
-  if (!os::WinSock2Dll::WinSock2Available()) {
-    return NULL;
-  }
   return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
 }
 
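
Note: initSock() now runs exactly once from os::init_2() and a failure aborts VM startup, which is why the lazy, mutex-guarded initialization and the per-call checks in get_host_by_name() can all be deleted. It also requests Winsock 2.2 instead of the legacy 1.1. A minimal standalone sketch of the adopted pattern (plain Win32; WSAGetLastError is used here in place of the patch's ::GetLastError):

    #include <winsock2.h>
    #include <cstdio>
    // Link against ws2_32.lib (e.g. #pragma comment(lib, "ws2_32.lib") on MSVC).

    static int init_sock_once() {
      WSADATA wsadata;
      if (WSAStartup(MAKEWORD(2, 2), &wsadata) != 0) {  // request Winsock 2.2
        std::fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
                     WSAGetLastError());
        return -1;  // caller treats this as a fatal startup error
      }
      return 0;     // socket calls may now proceed without lazy-init checks
    }
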
--- a/hotspot/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad	Thu Apr 04 17:01:34 2013 -0700
@@ -55,20 +55,6 @@
   // adding a syntax that specifies the sizes of fields in an order,
   // so that the adlc can build the emit functions automagically
 
-  enc_class Java_To_Runtime(method meth) %{
-    // No relocation needed
-
-    // movq r10, <meth>
-    emit_opcode(cbuf, Assembler::REX_WB);
-    emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
-    emit_d64(cbuf, (int64_t) $meth$$method);
-
-    // call (r10)
-    emit_opcode(cbuf, Assembler::REX_B);
-    emit_opcode(cbuf, 0xFF);
-    emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
-  %}
-
 %}
 
 
@@ -76,8 +62,4 @@
 
 source %{
 
-int MachCallRuntimeNode::ret_addr_offset() {
-  return 13; // movq r10,#addr; callq (r10)
-}
-
 %}
--- a/hotspot/src/os_cpu/linux_x86/vm/linux_x86_64.ad	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/os_cpu/linux_x86/vm/linux_x86_64.ad	Thu Apr 04 17:01:34 2013 -0700
@@ -55,20 +55,6 @@
   // adding a syntax that specifies the sizes of fields in an order,
   // so that the adlc can build the emit functions automagically
 
-  enc_class Java_To_Runtime(method meth) %{
-    // No relocation needed
-
-    // movq r10, <meth>
-    emit_opcode(cbuf, Assembler::REX_WB);
-    emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
-    emit_d64(cbuf, (int64_t) $meth$$method);
-
-    // call (r10)
-    emit_opcode(cbuf, Assembler::REX_B);
-    emit_opcode(cbuf, 0xFF);
-    emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
-  %}
-
 %}
 
 
@@ -76,8 +62,4 @@
 
 source %{
 
-int MachCallRuntimeNode::ret_addr_offset() {
-  return 13; // movq r10,#addr; callq (r10)
-}
-
 %}
--- a/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad	Thu Apr 04 17:01:34 2013 -0700
@@ -54,39 +54,10 @@
   // main source block for now.  In future, we can generalize this by
   // adding a syntax that specifies the sizes of fields in an order,
   // so that the adlc can build the emit functions automagically
-
-  enc_class Java_To_Runtime(method meth) %{
-    // No relocation needed
-
-    // movq r10, <meth>
-    emit_opcode(cbuf, Assembler::REX_WB);
-    emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
-    emit_d64(cbuf, (int64_t) $meth$$method);
-
-    // call (r10)
-    emit_opcode(cbuf, Assembler::REX_B);
-    emit_opcode(cbuf, 0xFF);
-    emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
-  %}
-
-  enc_class post_call_verify_mxcsr %{
-    MacroAssembler _masm(&cbuf);
-    if (RestoreMXCSROnJNICalls) {
-      __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
-    }
-    else if (CheckJNICalls) {
-      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry())));
-    }
-  %}
 %}
 
 
 // Platform dependent source
 
 source %{
-
-int MachCallRuntimeNode::ret_addr_offset() {
-  return 13; // movq r10,#addr; callq (r10)
-}
-
 %}
--- a/hotspot/src/os_cpu/windows_x86/vm/windows_x86_64.ad	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/os_cpu/windows_x86/vm/windows_x86_64.ad	Thu Apr 04 17:01:34 2013 -0700
@@ -53,30 +53,11 @@
   // adding a syntax that specifies the sizes of fields in an order,
   // so that the adlc can build the emit functions automagically
 
-  enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime
-    // No relocation needed
+%}
+
 
-    // movq r10, <meth>
-    emit_opcode(cbuf, Assembler::REX_WB);
-    emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
-    emit_d64(cbuf, (int64_t) $meth$$method);
+// Platform dependent source
 
-    // call (r10)
-    emit_opcode(cbuf, Assembler::REX_B);
-    emit_opcode(cbuf, 0xFF);
-    emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
-  %}
+source %{
 
 %}
-
-//
-// Platform dependent source
-//
-source %{
-
-int MachCallRuntimeNode::ret_addr_offset()
-{
-  return 13; // movq r10,#addr; callq (r10)
-}
-
-%}
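
Note: all four OS-specific copies of Java_To_Runtime and MachCallRuntimeNode::ret_addr_offset() disappear because the encoding now lives once in x86_64.ad (see the MacroAssembler-based hunk at the top of this patch). The hard-coded 13 the removed functions returned is just the length of the removed byte sequence:

    // Byte accounting for "movq r10, #addr; callq (r10)":
    constexpr int mov64_r10_size = 1 + 1 + 8; // REX.WB prefix, 0xB8|reg, imm64
    constexpr int call_r10_size  = 1 + 1 + 1; // REX.B prefix, 0xFF, ModRM 0xD0|reg
    static_assert(mov64_r10_size + call_r10_size == 13,
                  "the ret_addr_offset the removed copies hard-coded");
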
--- a/hotspot/src/share/vm/adlc/archDesc.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/adlc/archDesc.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -832,6 +832,7 @@
     int         length  = (int)strlen(rc_name) + (int)strlen(mask) + 5;
     char       *regMask = new char[length];
     sprintf(regMask,"%s%s()", rc_name, mask);
+    delete[] rc_name;
     return regMask;
   }
 }
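
Note: the added delete[] plugs a leak: rc_name is built with new[] only to be embedded into another freshly allocated string, so the intermediate buffer must be freed before returning. The dfa.cpp hunks below apply the same rule to the strings returned by getMachOperEnum(). A standalone illustration of the ownership convention (names here are illustrative, not the adlc's):

    #include <cctype>
    #include <cstdio>
    #include <cstring>

    // Returns a new[]'d upper-cased copy; ownership passes to the caller,
    // as with the adlc's toUpper-style helpers.
    static char* upper_copy(const char* s) {
      char* r = new char[strlen(s) + 1];
      for (size_t i = 0; ; i++) {
        r[i] = (char)toupper((unsigned char)s[i]);
        if (s[i] == '\0') break;
      }
      return r;
    }

    int main() {
      char* name = upper_copy("rRegI");
      std::printf("STATE__VALID_CHILD(_kids[0], %s)\n", name);
      delete[] name;  // the step these hunks add after the last use
      return 0;
    }
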
--- a/hotspot/src/share/vm/adlc/dfa.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/adlc/dfa.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -191,12 +191,19 @@
 // Macro equivalent to: _kids[0]->valid(FOO) && _kids[1]->valid(BAR)
 //
 static void child_test(FILE *fp, MatchList &mList) {
-  if( mList._lchild )           // If left child, check it
-    fprintf(fp, "STATE__VALID_CHILD(_kids[0], %s)", ArchDesc::getMachOperEnum(mList._lchild));
-  if( mList._lchild && mList._rchild )      // If both, add the "&&"
-    fprintf(fp, " && " );
-  if( mList._rchild )           // If right child, check it
-    fprintf(fp, "STATE__VALID_CHILD(_kids[1], %s)", ArchDesc::getMachOperEnum(mList._rchild));
+  if (mList._lchild) { // If left child, check it
+    const char* lchild_to_upper = ArchDesc::getMachOperEnum(mList._lchild);
+    fprintf(fp, "STATE__VALID_CHILD(_kids[0], %s)", lchild_to_upper);
+    delete[] lchild_to_upper;
+  }
+  if (mList._lchild && mList._rchild) { // If both, add the "&&"
+    fprintf(fp, " && ");
+  }
+  if (mList._rchild) { // If right child, check it
+    const char* rchild_to_upper = ArchDesc::getMachOperEnum(mList._rchild);
+    fprintf(fp, "STATE__VALID_CHILD(_kids[1], %s)", rchild_to_upper);
+    delete[] rchild_to_upper;
+  }
 }
 
 //---------------------------calc_cost-----------------------------------------
@@ -206,13 +213,17 @@
 Expr *ArchDesc::calc_cost(FILE *fp, const char *spaces, MatchList &mList, ProductionState &status) {
   fprintf(fp, "%sunsigned int c = ", spaces);
   Expr *c = new Expr("0");
-  if (mList._lchild ) {                   // If left child, add it in
-    sprintf(Expr::buffer(), "_kids[0]->_cost[%s]", ArchDesc::getMachOperEnum(mList._lchild));
+  if (mList._lchild) { // If left child, add it in
+    const char* lchild_to_upper = ArchDesc::getMachOperEnum(mList._lchild);
+    sprintf(Expr::buffer(), "_kids[0]->_cost[%s]", lchild_to_upper);
     c->add(Expr::buffer());
+    delete[] lchild_to_upper;
 }
-  if (mList._rchild) {                    // If right child, add it in
-    sprintf(Expr::buffer(), "_kids[1]->_cost[%s]", ArchDesc::getMachOperEnum(mList._rchild));
+  if (mList._rchild) { // If right child, add it in
+    const char* rchild_to_upper = ArchDesc::getMachOperEnum(mList._rchild);
+    sprintf(Expr::buffer(), "_kids[1]->_cost[%s]", rchild_to_upper);
     c->add(Expr::buffer());
+    delete[] rchild_to_upper;
   }
   // Add in cost of this rule
   const char *mList_cost = mList.get_cost();
@@ -232,15 +243,17 @@
   fprintf(fp, "%s", spaces4);
   // Only generate child tests if this is not a leaf node
   bool has_child_constraints = mList._lchild || mList._rchild;
-  const char *predicate_test        = mList.get_pred();
-  if( has_child_constraints || predicate_test ) {
+  const char *predicate_test = mList.get_pred();
+  if (has_child_constraints || predicate_test) {
     // Open the child-and-predicate-test braces
     fprintf(fp, "if( ");
     status.set_constraint(hasConstraint);
     child_test(fp, mList);
     // Only generate predicate test if one exists for this match
-    if( predicate_test ) {
-      if( has_child_constraints ) { fprintf(fp," &&\n"); }
+    if (predicate_test) {
+      if (has_child_constraints) {
+        fprintf(fp," &&\n");
+      }
       fprintf(fp, "%s  %s", spaces6, predicate_test);
     }
     // End of outer tests
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -937,4 +937,6 @@
 void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
 void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
 void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {}
+void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
+void Canonicalizer::do_Assert(Assert* x) {}
 void Canonicalizer::do_MemBar(MemBar* x) {}
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -107,6 +107,8 @@
   virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   virtual void do_RuntimeCall    (RuntimeCall*     x);
   virtual void do_MemBar         (MemBar*          x);
+  virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
+  virtual void do_Assert         (Assert*          x);
 };
 
 #endif // SHARE_VM_C1_C1_CANONICALIZER_HPP
--- a/hotspot/src/share/vm/c1/c1_CodeStubs.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_CodeStubs.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -166,6 +166,22 @@
 #endif // PRODUCT
 };
 
+// Stub used when a predicate fails and deoptimization is needed
+class PredicateFailedStub: public CodeStub {
+ private:
+  CodeEmitInfo* _info;
+
+ public:
+  PredicateFailedStub(CodeEmitInfo* info);
+  virtual void emit_code(LIR_Assembler* e);
+  virtual CodeEmitInfo* info() const             { return _info; }
+  virtual void visit(LIR_OpVisitState* visitor) {
+    visitor->do_slow_case(_info);
+  }
+#ifndef PRODUCT
+  virtual void print_name(outputStream* out) const { out->print("PredicateFailedStub"); }
+#endif // PRODUCT
+};
 
 class DivByZeroStub: public CodeStub {
  private:
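
Note: PredicateFailedStub is the out-of-line target for the predicates that range check elimination inserts: reaching it means an optimistic assumption failed, and emit_code() deoptimizes using the stored CodeEmitInfo. A hedged sketch of the intended LIRGenerator-side use, patterned on the unconditional branch-to-stub idiom visible in the c1_LIRGenerator.cpp hunk near the end of this patch (the two-argument state_for and the __ shorthand are assumed from context):

    // Sketch only: when a predicate is known to fail, branch straight
    // into the stub; its emit_code() performs the deoptimization.
    CodeEmitInfo* info = state_for(x, x->state_before());
    CodeStub* stub = new PredicateFailedStub(info);
    __ branch(lir_cond_always, T_ILLEGAL, stub);
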
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -33,13 +33,16 @@
 #include "c1/c1_ValueStack.hpp"
 #include "code/debugInfoRec.hpp"
 #include "compiler/compileLog.hpp"
+#include "c1/c1_RangeCheckElimination.hpp"
 
 
 typedef enum {
   _t_compile,
   _t_setup,
-  _t_optimizeIR,
   _t_buildIR,
+  _t_optimize_blocks,
+  _t_optimize_null_checks,
+  _t_rangeCheckElimination,
   _t_emit_lir,
   _t_linearScan,
   _t_lirGeneration,
@@ -52,8 +55,10 @@
 static const char * timer_name[] = {
   "compile",
   "setup",
-  "optimizeIR",
   "buildIR",
+  "optimize_blocks",
+  "optimize_null_checks",
+  "rangeCheckElimination",
   "emit_lir",
   "linearScan",
   "lirGeneration",
@@ -159,9 +164,9 @@
   if (UseC1Optimizations) {
     NEEDS_CLEANUP
     // optimization
-    PhaseTraceTime timeit(_t_optimizeIR);
+    PhaseTraceTime timeit(_t_optimize_blocks);
 
-    _hir->optimize();
+    _hir->optimize_blocks();
   }
 
   _hir->verify();
@@ -180,13 +185,47 @@
   _hir->compute_code();
 
   if (UseGlobalValueNumbering) {
-    ResourceMark rm;
+    // No resource mark here! LoopInvariantCodeMotion can allocate ValueStack objects.
     int instructions = Instruction::number_of_instructions();
     GlobalValueNumbering gvn(_hir);
     assert(instructions == Instruction::number_of_instructions(),
            "shouldn't have created an instructions");
   }
 
+  _hir->verify();
+
+#ifndef PRODUCT
+  if (PrintCFGToFile) {
+    CFGPrinter::print_cfg(_hir, "Before RangeCheckElimination", true, false);
+  }
+#endif
+
+  if (RangeCheckElimination) {
+    if (_hir->osr_entry() == NULL) {
+      PhaseTraceTime timeit(_t_rangeCheckElimination);
+      RangeCheckElimination::eliminate(_hir);
+    }
+  }
+
+#ifndef PRODUCT
+  if (PrintCFGToFile) {
+    CFGPrinter::print_cfg(_hir, "After RangeCheckElimination", true, false);
+  }
+#endif
+
+  if (UseC1Optimizations) {
+    // Loop invariant code motion reorders instructions, and range check
+    // elimination adds new instructions, so do null check elimination
+    // afterwards.
+    NEEDS_CLEANUP
+    // optimization
+    PhaseTraceTime timeit(_t_optimize_null_checks);
+
+    _hir->eliminate_null_checks();
+  }
+
+  _hir->verify();
+
   // compute use counts after global value numbering
   _hir->compute_use_counts();
 
@@ -502,6 +541,7 @@
 , _next_id(0)
 , _next_block_id(0)
 , _code(buffer_blob)
+, _has_access_indexed(false)
 , _current_instruction(NULL)
 #ifndef PRODUCT
 , _last_instruction_printed(NULL)
@@ -567,7 +607,9 @@
   tty->print_cr("    Detailed C1 Timings");
   tty->print_cr("       Setup time:        %6.3f s (%4.1f%%)",    timers[_t_setup].seconds(),           (timers[_t_setup].seconds() / total) * 100.0);
   tty->print_cr("       Build IR:          %6.3f s (%4.1f%%)",    timers[_t_buildIR].seconds(),         (timers[_t_buildIR].seconds() / total) * 100.0);
-  tty->print_cr("         Optimize:           %6.3f s (%4.1f%%)", timers[_t_optimizeIR].seconds(),      (timers[_t_optimizeIR].seconds() / total) * 100.0);
+  float t_optimizeIR = timers[_t_optimize_blocks].seconds() + timers[_t_optimize_null_checks].seconds();
+  tty->print_cr("         Optimize:           %6.3f s (%4.1f%%)", t_optimizeIR,                         (t_optimizeIR / total) * 100.0);
+  tty->print_cr("         RCE:                %6.3f s (%4.1f%%)", timers[_t_rangeCheckElimination].seconds(),      (timers[_t_rangeCheckElimination].seconds() / total) * 100.0);
   tty->print_cr("       Emit LIR:          %6.3f s (%4.1f%%)",    timers[_t_emit_lir].seconds(),        (timers[_t_emit_lir].seconds() / total) * 100.0);
   tty->print_cr("         LIR Gen:          %6.3f s (%4.1f%%)",   timers[_t_lirGeneration].seconds(), (timers[_t_lirGeneration].seconds() / total) * 100.0);
   tty->print_cr("         Linear Scan:      %6.3f s (%4.1f%%)",   timers[_t_linearScan].seconds(),    (timers[_t_linearScan].seconds() / total) * 100.0);
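
Note: taken together, these Compilation hunks reorder the C1 pipeline: the old monolithic optimizeIR phase is split, and null check elimination moves after global value numbering and the new range check elimination pass, because loop invariant code motion reorders instructions and RCE inserts new ones. In outline, using only names that appear above (a summary, not compilable in isolation):

    // Resulting phase order inside Compilation::build_hir (sketch):
    _hir->optimize_blocks();                   // timed as _t_optimize_blocks
    // ... compute_code(), then GVN -- with no ResourceMark, since
    //     LoopInvariantCodeMotion allocates ValueStack objects ...
    if (RangeCheckElimination && _hir->osr_entry() == NULL) {
      RangeCheckElimination::eliminate(_hir);  // may move code, add predicates
    }
    _hir->eliminate_null_checks();             // last, so it sees the final IR
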
--- a/hotspot/src/share/vm/c1/c1_Compilation.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_Compilation.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -26,8 +26,10 @@
 #define SHARE_VM_C1_C1_COMPILATION_HPP
 
 #include "ci/ciEnv.hpp"
+#include "ci/ciMethodData.hpp"
 #include "code/exceptionHandlerTable.hpp"
 #include "memory/resourceArea.hpp"
+#include "runtime/deoptimization.hpp"
 
 class CompilationResourceObj;
 class XHandlers;
@@ -85,6 +87,7 @@
   LinearScan*        _allocator;
   CodeOffsets        _offsets;
   CodeBuffer         _code;
+  bool               _has_access_indexed;
 
   // compilation helpers
   void initialize();
@@ -140,6 +143,7 @@
   C1_MacroAssembler* masm() const                { return _masm; }
   CodeOffsets* offsets()                         { return &_offsets; }
   Arena* arena()                                 { return _arena; }
+  bool has_access_indexed()                      { return _has_access_indexed; }
 
   // Instruction ids
   int get_next_id()                              { return _next_id++; }
@@ -154,6 +158,7 @@
   void set_has_fpu_code(bool f)                  { _has_fpu_code = f; }
   void set_has_unsafe_access(bool f)             { _has_unsafe_access = f; }
   void set_would_profile(bool f)                 { _would_profile = f; }
+  void set_has_access_indexed(bool f)            { _has_access_indexed = f; }
   // Add a set of exception handlers covering the given PC offset
   void add_exception_handlers_for_pco(int pco, XHandlers* exception_handlers);
   // Statistics gathering
@@ -233,6 +238,14 @@
     return env()->comp_level() == CompLevel_full_profile &&
       C1UpdateMethodData && C1ProfileCheckcasts;
   }
+
+  // Will the compilation make optimistic assumptions that might lead to
+  // deoptimization and that the runtime will account for?
+  bool is_optimistic() const                             {
+    return !TieredCompilation &&
+      (RangeCheckElimination || UseLoopInvariantCodeMotion) &&
+      method()->method_data()->trap_count(Deoptimization::Reason_none) == 0;
+  }
 };
 
 
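
Note: is_optimistic() is the switch for every speculative state copy this patch introduces: on a first compile (no traps recorded in the MethodData) C1 keeps extra states so the new predicates can deoptimize safely; once a trap has been recorded, a recompile stops making the optimistic assumptions, which prevents deopt/recompile loops. The GraphBuilder helpers added later in this patch consume it:

    // From the c1_GraphBuilder.hpp hunk below, quoted for orientation:
    ValueStack* copy_state_if_bb(bool is_bb) {
      return (is_bb || compilation()->is_optimistic()) ? copy_state_before()
                                                       : NULL;
    }
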
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -947,7 +947,9 @@
 
 
 void GraphBuilder::load_indexed(BasicType type) {
-  ValueStack* state_before = copy_state_for_exception();
+  // In case of in-block code motion in range check elimination
+  ValueStack* state_before = copy_state_indexed_access();
+  compilation()->set_has_access_indexed(true);
   Value index = ipop();
   Value array = apop();
   Value length = NULL;
@@ -961,7 +963,9 @@
 
 
 void GraphBuilder::store_indexed(BasicType type) {
-  ValueStack* state_before = copy_state_for_exception();
+  // In case of in-block code motion in range check elimination
+  ValueStack* state_before = copy_state_indexed_access();
+  compilation()->set_has_access_indexed(true);
   Value value = pop(as_ValueType(type));
   Value index = ipop();
   Value array = apop();
@@ -1179,7 +1183,9 @@
   BlockBegin* tsux = block_at(stream()->get_dest());
   BlockBegin* fsux = block_at(stream()->next_bci());
   bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
-  Instruction *i = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb));
+  // In case of loop invariant code motion or predicate insertion
+  // before the body of a loop, the state is needed
+  Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic()) ? state_before : NULL, is_bb));
 
   assert(i->as_Goto() == NULL ||
          (i->as_Goto()->sux_at(0) == tsux  && i->as_Goto()->is_safepoint() == tsux->bci() < stream()->cur_bci()) ||
@@ -1294,7 +1300,9 @@
     BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
     BlockBegin* fsux = block_at(bci() + sw.default_offset());
     bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
-    ValueStack* state_before = is_bb ? copy_state_before() : NULL;
+    // In case of loop invariant code motion or predicate insertion
+    // before the body of a loop, the state is needed
+    ValueStack* state_before = copy_state_if_bb(is_bb);
     append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
   } else {
     // collect successors
@@ -1308,7 +1316,9 @@
     // add default successor
     if (sw.default_offset() < 0) has_bb = true;
     sux->at_put(i, block_at(bci() + sw.default_offset()));
-    ValueStack* state_before = has_bb ? copy_state_before() : NULL;
+    // In case of loop invariant code motion or predicate insertion
+    // before the body of a loop, the state is needed
+    ValueStack* state_before = copy_state_if_bb(has_bb);
     Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
 #ifdef ASSERT
     if (res->as_Goto()) {
@@ -1336,7 +1346,9 @@
     BlockBegin* tsux = block_at(bci() + pair.offset());
     BlockBegin* fsux = block_at(bci() + sw.default_offset());
     bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
-    ValueStack* state_before = is_bb ? copy_state_before() : NULL;
+    // In case of loop invariant code motion or predicate insertion
+    // before the body of a loop, the state is needed
+    ValueStack* state_before = copy_state_if_bb(is_bb);
     append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
   } else {
     // collect successors & keys
@@ -1353,7 +1365,9 @@
     // add default successor
     if (sw.default_offset() < 0) has_bb = true;
     sux->at_put(i, block_at(bci() + sw.default_offset()));
-    ValueStack* state_before = has_bb ? copy_state_before() : NULL;
+    // In case of loop invariant code motion or predicate insertion
+    // before the body of a loop, the state is needed
+    ValueStack* state_before = copy_state_if_bb(has_bb);
     Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
 #ifdef ASSERT
     if (res->as_Goto()) {
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -301,6 +301,8 @@
   ValueStack* copy_state_exhandling();
   ValueStack* copy_state_for_exception_with_bci(int bci);
   ValueStack* copy_state_for_exception();
+  ValueStack* copy_state_if_bb(bool is_bb) { return (is_bb || compilation()->is_optimistic()) ? copy_state_before() : NULL; }
+  ValueStack* copy_state_indexed_access() { return compilation()->is_optimistic() ? copy_state_before() : copy_state_for_exception(); }
 
   //
   // Inlining support
--- a/hotspot/src/share/vm/c1/c1_IR.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_IR.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -182,13 +182,14 @@
 // Implementation of CodeEmitInfo
 
 // Stack must be NON-null
-CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers)
+CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception)
   : _scope(stack->scope())
   , _scope_debug_info(NULL)
   , _oop_map(NULL)
   , _stack(stack)
   , _exception_handlers(exception_handlers)
-  , _is_method_handle_invoke(false) {
+  , _is_method_handle_invoke(false)
+  , _deoptimize_on_exception(deoptimize_on_exception) {
   assert(_stack != NULL, "must be non null");
 }
 
@@ -199,7 +200,8 @@
   , _scope_debug_info(NULL)
   , _oop_map(NULL)
   , _stack(stack == NULL ? info->_stack : stack)
-  , _is_method_handle_invoke(info->_is_method_handle_invoke) {
+  , _is_method_handle_invoke(info->_is_method_handle_invoke)
+  , _deoptimize_on_exception(info->_deoptimize_on_exception) {
 
   // deep copy of exception handlers
   if (info->_exception_handlers != NULL) {
@@ -239,7 +241,7 @@
 }
 
 
-void IR::optimize() {
+void IR::optimize_blocks() {
   Optimizer opt(this);
   if (!compilation()->profile_branches()) {
     if (DoCEE) {
@@ -257,6 +259,10 @@
 #endif
     }
   }
+}
+
+void IR::eliminate_null_checks() {
+  Optimizer opt(this);
   if (EliminateNullChecks) {
     opt.eliminate_null_checks();
 #ifndef PRODUCT
@@ -429,6 +435,7 @@
   BlockList  _loop_end_blocks;     // list of all loop end blocks collected during count_edges
   BitMap2D   _loop_map;            // two-dimensional bit set: a bit is set if a block is contained in a loop
   BlockList  _work_list;           // temporary list (used in mark_loops and compute_order)
+  BlockList  _loop_headers;
 
   Compilation* _compilation;
 
@@ -594,6 +601,7 @@
     TRACE_LINEAR_SCAN(3, tty->print_cr("Block B%d is loop header of loop %d", cur->block_id(), _num_loops));
 
     cur->set_loop_index(_num_loops);
+    _loop_headers.append(cur);
     _num_loops++;
   }
 
@@ -656,6 +664,16 @@
       // -> this is not a natural loop, so ignore it
       TRACE_LINEAR_SCAN(2, tty->print_cr("Loop %d is non-natural, so it is ignored", i));
 
+      BlockBegin *loop_header = _loop_headers.at(i);
+      assert(loop_header->is_set(BlockBegin::linear_scan_loop_header_flag), "Must be loop header");
+
+      for (int j = 0; j < loop_header->number_of_preds(); j++) {
+        BlockBegin *pred = loop_header->pred_at(j);
+        pred->clear(BlockBegin::linear_scan_loop_end_flag);
+      }
+
+      loop_header->clear(BlockBegin::linear_scan_loop_header_flag);
+
       for (int block_id = _max_block_id - 1; block_id >= 0; block_id--) {
         clear_block_in_loop(i, block_id);
       }
@@ -729,9 +747,20 @@
 
   } else if (!(cur->is_set(BlockBegin::linear_scan_loop_header_flag) && parent->is_set(BlockBegin::linear_scan_loop_end_flag))) {
     TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: computing dominator of B%d: common dominator of B%d and B%d is B%d", cur->block_id(), parent->block_id(), cur->dominator()->block_id(), common_dominator(cur->dominator(), parent)->block_id()));
-    assert(cur->number_of_preds() > 1, "");
+    // Does not hold for exception blocks
+    assert(cur->number_of_preds() > 1 || cur->is_set(BlockBegin::exception_entry_flag), "");
     cur->set_dominator(common_dominator(cur->dominator(), parent));
   }
+
+  // Additional edge to the exception handlers of all our successors:
+  // range check elimination requires that the state at the end of a
+  // block be valid in every block it dominates, so cur must dominate
+  // the exception handlers of its successors.
+  int num_cur_xhandler = cur->number_of_exception_handlers();
+  for (int j = 0; j < num_cur_xhandler; j++) {
+    BlockBegin* xhandler = cur->exception_handler_at(j);
+    compute_dominator(xhandler, parent);
+  }
 }
 
 
@@ -898,7 +927,6 @@
     num_sux = cur->number_of_exception_handlers();
     for (i = 0; i < num_sux; i++) {
       BlockBegin* sux = cur->exception_handler_at(i);
-      compute_dominator(sux, cur);
       if (ready_for_processing(sux)) {
         sort_into_work_list(sux);
       }
@@ -918,8 +946,23 @@
 
     BlockBegin* dominator = block->pred_at(0);
     int num_preds = block->number_of_preds();
-    for (int i = 1; i < num_preds; i++) {
-      dominator = common_dominator(dominator, block->pred_at(i));
+
+    TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: Processing B%d", block->block_id()));
+
+    for (int j = 0; j < num_preds; j++) {
+
+      BlockBegin *pred = block->pred_at(j);
+      TRACE_LINEAR_SCAN(4, tty->print_cr("   DOM: Subprocessing B%d", pred->block_id()));
+
+      if (block->is_set(BlockBegin::exception_entry_flag)) {
+        dominator = common_dominator(dominator, pred);
+        int num_pred_preds = pred->number_of_preds();
+        for (int k = 0; k < num_pred_preds; k++) {
+          dominator = common_dominator(dominator, pred->pred_at(k));
+        }
+      } else {
+        dominator = common_dominator(dominator, pred);
+      }
     }
 
     if (dominator != block->dominator()) {
@@ -946,6 +989,21 @@
 
   // check that dominators are correct
   assert(!compute_dominators_iter(), "fix point not reached");
+
+  // Add blocks to the dominates array of their dominator
+  int num_blocks = _linear_scan_order->length();
+  for (int i = 0; i < num_blocks; i++) {
+    BlockBegin* block = _linear_scan_order->at(i);
+
+    BlockBegin *dom = block->dominator();
+    if (dom) {
+      assert(dom->dominator_depth() != -1, "Dominator must have been visited before");
+      dom->dominates()->append(block);
+      block->set_dominator_depth(dom->dominator_depth() + 1);
+    } else {
+      block->set_dominator_depth(0);
+    }
+  }
 }
 
 
@@ -1032,7 +1090,7 @@
       BlockBegin* sux = cur->sux_at(j);
 
       assert(sux->linear_scan_number() >= 0 && sux->linear_scan_number() == _linear_scan_order->index_of(sux), "incorrect linear_scan_number");
-      if (!cur->is_set(BlockBegin::linear_scan_loop_end_flag)) {
+      if (!sux->is_set(BlockBegin::backward_branch_target_flag)) {
         assert(cur->linear_scan_number() < sux->linear_scan_number(), "invalid order");
       }
       if (cur->loop_depth() == sux->loop_depth()) {
@@ -1044,7 +1102,7 @@
       BlockBegin* pred = cur->pred_at(j);
 
       assert(pred->linear_scan_number() >= 0 && pred->linear_scan_number() == _linear_scan_order->index_of(pred), "incorrect linear_scan_number");
-      if (!cur->is_set(BlockBegin::linear_scan_loop_header_flag)) {
+      if (!cur->is_set(BlockBegin::backward_branch_target_flag)) {
         assert(cur->linear_scan_number() > pred->linear_scan_number(), "invalid order");
       }
       if (cur->loop_depth() == pred->loop_depth()) {
@@ -1060,7 +1118,8 @@
     } else {
       assert(cur->dominator() != NULL, "all but first block must have dominator");
     }
-    assert(cur->number_of_preds() != 1 || cur->dominator() == cur->pred_at(0), "Single predecessor must also be dominator");
+    // Assertion does not hold for exception handlers
+    assert(cur->number_of_preds() != 1 || cur->dominator() == cur->pred_at(0) || cur->is_set(BlockBegin::exception_entry_flag), "Single predecessor must also be dominator");
   }
 
   // check that all loops are continuous
@@ -1249,9 +1308,22 @@
   }
 };
 
+class VerifyBlockBeginField : public BlockClosure {
+
+public:
+
+  virtual void block_do(BlockBegin *block) {
+    for (Instruction *cur = block; cur != NULL; cur = cur->next()) {
+      assert(cur->block() == block, "Block begin is not correct");
+    }
+  }
+};
+
 void IR::verify() {
 #ifdef ASSERT
   PredecessorValidator pv(this);
+  VerifyBlockBeginField verifier;
+  this->iterate_postorder(&verifier);
 #endif
 }
 
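
Note: the new dominates() lists and dominator_depth values give range check elimination cheap dominator-tree traversal. With depths stored, the common dominator of two blocks reduces to the standard depth-equalizing walk; a self-contained sketch of that technique (not HotSpot's exact implementation):

    #include <cassert>
    #include <cstddef>

    struct Block {
      Block* dominator;     // immediate dominator, NULL for the start block
      int dominator_depth;  // 0 for the start block, as assigned above
    };

    static Block* common_dominator(Block* a, Block* b) {
      // Lift the deeper block until both sit at the same depth...
      while (a->dominator_depth > b->dominator_depth) a = a->dominator;
      while (b->dominator_depth > a->dominator_depth) b = b->dominator;
      // ...then climb in lock-step until the walks meet.
      while (a != b) { a = a->dominator; b = b->dominator; }
      assert(a != NULL && "blocks of one CFG always share a dominator");
      return a;
    }
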
--- a/hotspot/src/share/vm/c1/c1_IR.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_IR.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -254,6 +254,7 @@
   OopMap*           _oop_map;
   ValueStack*       _stack;                      // used by deoptimization (contains also monitors
   bool              _is_method_handle_invoke;    // true if the associated call site is a MethodHandle call site.
+  bool              _deoptimize_on_exception;
 
   FrameMap*     frame_map() const                { return scope()->compilation()->frame_map(); }
   Compilation*  compilation() const              { return scope()->compilation(); }
@@ -261,7 +262,7 @@
  public:
 
   // use scope from ValueStack
-  CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers);
+  CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception = false);
 
   // make a copy
   CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack = NULL);
@@ -272,6 +273,7 @@
   IRScope* scope() const                         { return _scope; }
   XHandlers* exception_handlers() const          { return _exception_handlers; }
   ValueStack* stack() const                      { return _stack; }
+  bool deoptimize_on_exception() const           { return _deoptimize_on_exception; }
 
   void add_register_oop(LIR_Opr opr);
   void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);
@@ -309,7 +311,8 @@
   int              max_stack() const             { return top_scope()->max_stack(); } // expensive
 
   // ir manipulation
-  void optimize();
+  void optimize_blocks();
+  void eliminate_null_checks();
   void compute_predecessors();
   void split_critical_edges();
   void compute_code();
--- a/hotspot/src/share/vm/c1/c1_Instruction.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_Instruction.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -34,6 +34,15 @@
 // Implementation of Instruction
 
 
+int Instruction::dominator_depth() {
+  int result = -1;
+  if (block()) {
+    result = block()->dominator_depth();
+  }
+  assert(result != -1 || this->as_Local(), "Only locals have dominator depth -1");
+  return result;
+}
+
 Instruction::Condition Instruction::mirror(Condition cond) {
   switch (cond) {
     case eql: return eql;
@@ -42,6 +51,8 @@
     case leq: return geq;
     case gtr: return lss;
     case geq: return leq;
+    case aeq: return beq;
+    case beq: return aeq;
   }
   ShouldNotReachHere();
   return eql;
@@ -56,6 +67,8 @@
     case leq: return gtr;
     case gtr: return leq;
     case geq: return lss;
+    case aeq: assert(false, "Above equal cannot be negated");
+    case beq: assert(false, "Below equal cannot be negated");
   }
   ShouldNotReachHere();
   return eql;
@@ -70,10 +83,10 @@
   }
 }
 
-
-Instruction* Instruction::prev(BlockBegin* block) {
+// Prev without need to have BlockBegin
+Instruction* Instruction::prev() {
   Instruction* p = NULL;
-  Instruction* q = block;
+  Instruction* q = block();
   while (q != this) {
     assert(q != NULL, "this is not in the block's instruction list");
     p = q; q = q->next();
@@ -122,15 +135,24 @@
 
 // perform constant and interval tests on index value
 bool AccessIndexed::compute_needs_range_check() {
-  Constant* clength = length()->as_Constant();
-  Constant* cindex = index()->as_Constant();
-  if (clength && cindex) {
-    IntConstant* l = clength->type()->as_IntConstant();
-    IntConstant* i = cindex->type()->as_IntConstant();
-    if (l && i && i->value() < l->value() && i->value() >= 0) {
-      return false;
+
+  if (length()) {
+
+    Constant* clength = length()->as_Constant();
+    Constant* cindex = index()->as_Constant();
+    if (clength && cindex) {
+      IntConstant* l = clength->type()->as_IntConstant();
+      IntConstant* i = cindex->type()->as_IntConstant();
+      if (l && i && i->value() < l->value() && i->value() >= 0) {
+        return false;
+      }
     }
   }
+
+  if (!this->check_flag(NeedsRangeCheckFlag)) {
+    return false;
+  }
+
   return true;
 }
 
@@ -631,19 +653,25 @@
 // of the inserted block, without recomputing the values of the other blocks
 // in the CFG. Therefore the value of "depth_first_number" in BlockBegin becomes meaningless.
 BlockBegin* BlockBegin::insert_block_between(BlockBegin* sux) {
-  BlockBegin* new_sux = new BlockBegin(end()->state()->bci());
+  int bci = sux->bci();
+  // Critical edge splitting may introduce a goto after an if, and array
+  // bounds check elimination may insert a predicate between the if and
+  // the goto. The bci of the goto can't be the one of the if; otherwise
+  // the state and bci would be inconsistent and a deoptimization triggered
+  // by the predicate would lead to incorrect execution or a crash.
+  BlockBegin* new_sux = new BlockBegin(bci);
 
   // mark this block (special treatment when block order is computed)
   new_sux->set(critical_edge_split_flag);
 
   // This goto is not a safepoint.
   Goto* e = new Goto(sux, false);
-  new_sux->set_next(e, end()->state()->bci());
+  new_sux->set_next(e, bci);
   new_sux->set_end(e);
   // setup states
   ValueStack* s = end()->state();
-  new_sux->set_state(s->copy());
-  e->set_state(s->copy());
+  new_sux->set_state(s->copy(s->kind(), bci));
+  e->set_state(s->copy(s->kind(), bci));
   assert(new_sux->state()->locals_size() == s->locals_size(), "local size mismatch!");
   assert(new_sux->state()->stack_size() == s->stack_size(), "stack size mismatch!");
   assert(new_sux->state()->locks_size() == s->locks_size(), "locks size mismatch!");
@@ -960,15 +988,14 @@
   BlockList* sux = NULL;
   if (begin != NULL) {
     sux = begin->successors();
-  } else if (_begin != NULL) {
+  } else if (this->begin() != NULL) {
     // copy our sux list
-    BlockList* sux = new BlockList(_begin->number_of_sux());
-    for (int i = 0; i < _begin->number_of_sux(); i++) {
-      sux->append(_begin->sux_at(i));
+    sux = new BlockList(this->begin()->number_of_sux());
+    for (int i = 0; i < this->begin()->number_of_sux(); i++) {
+      sux->append(this->begin()->sux_at(i));
     }
   }
   _sux = sux;
-  _begin = begin;
 }
 
 
@@ -1008,7 +1035,38 @@
   }
 }
 
+#ifdef ASSERT
+// Constructor of Assert
+Assert::Assert(Value x, Condition cond, bool unordered_is_true, Value y) : Instruction(illegalType)
+  , _x(x)
+  , _cond(cond)
+  , _y(y)
+{
+  set_flag(UnorderedIsTrueFlag, unordered_is_true);
+  assert(x->type()->tag() == y->type()->tag(), "types must match");
+  pin();
 
+  stringStream strStream;
+  Compilation::current()->method()->print_name(&strStream);
+
+  stringStream strStream1;
+  InstructionPrinter ip1(1, &strStream1);
+  ip1.print_instr(x);
+
+  stringStream strStream2;
+  InstructionPrinter ip2(1, &strStream2);
+  ip2.print_instr(y);
+
+  stringStream ss;
+  ss.print("Assertion %s %s %s in method %s", strStream1.as_string(), ip2.cond_name(cond), strStream2.as_string(), strStream.as_string());
+
+  _message = ss.as_string();
+}
+#endif
+
+void RangeCheckPredicate::check_state() {
+  assert(state()->kind() != ValueStack::EmptyExceptionState && state()->kind() != ValueStack::ExceptionState, "will deopt with empty state");
+}
 
 void ProfileInvoke::state_values_do(ValueVisitor* f) {
   if (state() != NULL) state()->values_do(f);
--- a/hotspot/src/share/vm/c1/c1_Instruction.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_Instruction.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -110,6 +110,8 @@
 class   ProfileInvoke;
 class   RuntimeCall;
 class   MemBar;
+class   RangeCheckPredicate;
+class   Assert;
 
 // A Value is a reference to the instruction creating the value
 typedef Instruction* Value;
@@ -210,6 +212,10 @@
   virtual void do_ProfileInvoke  (ProfileInvoke*   x) = 0;
   virtual void do_RuntimeCall    (RuntimeCall*     x) = 0;
   virtual void do_MemBar         (MemBar*          x) = 0;
+  virtual void do_RangeCheckPredicate(RangeCheckPredicate* x) = 0;
+#ifdef ASSERT
+  virtual void do_Assert         (Assert*          x) = 0;
+#endif
 };
 
 
@@ -306,8 +312,9 @@
 
   void update_exception_state(ValueStack* state);
 
- //protected:
- public:
+ protected:
+  BlockBegin*  _block;                           // Block that contains this instruction
+
   void set_type(ValueType* type) {
     assert(type != NULL, "type must exist");
     _type = type;
@@ -342,6 +349,9 @@
     ThrowIncompatibleClassChangeErrorFlag,
     ProfileMDOFlag,
     IsLinkedInBlockFlag,
+    NeedsRangeCheckFlag,
+    InWorkListFlag,
+    DeoptimizeOnException,
     InstructionLastFlag
   };
 
@@ -351,7 +361,7 @@
 
   // 'globally' used condition values
   enum Condition {
-    eql, neq, lss, leq, gtr, geq
+    eql, neq, lss, leq, gtr, geq, aeq, beq
   };
 
   // Instructions may be pinned for many reasons and under certain conditions
@@ -381,6 +391,7 @@
   , _pin_state(0)
   , _type(type)
   , _next(NULL)
+  , _block(NULL)
   , _subst(NULL)
   , _flags(0)
   , _operand(LIR_OprFact::illegalOpr)
@@ -399,11 +410,13 @@
   int printable_bci() const                      { assert(has_printable_bci(), "_printable_bci should have been set"); return _printable_bci; }
   void set_printable_bci(int bci)                { _printable_bci = bci; }
 #endif
+  int dominator_depth();
   int use_count() const                          { return _use_count; }
   int pin_state() const                          { return _pin_state; }
   bool is_pinned() const                         { return _pin_state != 0 || PinAllInstructions; }
   ValueType* type() const                        { return _type; }
-  Instruction* prev(BlockBegin* block);          // use carefully, expensive operation
+  BlockBegin *block() const                      { return _block; }
+  Instruction* prev();                           // use carefully, expensive operation
   Instruction* next() const                      { return _next; }
   bool has_subst() const                         { return _subst != NULL; }
   Instruction* subst()                           { return _subst == NULL ? this : _subst->subst(); }
@@ -432,6 +445,9 @@
     assert(as_BlockEnd() == NULL, "BlockEnd instructions must have no next");
     assert(next->can_be_linked(), "shouldn't link these instructions into list");
 
+    BlockBegin *block = this->block();
+    next->_block = block;
+
     next->set_flag(Instruction::IsLinkedInBlockFlag, true);
     _next = next;
     return next;
@@ -444,6 +460,29 @@
     return set_next(next);
   }
 
+  // when blocks are merged
+  void fixup_block_pointers() {
+    Instruction *cur = next()->next(); // next()'s block is set in set_next
+    while (cur && cur->_block != block()) {
+      cur->_block = block();
+      cur = cur->next();
+    }
+  }
+
+  Instruction *insert_after(Instruction *i) {
+    Instruction* n = _next;
+    set_next(i);
+    i->set_next(n);
+    return _next;
+  }
+
+  Instruction *insert_after_same_bci(Instruction *i) {
+#ifndef PRODUCT
+    i->set_printable_bci(printable_bci());
+#endif
+    return insert_after(i);
+  }
+
   void set_subst(Instruction* subst)             {
     assert(subst == NULL ||
            type()->base() == subst->type()->base() ||
@@ -452,6 +491,7 @@
   }
   void set_exception_handlers(XHandlers *xhandlers) { _exception_handlers = xhandlers; }
   void set_exception_state(ValueStack* s)        { check_state(s); _exception_state = s; }
+  void set_state_before(ValueStack* s)           { check_state(s); _state_before = s; }
 
   // machine-specifics
   void set_operand(LIR_Opr operand)              { assert(operand != LIR_OprFact::illegalOpr, "operand must exist"); _operand = operand; }
@@ -509,6 +549,11 @@
   virtual ExceptionObject*  as_ExceptionObject() { return NULL; }
   virtual UnsafeOp*         as_UnsafeOp()        { return NULL; }
   virtual ProfileInvoke*    as_ProfileInvoke()   { return NULL; }
+  virtual RangeCheckPredicate* as_RangeCheckPredicate() { return NULL; }
+
+#ifdef ASSERT
+  virtual Assert*           as_Assert()          { return NULL; }
+#endif
 
   virtual void visit(InstructionVisitor* v)      = 0;
 
@@ -570,7 +615,6 @@
 
 LEAF(Phi, Instruction)
  private:
-  BlockBegin* _block;    // the block to which the phi function belongs
   int         _pf_flags; // the flags of the phi function
   int         _index;    // to value on operand stack (index < 0) or to local
  public:
@@ -578,9 +622,9 @@
   Phi(ValueType* type, BlockBegin* b, int index)
   : Instruction(type->base())
   , _pf_flags(0)
-  , _block(b)
   , _index(index)
   {
+    _block = b;
     NOT_PRODUCT(set_printable_bci(Value(b)->printable_bci()));
     if (type->is_illegal()) {
       make_illegal();
@@ -603,8 +647,6 @@
   Value operand_at(int i) const;
   int   operand_count() const;
 
-  BlockBegin* block() const       { return _block; }
-
   void   set(Flag f)              { _pf_flags |=  f; }
   void   clear(Flag f)            { _pf_flags &= ~f; }
   bool   is_set(Flag f) const     { return (_pf_flags & f) != 0; }
@@ -670,6 +712,7 @@
     pin();
   }
 
+  // generic
   virtual bool can_trap() const                  { return state_before() != NULL; }
   virtual void input_values_do(ValueVisitor* f)   { /* no values */ }
 
@@ -852,6 +895,7 @@
   , _length(length)
   , _elt_type(elt_type)
   {
+    set_flag(Instruction::NeedsRangeCheckFlag, true);
     ASSERT_VALUES
   }
 
@@ -860,6 +904,7 @@
   Value length() const                           { return _length; }
   BasicType elt_type() const                     { return _elt_type; }
 
+  void clear_length()                            { _length = NULL; }
   // perform elimination of range checks involving constants
   bool compute_needs_range_check();
 
@@ -1524,6 +1569,7 @@
   int        _bci;                               // start-bci of block
   int        _depth_first_number;                // number of this block in a depth-first ordering
   int        _linear_scan_number;                // number of this block in linear-scan ordering
+  int        _dominator_depth;
   int        _loop_depth;                        // the loop nesting level of this block
   int        _loop_index;                        // number of the innermost loop of this block
   int        _flags;                             // the flags associated with this block
@@ -1535,6 +1581,7 @@
   // SSA specific fields: (factor out later)
   BlockList   _successors;                       // the successors of this block
   BlockList   _predecessors;                     // the predecessors of this block
+  BlockList   _dominates;                        // list of blocks that are dominated by this block
   BlockBegin* _dominator;                        // the dominator of this block
   // SSA specific ends
   BlockEnd*  _end;                               // the last instruction of this block
@@ -1583,10 +1630,12 @@
   , _linear_scan_number(-1)
   , _loop_depth(0)
   , _flags(0)
+  , _dominator_depth(-1)
   , _dominator(NULL)
   , _end(NULL)
   , _predecessors(2)
   , _successors(2)
+  , _dominates(2)
   , _exception_handlers(1)
   , _exception_states(NULL)
   , _exception_handler_pco(-1)
@@ -1603,6 +1652,7 @@
   , _total_preds(0)
   , _stores_to_locals()
   {
+    _block = this;
 #ifndef PRODUCT
     set_printable_bci(bci);
 #endif
@@ -1612,8 +1662,10 @@
   int block_id() const                           { return _block_id; }
   int bci() const                                { return _bci; }
   BlockList* successors()                        { return &_successors; }
+  BlockList* dominates()                         { return &_dominates; }
   BlockBegin* dominator() const                  { return _dominator; }
   int loop_depth() const                         { return _loop_depth; }
+  int dominator_depth() const                    { return _dominator_depth; }
   int depth_first_number() const                 { return _depth_first_number; }
   int linear_scan_number() const                 { return _linear_scan_number; }
   BlockEnd* end() const                          { return _end; }
@@ -1634,6 +1686,7 @@
   // manipulation
   void set_dominator(BlockBegin* dom)            { _dominator = dom; }
   void set_loop_depth(int d)                     { _loop_depth = d; }
+  void set_dominator_depth(int d)                { _dominator_depth = d; }
   void set_depth_first_number(int dfn)           { _depth_first_number = dfn; }
   void set_linear_scan_number(int lsn)           { _linear_scan_number = lsn; }
   void set_end(BlockEnd* end);
@@ -1695,7 +1748,8 @@
     parser_loop_header_flag       = 1 << 7,  // set by parser to identify blocks where phi functions can not be created on demand
     critical_edge_split_flag      = 1 << 8, // set for all blocks that are introduced when critical edges are split
     linear_scan_loop_header_flag  = 1 << 9, // set during loop-detection for LinearScan
-    linear_scan_loop_end_flag     = 1 << 10  // set during loop-detection for LinearScan
+    linear_scan_loop_end_flag     = 1 << 10, // set during loop-detection for LinearScan
+    donot_eliminate_range_checks  = 1 << 11  // Set when we should not try to eliminate range checks in this block
   };
 
   void set(Flag f)                               { _flags |= f; }
@@ -1728,7 +1782,6 @@
 
 BASE(BlockEnd, StateSplit)
  private:
-  BlockBegin* _begin;
   BlockList*  _sux;
 
  protected:
@@ -1746,7 +1799,6 @@
   // creation
   BlockEnd(ValueType* type, ValueStack* state_before, bool is_safepoint)
   : StateSplit(type, state_before)
-  , _begin(NULL)
   , _sux(NULL)
   {
     set_flag(IsSafepointFlag, is_safepoint);
@@ -1754,7 +1806,8 @@
 
   // accessors
   bool is_safepoint() const                      { return check_flag(IsSafepointFlag); }
-  BlockBegin* begin() const                      { return _begin; }
+  // For compatibility with old code; new code should use block()
+  BlockBegin* begin() const                      { return _block; }
 
   // manipulation
   void set_begin(BlockBegin* begin);
@@ -1811,6 +1864,74 @@
   void set_direction(Direction d)                { _direction = d; }
 };
 
+#ifdef ASSERT
+LEAF(Assert, Instruction)
+  private:
+  Value       _x;
+  Condition   _cond;
+  Value       _y;
+  char        *_message;
+
+ public:
+  // creation
+  // unordered_is_true is valid for float/double compares only
+   Assert(Value x, Condition cond, bool unordered_is_true, Value y);
+
+  // accessors
+  Value x() const                                { return _x; }
+  Condition cond() const                         { return _cond; }
+  bool unordered_is_true() const                 { return check_flag(UnorderedIsTrueFlag); }
+  Value y() const                                { return _y; }
+  const char *message() const                    { return _message; }
+
+  // generic
+  virtual void input_values_do(ValueVisitor* f)  { f->visit(&_x); f->visit(&_y); }
+};
+#endif
+
+LEAF(RangeCheckPredicate, StateSplit)
+ private:
+  Value       _x;
+  Condition   _cond;
+  Value       _y;
+
+  void check_state();
+
+ public:
+  // creation
+  // unordered_is_true is valid for float/double compares only
+   RangeCheckPredicate(Value x, Condition cond, bool unordered_is_true, Value y, ValueStack* state) : StateSplit(illegalType)
+  , _x(x)
+  , _cond(cond)
+  , _y(y)
+  {
+    ASSERT_VALUES
+    set_flag(UnorderedIsTrueFlag, unordered_is_true);
+    assert(x->type()->tag() == y->type()->tag(), "types must match");
+    this->set_state(state);
+    check_state();
+  }
+
+  // Always deoptimize
+  RangeCheckPredicate(ValueStack* state) : StateSplit(illegalType)
+  {
+    this->set_state(state);
+    _x = _y = NULL;
+    check_state();
+  }
+
+  // accessors
+  Value x() const                                { return _x; }
+  Condition cond() const                         { return _cond; }
+  bool unordered_is_true() const                 { return check_flag(UnorderedIsTrueFlag); }
+  Value y() const                                { return _y; }
+
+  void always_fail()                             { _x = _y = NULL; }
+
+  // generic
+  virtual void input_values_do(ValueVisitor* f)  { StateSplit::input_values_do(f); f->visit(&_x); f->visit(&_y); }
+  HASHING3(RangeCheckPredicate, true, x()->subst(), y()->subst(), cond())
+};
 
 LEAF(If, BlockEnd)
  private:
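
Note: the new aeq/beq conditions are unsigned comparisons ("above or equal" / "below or equal", printed as |>=| and |<=| in the InstructionPrinter hunk below). Range check elimination wants them because a Java bounds check folds into a single unsigned compare, as in this standalone illustration:

    #include <cstdint>

    // 0 <= index && index < length collapses to one unsigned compare:
    // a negative index wraps to a huge unsigned value and fails the test.
    static bool in_bounds(int32_t index, int32_t length) {
      return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
    }
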
--- a/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -57,6 +57,8 @@
     case If::leq: return "<=";
     case If::gtr: return ">";
     case If::geq: return ">=";
+    case If::aeq: return "|>=|";
+    case If::beq: return "|<=|";
   }
   ShouldNotReachHere();
   return NULL;
@@ -181,6 +183,11 @@
   output()->put('[');
   print_value(indexed->index());
   output()->put(']');
+  if (indexed->length() != NULL) {
+    output()->put('(');
+    print_value(indexed->length());
+    output()->put(')');
+  }
 }
 
 
@@ -373,6 +380,7 @@
 void InstructionPrinter::do_LoadField(LoadField* x) {
   print_field(x);
   output()->print(" (%c)", type2char(x->field()->type()->basic_type()));
+  output()->print(" %s", x->field()->name()->as_utf8());
 }
 
 
@@ -381,6 +389,7 @@
   output()->print(" := ");
   print_value(x->value());
   output()->print(" (%c)", type2char(x->field()->type()->basic_type()));
+  output()->print(" %s", x->field()->name()->as_utf8());
 }
 
 
@@ -393,6 +402,9 @@
 void InstructionPrinter::do_LoadIndexed(LoadIndexed* x) {
   print_indexed(x);
   output()->print(" (%c)", type2char(x->elt_type()));
+  if (x->check_flag(Instruction::NeedsRangeCheckFlag)) {
+    output()->print(" [rc]");
+  }
 }
 
 
@@ -401,6 +413,9 @@
   output()->print(" := ");
   print_value(x->value());
   output()->print(" (%c)", type2char(x->elt_type()));
+  if (x->check_flag(Instruction::NeedsRangeCheckFlag)) {
+    output()->print(" [rc]");
+  }
 }
 
 void InstructionPrinter::do_NegateOp(NegateOp* x) {
@@ -843,6 +858,25 @@
   output()->put(')');
 }
 
+void InstructionPrinter::do_RangeCheckPredicate(RangeCheckPredicate* x) {
+
+  if (x->x() != NULL && x->y() != NULL) {
+    output()->print("if ");
+    print_value(x->x());
+    output()->print(" %s ", cond_name(x->cond()));
+    print_value(x->y());
+    output()->print(" then deoptimize!");
+  } else {
+    output()->print("always deoptimize!");
+  }
+}
+
+void InstructionPrinter::do_Assert(Assert* x) {
+  output()->print("assert ");
+  print_value(x->x());
+  output()->print(" %s ", cond_name(x->cond()));
+  print_value(x->y());
+}
 
 void InstructionPrinter::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
   print_unsafe_object_op(x, "UnsafePrefetchWrite");
--- a/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -135,6 +135,8 @@
   virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   virtual void do_RuntimeCall    (RuntimeCall*     x);
   virtual void do_MemBar         (MemBar*          x);
+  virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
+  virtual void do_Assert         (Assert*          x);
 };
 #endif // PRODUCT
 
--- a/hotspot/src/share/vm/c1/c1_LIR.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIR.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -633,6 +633,7 @@
     case lir_ushr:
     case lir_xadd:
     case lir_xchg:
+    case lir_assert:
     {
       assert(op->as_Op2() != NULL, "must be");
       LIR_Op2* op2 = (LIR_Op2*)op;
@@ -1112,6 +1113,11 @@
   }
 }
 
+#ifdef ASSERT
+void LIR_OpAssert::emit_code(LIR_Assembler* masm) {
+  masm->emit_assert(this);
+}
+#endif
 
 void LIR_OpDelay::emit_code(LIR_Assembler* masm) {
   masm->emit_delay(this);
@@ -1771,6 +1777,8 @@
      case lir_cas_int:               s = "cas_int";      break;
      // LIR_OpProfileCall
      case lir_profile_call:          s = "profile_call";  break;
+     // LIR_OpAssert
+     case lir_assert:                s = "assert";        break;
      case lir_none:                  ShouldNotReachHere();break;
     default:                         s = "illegal_op";    break;
   }
@@ -2017,6 +2025,13 @@
   out->print("[lbl:0x%x]", stub()->entry());
 }
 
+void LIR_OpAssert::print_instr(outputStream* out) const {
+  print_condition(out, condition()); out->print(" ");
+  in_opr1()->print(out);             out->print(" ");
+  in_opr2()->print(out);             out->print(", \"");
+  out->print(msg());                 out->print("\"");
+}
+
 
 void LIR_OpDelay::print_instr(outputStream* out) const {
   _op->print_on(out);
--- a/hotspot/src/share/vm/c1/c1_LIR.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIR.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -881,6 +881,7 @@
 class    LIR_OpTypeCheck;
 class    LIR_OpCompareAndSwap;
 class    LIR_OpProfileCall;
+class    LIR_OpAssert;
 
 
 // LIR operation codes
@@ -1000,6 +1001,9 @@
   , begin_opMDOProfile
     , lir_profile_call
   , end_opMDOProfile
+  , begin_opAssert
+    , lir_assert
+  , end_opAssert
 };
 
 
@@ -1135,6 +1139,7 @@
   virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
   virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
   virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
+  virtual LIR_OpAssert* as_OpAssert() { return NULL; }
 
   virtual void verify() const {}
 };
@@ -1623,7 +1628,7 @@
     , _tmp3(LIR_OprFact::illegalOpr)
     , _tmp4(LIR_OprFact::illegalOpr)
     , _tmp5(LIR_OprFact::illegalOpr) {
-    assert(code == lir_cmp, "code check");
+    assert(code == lir_cmp || code == lir_assert, "code check");
   }
 
   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
@@ -1683,7 +1688,7 @@
   LIR_Opr tmp4_opr() const                       { return _tmp4; }
   LIR_Opr tmp5_opr() const                       { return _tmp5; }
   LIR_Condition condition() const  {
-    assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition;
+    assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); return _condition;
   }
   void set_condition(LIR_Condition condition) {
     assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove");  _condition = condition;
@@ -1823,6 +1828,30 @@
   CodeEmitInfo* call_info() const { return info(); }
 };
 
+#ifdef ASSERT
+// LIR_OpAssert
+class LIR_OpAssert : public LIR_Op2 {
+ friend class LIR_OpVisitState;
+
+ private:
+  const char* _msg;
+  bool        _halt;
+
+ public:
+  LIR_OpAssert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt)
+    : LIR_Op2(lir_assert, condition, opr1, opr2)
+    , _msg(msg)
+    , _halt(halt) {
+  }
+
+  const char* msg() const                        { return _msg; }
+  bool        halt() const                       { return _halt; }
+
+  virtual void emit_code(LIR_Assembler* masm);
+  virtual LIR_OpAssert* as_OpAssert()            { return this; }
+  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
+};
+#endif
 
 // LIR_OpCompareAndSwap
 class LIR_OpCompareAndSwap : public LIR_Op {
@@ -2196,6 +2225,9 @@
 
   void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
   void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); }
+#ifdef ASSERT
+  void lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) { append(new LIR_OpAssert(condition, opr1, opr2, msg, halt)); }
+#endif
 };
 
 void print_LIR(BlockList* blocks);
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -210,6 +210,9 @@
   void arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack);
   void arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info);
   void intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op);
+#ifdef ASSERT
+  void emit_assert(LIR_OpAssert* op);
+#endif
 
   void logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest);
 
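
Note: in ASSERT builds, LIR_OpAssert lets the optimizer turn its proof obligations into runtime checks: emit_assert() compares the two operands and, if the condition fails, prints msg() and optionally halts. Using the lir_assert() factory added to LIR_List above, a hedged sketch (the operand names are placeholders, and __ stands for the usual LIR-list shorthand):

    #ifdef ASSERT
    // Sketch: verify an RCE-derived fact at runtime in debug builds.
    __ lir_assert(lir_cond_belowEqual, index_opr, upper_bound_opr,
                  "RCE: index must stay below its proven bound", /*halt=*/ true);
    #endif
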
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -403,6 +403,10 @@
 CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
   assert(state != NULL, "state must be defined");
 
+#ifndef PRODUCT
+  state->verify();
+#endif
+
   ValueStack* s = state;
   for_each_state(s) {
     if (s->kind() == ValueStack::EmptyExceptionState) {
@@ -453,7 +457,7 @@
     }
   }
 
-  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers());
+  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
 }
 
 
@@ -1792,11 +1796,18 @@
   }
 #endif
 
+  bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
   if (x->needs_null_check() &&
       (needs_patching ||
-       MacroAssembler::needs_explicit_null_check(x->offset()))) {
+       MacroAssembler::needs_explicit_null_check(x->offset()) ||
+       stress_deopt)) {
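+    // under StressLoopInvariantCodeMotion, null check a register explicitly loaded
+    // with NULL so the check always fails and the deoptimization path is exercised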
+    LIR_Opr obj = object.result();
+    if (stress_deopt) {
+      obj = new_register(T_OBJECT);
+      __ move(LIR_OprFact::oopConst(NULL), obj);
+    }
     // emit an explicit null check because the offset is too large
-    __ null_check(object.result(), new CodeEmitInfo(info));
+    __ null_check(obj, new CodeEmitInfo(info));
   }
 
   LIR_Opr reg = rlock_result(x, field_type);
@@ -1873,6 +1884,11 @@
     } else {
       info = state_for(nc);
     }
+    if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
+      LIR_Opr obj = new_register(T_OBJECT);
+      __ move(LIR_OprFact::oopConst(NULL), obj);
+      __ null_check(obj, new CodeEmitInfo(info));
+    }
   }
   __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
 }
@@ -1883,14 +1899,11 @@
   LIRItem array(x->array(), this);
   LIRItem index(x->index(), this);
   LIRItem length(this);
-  bool needs_range_check = true;
-
-  if (use_length) {
-    needs_range_check = x->compute_needs_range_check();
-    if (needs_range_check) {
-      length.set_instruction(x->length());
-      length.load_item();
-    }
+  bool needs_range_check = x->compute_needs_range_check();
+
+  if (use_length && needs_range_check) {
+    length.set_instruction(x->length());
+    length.load_item();
   }
 
   array.load_item();
@@ -1910,13 +1923,20 @@
     } else {
       null_check_info = range_check_info;
     }
+    if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
+      LIR_Opr obj = new_register(T_OBJECT);
+      __ move(LIR_OprFact::oopConst(NULL), obj);
+      __ null_check(obj, new CodeEmitInfo(null_check_info));
+    }
   }
 
   // emit array address setup early so it schedules better
   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
 
   if (GenerateRangeChecks && needs_range_check) {
-    if (use_length) {
+    if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
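+      // stress mode: branch to the range check stub unconditionally to exercise
+      // the deoptimization path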
+      __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
+    } else if (use_length) {
       // TODO: use a (modified) version of array_range_check that does not require a
       //       constant length to be loaded to a register
       __ cmp(lir_cond_belowEqual, length.result(), index.result());
@@ -2634,7 +2654,7 @@
       LIR_Opr lock = new_register(T_INT);
       __ load_stack_address_monitor(0, lock);
 
-      CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
+      CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
 
       // receiver is guaranteed non-NULL so don't need CodeEmitInfo
@@ -2644,7 +2664,7 @@
 
   // increment invocation counters if needed
   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
-    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
+    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
     increment_invocation_counter(info);
   }
 
@@ -3102,6 +3122,95 @@
   }
 }
 
+void LIRGenerator::do_Assert(Assert *x) {
+#ifdef ASSERT
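+  // Lower the HIR Assert node to a lir_assert op: x is loaded into a register, y may
+  // stay unloaded, and the condition is verified at runtime (halting on failure).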
+  ValueTag tag = x->x()->type()->tag();
+
+  LIRItem xitem(x->x(), this);
+  LIRItem yitem(x->y(), this);
+  LIRItem* xin = &xitem;
+  LIRItem* yin = &yitem;
+
+  assert(tag == intTag, "Only integer assertions are valid!");
+
+  xin->load_item();
+  yin->dont_load_item();
+
+  set_no_result(x);
+
+  LIR_Opr left = xin->result();
+  LIR_Opr right = yin->result();
+
+  __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
+#endif
+}
+
+
+void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
+
+
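+  // Three cases are handled below:
+  // 1) an operand is missing (or StressRangeCheckElimination is set): the predicate
+  //    always fails, so jump straight to the deoptimization stub;
+  // 2) both operands are integer constants: fold the comparison and deoptimize
+  //    unconditionally if the failure condition holds statically;
+  // 3) otherwise emit a compare and a conditional branch to the stub.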
+  Instruction *a = x->x();
+  Instruction *b = x->y();
+  if (!a || StressRangeCheckElimination) {
+    assert(!b || StressRangeCheckElimination, "B must also be null");
+
+    CodeEmitInfo *info = state_for(x, x->state());
+    CodeStub* stub = new PredicateFailedStub(info);
+
+    __ jump(stub);
+  } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
+    int a_int = a->type()->as_IntConstant()->value();
+    int b_int = b->type()->as_IntConstant()->value();
+
+    bool ok = false;
+
+    switch(x->cond()) {
+      case Instruction::eql: ok = (a_int == b_int); break;
+      case Instruction::neq: ok = (a_int != b_int); break;
+      case Instruction::lss: ok = (a_int < b_int); break;
+      case Instruction::leq: ok = (a_int <= b_int); break;
+      case Instruction::gtr: ok = (a_int > b_int); break;
+      case Instruction::geq: ok = (a_int >= b_int); break;
+      case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
+      case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
+      default: ShouldNotReachHere();
+    }
+
+    if (ok) {
+
+      CodeEmitInfo *info = state_for(x, x->state());
+      CodeStub* stub = new PredicateFailedStub(info);
+
+      __ jump(stub);
+    }
+  } else {
+
+    ValueTag tag = x->x()->type()->tag();
+    If::Condition cond = x->cond();
+    LIRItem xitem(x->x(), this);
+    LIRItem yitem(x->y(), this);
+    LIRItem* xin = &xitem;
+    LIRItem* yin = &yitem;
+
+    assert(tag == intTag, "Only integer deoptimizations are valid!");
+
+    xin->load_item();
+    yin->dont_load_item();
+    set_no_result(x);
+
+    LIR_Opr left = xin->result();
+    LIR_Opr right = yin->result();
+
+    CodeEmitInfo *info = state_for(x, x->state());
+    CodeStub* stub = new PredicateFailedStub(info);
+
+    __ cmp(lir_cond(cond), left, right);
+    __ branch(lir_cond(cond), right->type(), stub);
+  }
+}
+
+
 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
   LIRItemList args(1);
   LIRItem value(arg1, this);
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -412,6 +412,8 @@
     case If::leq: l = lir_cond_lessEqual;    break;
     case If::geq: l = lir_cond_greaterEqual; break;
     case If::gtr: l = lir_cond_greater;      break;
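+    // unsigned comparisons (above/below), used by range check elimination predicates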
+    case If::aeq: l = lir_cond_aboveEqual;   break;
+    case If::beq: l = lir_cond_belowEqual;   break;
     };
     return l;
   }
@@ -534,6 +536,8 @@
   virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   virtual void do_RuntimeCall    (RuntimeCall*     x);
   virtual void do_MemBar         (MemBar*          x);
+  virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
+  virtual void do_Assert         (Assert*          x);
 };
 
 
--- a/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -6231,26 +6231,29 @@
             assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
             LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;
 
-            LIR_Op2* prev_cmp = NULL;
-
-            for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
-              prev_op = instructions->at(j);
-              if(prev_op->code() == lir_cmp) {
-                assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2");
-                prev_cmp = (LIR_Op2*)prev_op;
-                assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
+            if (prev_branch->stub() == NULL) {
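+              // a branch that targets a code stub (e.g. a deoptimizing predicate)
+              // must not be negated or merged away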
+
+              LIR_Op2* prev_cmp = NULL;
+
+              for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
+                prev_op = instructions->at(j);
+                if (prev_op->code() == lir_cmp) {
+                  assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2");
+                  prev_cmp = (LIR_Op2*)prev_op;
+                  assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
+                }
               }
-            }
-            assert(prev_cmp != NULL, "should have found comp instruction for branch");
-            if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
-
-              TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
-
-              // eliminate a conditional branch to the immediate successor
-              prev_branch->change_block(last_branch->block());
-              prev_branch->negate_cond();
-              prev_cmp->set_condition(prev_branch->cond());
-              instructions->truncate(instructions->length() - 1);
+              assert(prev_cmp != NULL, "should have found comp instruction for branch");
+              if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
+
+                TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
+
+                // eliminate a conditional branch to the immediate successor
+                prev_branch->change_block(last_branch->block());
+                prev_branch->negate_cond();
+                prev_cmp->set_condition(prev_branch->cond());
+                instructions->truncate(instructions->length() - 1);
+              }
             }
           }
         }
--- a/hotspot/src/share/vm/c1/c1_Optimizer.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_Optimizer.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -178,7 +178,7 @@
   // 2) substitute conditional expression
   //    with an IfOp followed by a Goto
   // cut if_ away and get node before
-  Instruction* cur_end = if_->prev(block);
+  Instruction* cur_end = if_->prev();
 
   // append constants of true- and false-block if necessary
   // clone constants because original block must not be destroyed
@@ -202,7 +202,7 @@
   }
 
   // append Goto to successor
-  ValueStack* state_before = if_->is_safepoint() ? if_->state_before() : NULL;
+  ValueStack* state_before = if_->state_before();
   Goto* goto_ = new Goto(sux, state_before, if_->is_safepoint() || t_goto->is_safepoint() || f_goto->is_safepoint());
 
   // prepare state for Goto
@@ -367,10 +367,11 @@
 #endif
 
         // find instruction before end & append first instruction of sux block
-        Instruction* prev = end->prev(block);
+        Instruction* prev = end->prev();
         Instruction* next = sux->next();
         assert(prev->as_BlockEnd() == NULL, "must not be a BlockEnd");
         prev->set_next(next);
+        prev->fixup_block_pointers();
         sux->disconnect_from_graph();
         block->set_end(sux->end());
         // add exception handlers of deleted block, if any
@@ -533,6 +534,8 @@
   void do_ProfileInvoke  (ProfileInvoke*   x);
   void do_RuntimeCall    (RuntimeCall*     x);
   void do_MemBar         (MemBar*          x);
+  void do_RangeCheckPredicate(RangeCheckPredicate* x);
+  void do_Assert         (Assert*          x);
 };
 
 
@@ -714,6 +717,8 @@
 void NullCheckVisitor::do_ProfileInvoke  (ProfileInvoke*   x) {}
 void NullCheckVisitor::do_RuntimeCall    (RuntimeCall*     x) {}
 void NullCheckVisitor::do_MemBar         (MemBar*          x) {}
+void NullCheckVisitor::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
+void NullCheckVisitor::do_Assert         (Assert*          x) {}
 
 
 void NullCheckEliminator::visit(Value* p) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/c1/c1_RangeCheckElimination.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -0,0 +1,1517 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_ValueStack.hpp"
+#include "c1/c1_RangeCheckElimination.hpp"
+#include "c1/c1_IR.hpp"
+#include "c1/c1_Canonicalizer.hpp"
+#include "c1/c1_ValueMap.hpp"
+#include "ci/ciMethodData.hpp"
+#include "runtime/deoptimization.hpp"
+
+// Macros for the Trace and the Assertion flag
+#ifdef ASSERT
+#define TRACE_RANGE_CHECK_ELIMINATION(code) if (TraceRangeCheckElimination) { code; }
+#define ASSERT_RANGE_CHECK_ELIMINATION(code) if (AssertRangeCheckElimination) { code; }
+#define TRACE_OR_ASSERT_RANGE_CHECK_ELIMINATION(code) if (TraceRangeCheckElimination || AssertRangeCheckElimination) { code; }
+#else
+#define TRACE_RANGE_CHECK_ELIMINATION(code)
+#define ASSERT_RANGE_CHECK_ELIMINATION(code)
+#define TRACE_OR_ASSERT_RANGE_CHECK_ELIMINATION(code)
+#endif
+
+// Entry point for the optimization
+void RangeCheckElimination::eliminate(IR *ir) {
+  bool do_elimination = ir->compilation()->has_access_indexed();
+  ASSERT_RANGE_CHECK_ELIMINATION(do_elimination = true);
+  if (do_elimination) {
+    RangeCheckEliminator rce(ir);
+  }
+}
+
+// Constructor
+RangeCheckEliminator::RangeCheckEliminator(IR *ir) :
+  _bounds(Instruction::number_of_instructions(), NULL),
+  _access_indexed_info(Instruction::number_of_instructions(), NULL)
+{
+  _visitor.set_range_check_eliminator(this);
+  _ir = ir;
+  _number_of_instructions = Instruction::number_of_instructions();
+  _optimistic = ir->compilation()->is_optimistic();
+
+  TRACE_RANGE_CHECK_ELIMINATION(
+    tty->print_cr("");
+    tty->print_cr("Range check elimination");
+    ir->method()->print_name(tty);
+    tty->print_cr("");
+  );
+
+  TRACE_RANGE_CHECK_ELIMINATION(
+    tty->print_cr("optimistic=%d", (int)_optimistic);
+  );
+
+#ifdef ASSERT
+  // Verifies several conditions that must hold for the input IR. Used for debugging only.
+  TRACE_RANGE_CHECK_ELIMINATION(
+    tty->print_cr("Verification of IR . . .");
+  );
+  Verification verification(ir);
+#endif
+
+  // Set process block flags
+  // Optimization: a block is processed only if it, or one of its children in the
+  // dominator tree, contains an AccessIndexed instruction.
+  set_process_block_flags(ir->start());
+
+  // Pass over instructions in the dominator tree
+  TRACE_RANGE_CHECK_ELIMINATION(
+    tty->print_cr("Starting pass over dominator tree . . .")
+  );
+  calc_bounds(ir->start(), NULL);
+
+  TRACE_RANGE_CHECK_ELIMINATION(
+    tty->print_cr("Finished!")
+  );
+}
+
+// Instruction specific work for some instructions
+// Constant
+void RangeCheckEliminator::Visitor::do_Constant(Constant *c) {
+  IntConstant *ic = c->type()->as_IntConstant();
+  if (ic != NULL) {
+    int value = ic->value();
+    _bound = new Bound(value, NULL, value, NULL);
+  }
+}
+
+// LogicOp
+void RangeCheckEliminator::Visitor::do_LogicOp(LogicOp *lo) {
+  if (lo->type()->as_IntType() && lo->op() == Bytecodes::_iand && (lo->x()->as_Constant() || lo->y()->as_Constant())) {
+    int constant = 0;
+    Constant *c = lo->x()->as_Constant();
+    if (c != NULL) {
+      constant = c->type()->as_IntConstant()->value();
+    } else {
+      constant = lo->y()->as_Constant()->type()->as_IntConstant()->value();
+    }
+    if (constant >= 0) {
+      _bound = new Bound(0, NULL, constant, NULL);
+    }
+  }
+}
+
+// Phi
+void RangeCheckEliminator::Visitor::do_Phi(Phi *phi) {
+  if (!phi->type()->as_IntType() && !phi->type()->as_ObjectType()) return;
+
+  BlockBegin *block = phi->block();
+  int op_count = phi->operand_count();
+  bool has_upper = true;
+  bool has_lower = true;
+  assert(phi, "Phi must not be null");
+  Bound *bound = NULL;
+
+  // TODO: support more difficult phis
+  for (int i=0; i<op_count; i++) {
+    Value v = phi->operand_at(i);
+
+    if (v == phi) continue;
+
+    // Check if instruction is connected with phi itself
+    Op2 *op2 = v->as_Op2();
+    if (op2 != NULL) {
+      Value x = op2->x();
+      Value y = op2->y();
+      if ((x == phi || y == phi)) {
+        Value other = x;
+        if (other == phi) {
+          other = y;
+        }
+        ArithmeticOp *ao = v->as_ArithmeticOp();
+        if (ao != NULL && ao->op() == Bytecodes::_iadd) {
+          assert(ao->op() == Bytecodes::_iadd, "Has to be add!");
+          if (ao->type()->as_IntType()) {
+            Constant *c = other->as_Constant();
+            if (c != NULL) {
+              assert(c->type()->as_IntConstant(), "Constant has to be of type integer");
+              int value = c->type()->as_IntConstant()->value();
+              if (value == 1) {
+                has_upper = false;
+              } else if (value > 1) {
+                // step greater than 1: the value may skip past (or overflow) a
+                // bound, so neither bound can be trusted
+                has_upper = false;
+                has_lower = false;
+              } else if (value < 0) {
+                has_lower = false;
+              }
+              continue;
+            }
+          }
+        }
+      }
+    }
+
+    // No connection -> new bound
+    Bound *v_bound = _rce->get_bound(v);
+    Bound *cur_bound;
+    int cur_constant = 0;
+    Value cur_value = v;
+
+    if (v->type()->as_IntConstant()) {
+      cur_constant = v->type()->as_IntConstant()->value();
+      cur_value = NULL;
+    }
+    if (!v_bound->has_upper() || !v_bound->has_lower()) {
+      cur_bound = new Bound(cur_constant, cur_value, cur_constant, cur_value);
+    } else {
+      cur_bound = v_bound;
+    }
+    if (cur_bound) {
+      if (!bound) {
+        bound = cur_bound->copy();
+      } else {
+        bound->or_op(cur_bound);
+      }
+    } else {
+      // No bound!
+      bound = NULL;
+      break;
+    }
+  }
+
+  if (bound) {
+    if (!has_upper) {
+      bound->remove_upper();
+    }
+    if (!has_lower) {
+      bound->remove_lower();
+    }
+    _bound = bound;
+  } else {
+    _bound = new Bound();
+  }
+}
+
+
+// ArithmeticOp
+void RangeCheckEliminator::Visitor::do_ArithmeticOp(ArithmeticOp *ao) {
+  Value x = ao->x();
+  Value y = ao->y();
+
+  if (ao->op() == Bytecodes::_irem) {
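+    // x % y with x known non-negative and y an array length yields a result in [0, y - 1]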
+    Bound* x_bound = _rce->get_bound(x);
+    Bound* y_bound = _rce->get_bound(y);
+    if (x_bound->lower() >= 0 && x_bound->lower_instr() == NULL && y->as_ArrayLength() != NULL) {
+      _bound = new Bound(0, NULL, -1, y);
+    } else {
+      _bound = new Bound();
+    }
+  } else if (!x->as_Constant() || !y->as_Constant()) {
+    assert(!x->as_Constant() || !y->as_Constant(), "One of the operands must be non-constant!");
+    if (((x->as_Constant() || y->as_Constant()) && (ao->op() == Bytecodes::_iadd)) || (y->as_Constant() && ao->op() == Bytecodes::_isub)) {
+      assert(ao->op() == Bytecodes::_iadd || ao->op() == Bytecodes::_isub, "Operand must be iadd or isub");
+
+      if (y->as_Constant()) {
+        Value tmp = x;
+        x = y;
+        y = tmp;
+      }
+      assert(x->as_Constant()->type()->as_IntConstant(), "Constant must be int constant!");
+
+      // Constant now in x
+      int const_value = x->as_Constant()->type()->as_IntConstant()->value();
+      if (ao->op() == Bytecodes::_iadd || const_value != min_jint) {
+        if (ao->op() == Bytecodes::_isub) {
+          const_value = -const_value;
+        }
+
+        Bound * bound = _rce->get_bound(y);
+        if (bound->has_upper() && bound->has_lower()) {
+          int new_lower = bound->lower() + const_value;
+          jlong new_lowerl = ((jlong)bound->lower()) + const_value;
+          int new_upper = bound->upper() + const_value;
+          jlong new_upperl = ((jlong)bound->upper()) + const_value;
+
+          if (((jlong)new_lower) == new_lowerl && ((jlong)new_upper == new_upperl)) {
+            Bound *newBound = new Bound(new_lower, bound->lower_instr(), new_upper, bound->upper_instr());
+            _bound = newBound;
+          } else {
+            // overflow
+            _bound = new Bound();
+          }
+        } else {
+          _bound = new Bound();
+        }
+      } else {
+        _bound = new Bound();
+      }
+    } else {
+      Bound *bound = _rce->get_bound(x);
+      if (ao->op() == Bytecodes::_isub) {
+        if (bound->lower_instr() == y) {
+          _bound = new Bound(Instruction::geq, NULL, bound->lower());
+        } else {
+          _bound = new Bound();
+        }
+      } else {
+        _bound = new Bound();
+      }
+    }
+  }
+}
+
+// IfOp
+void RangeCheckEliminator::Visitor::do_IfOp(IfOp *ifOp) {
+  if (ifOp->tval()->type()->as_IntConstant() && ifOp->fval()->type()->as_IntConstant()) {
+    int min = ifOp->tval()->type()->as_IntConstant()->value();
+    int max = ifOp->fval()->type()->as_IntConstant()->value();
+    if (min > max) {
+      int tmp = min;
+      min = max;
+      max = tmp;
+    }
+    _bound = new Bound(min, NULL, max, NULL);
+  }
+}
+
+// Get bound. Returns the current bound on Value v. Normally this is the topmost element on the bound stack.
+RangeCheckEliminator::Bound *RangeCheckEliminator::get_bound(Value v) {
+  // Wrong type or NULL -> No bound
+  if (!v || (!v->type()->as_IntType() && !v->type()->as_ObjectType())) return NULL;
+
+  if (!_bounds[v->id()]) {
+    // First (default) bound is calculated
+    // Create BoundStack
+    _bounds[v->id()] = new BoundStack();
+    _visitor.clear_bound();
+    Value visit_value = v;
+    visit_value->visit(&_visitor);
+    Bound *bound = _visitor.bound();
+    if (bound) {
+      _bounds[v->id()]->push(bound);
+    }
+    if (_bounds[v->id()]->length() == 0) {
+      assert(!(v->as_Constant() && v->type()->as_IntConstant()), "constants not handled here");
+      _bounds[v->id()]->push(new Bound());
+    }
+  } else if (_bounds[v->id()]->length() == 0) {
+    // The bound is currently being computed (recursive lookup); to avoid endless
+    // loops, report that nothing is known about it.
+    return new Bound();
+  }
+
+  // Return bound
+  return _bounds[v->id()]->top();
+}
+
+// Update bound
+void RangeCheckEliminator::update_bound(IntegerStack &pushed, Value v, Instruction::Condition cond, Value value, int constant) {
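+  // Normalize strict comparisons to inclusive ones:
+  // x > c  <=>  x >= c + 1  and  x < c  <=>  x <= c - 1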
+  if (cond == Instruction::gtr) {
+    cond = Instruction::geq;
+    constant++;
+  } else if (cond == Instruction::lss) {
+    cond = Instruction::leq;
+    constant--;
+  }
+  Bound *bound = new Bound(cond, value, constant);
+  update_bound(pushed, v, bound);
+}
+
+// Checks for loop invariance. Returns true if the instruction is outside of the loop which is identified by loop_header.
+bool RangeCheckEliminator::loop_invariant(BlockBegin *loop_header, Instruction *instruction) {
+  assert(loop_header, "Loop header must not be null!");
+  if (!instruction) return true;
+  return instruction->dominator_depth() < loop_header->dominator_depth();
+}
+
+// Update bound. Pushes a new bound onto the stack. Tries to do a conjunction with the current bound.
+void RangeCheckEliminator::update_bound(IntegerStack &pushed, Value v, Bound *bound) {
+  if (v->as_Constant()) {
+    // No bound update for constants
+    return;
+  }
+  if (!_bounds[v->id()]) {
+    get_bound(v);
+    assert(_bounds[v->id()], "Now Stack must exist");
+  }
+  Bound *top = NULL;
+  if (_bounds[v->id()]->length() > 0) {
+    top = _bounds[v->id()]->top();
+  }
+  if (top) {
+    bound->and_op(top);
+  }
+  _bounds[v->id()]->push(bound);
+  pushed.append(v->id());
+}
+
+// Add instruction + idx for in block motion
+void RangeCheckEliminator::add_access_indexed_info(InstructionList &indices, int idx, Value instruction, AccessIndexed *ai) {
+  int id = instruction->id();
+  AccessIndexedInfo *aii = _access_indexed_info[id];
+  if (aii == NULL) {
+    aii = new AccessIndexedInfo();
+    _access_indexed_info[id] = aii;
+    indices.append(instruction);
+    aii->_min = idx;
+    aii->_max = idx;
+    aii->_list = new AccessIndexedList();
+  } else if (idx >= aii->_min && idx <= aii->_max) {
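+    // the new offset already lies inside the covered [_min, _max] range, so this
+    // access needs no additional check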
+    remove_range_check(ai);
+    return;
+  }
+  aii->_min = MIN2(aii->_min, idx);
+  aii->_max = MAX2(aii->_max, idx);
+  aii->_list->append(ai);
+}
+
+// In-block motion: tries to reorder checks so that some of them become redundant.
+// Example:
+// a[i] = 0;
+// a[i+2] = 0;
+// a[i+1] = 0;
+// Here the check for a[i+1] is already covered. After the transformation, i is
+// checked only once, for i >= 0 and i+2 < a.length, before the first array access;
+// if that combined check fails, the code deoptimizes.
+void RangeCheckEliminator::in_block_motion(BlockBegin *block, AccessIndexedList &accessIndexed, InstructionList &arrays) {
+  InstructionList indices;
+
+  // Now iterate over all arrays
+  for (int i=0; i<arrays.length(); i++) {
+    int max_constant = -1;
+    AccessIndexedList list_constant;
+    Value array = arrays.at(i);
+
+    // For each AccessIndexed instruction in this block that accesses the current array.
+    for(int j=0; j<accessIndexed.length(); j++) {
+      AccessIndexed *ai = accessIndexed.at(j);
+      if (ai->array() != array || !ai->check_flag(Instruction::NeedsRangeCheckFlag)) continue;
+
+      Value index = ai->index();
+      Constant *c = index->as_Constant();
+      if (c != NULL) {
+        int constant_value = c->type()->as_IntConstant()->value();
+        if (constant_value >= 0) {
+          if (constant_value <= max_constant) {
+            // No range check needed for this
+            remove_range_check(ai);
+          } else {
+            max_constant = constant_value;
+            list_constant.append(ai);
+          }
+        }
+      } else {
+        int last_integer = 0;
+        Instruction *last_instruction = index;
+        int base = 0;
+        ArithmeticOp *ao = index->as_ArithmeticOp();
+
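+        // peel constant iadd/isub operations off the index expression, accumulating
+        // the offsets in 'base' until the non-constant base instruction is reached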
+        while (ao != NULL && (ao->x()->as_Constant() || ao->y()->as_Constant()) && (ao->op() == Bytecodes::_iadd || ao->op() == Bytecodes::_isub)) {
+          c = ao->y()->as_Constant();
+          Instruction *other = ao->x();
+          if (!c && ao->op() == Bytecodes::_iadd) {
+            c = ao->x()->as_Constant();
+            other = ao->y();
+          }
+
+          if (c) {
+            int value = c->type()->as_IntConstant()->value();
+            if (value != min_jint) {
+              if (ao->op() == Bytecodes::_isub) {
+                value = -value;
+              }
+              base += value;
+              last_integer = base;
+              last_instruction = other;
+            }
+            index = other;
+          } else {
+            break;
+          }
+          ao = index->as_ArithmeticOp();
+        }
+        add_access_indexed_info(indices, last_integer, last_instruction, ai);
+      }
+    }
+
+    // Iterate over all different indices
+    if (_optimistic) {
+      for (int i=0; i<indices.length(); i++) {
+        Instruction *index_instruction = indices.at(i);
+        AccessIndexedInfo *info = _access_indexed_info[index_instruction->id()];
+        assert(info != NULL, "Info must not be null");
+
+        // If idx < 0 and max > 0, max + idx may fall between 0 and length - 1; and
+        // if min < 0, min + idx may overflow and become >= 0. The predicate would
+        // not trigger, yet some accesses could still use a negative index. This
+        // test guarantees that, for the min and max values that are kept, the
+        // predicate cannot let any incorrect access through.
+
+        // Generate the predicate only if more than two range checks can be
+        // eliminated by it, since the predicate itself costs two comparisons.
+        if (info->_list->length() > 2 && range_cond) {
+          AccessIndexed *first = info->_list->at(0);
+          Instruction *insert_position = first->prev();
+          assert(insert_position->next() == first, "prev was calculated");
+          ValueStack *state = first->state_before();
+
+          // Load min Constant
+          Constant *min_constant = NULL;
+          if (info->_min != 0) {
+            min_constant = new Constant(new IntConstant(info->_min));
+            NOT_PRODUCT(min_constant->set_printable_bci(first->printable_bci()));
+            insert_position = insert_position->insert_after(min_constant);
+          }
+
+          // Load max Constant
+          Constant *max_constant = NULL;
+          if (info->_max != 0) {
+            max_constant = new Constant(new IntConstant(info->_max));
+            NOT_PRODUCT(max_constant->set_printable_bci(first->printable_bci()));
+            insert_position = insert_position->insert_after(max_constant);
+          }
+
+          // Load array length
+          Value length_instr = first->length();
+          if (!length_instr) {
+            ArrayLength *length = new ArrayLength(array, first->state_before()->copy());
+            length->set_exception_state(length->state_before());
+            length->set_flag(Instruction::DeoptimizeOnException, true);
+            insert_position = insert_position->insert_after_same_bci(length);
+            length_instr = length;
+          }
+
+          // Calculate lower bound
+          Instruction *lower_compare = index_instruction;
+          if (min_constant) {
+            ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, min_constant, lower_compare, false, NULL);
+            insert_position = insert_position->insert_after_same_bci(ao);
+            lower_compare = ao;
+          }
+
+          // Calculate upper bound
+          Instruction *upper_compare = index_instruction;
+          if (max_constant) {
+            ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, max_constant, upper_compare, false, NULL);
+            insert_position = insert_position->insert_after_same_bci(ao);
+            upper_compare = ao;
+          }
+
+          // Unsigned compare trick: deoptimize if (unsigned)(index + max) >= length
+          // (which also catches large negative values) or if (index + min) <= -1
+          int bci = NOT_PRODUCT(first->printable_bci()) PRODUCT_ONLY(-1);
+          insert_position = predicate(upper_compare, Instruction::aeq, length_instr, state, insert_position, bci);
+          insert_position = predicate_cmp_with_const(lower_compare, Instruction::leq, -1, state, insert_position);
+          for (int j = 0; j<info->_list->length(); j++) {
+            AccessIndexed *ai = info->_list->at(j);
+            remove_range_check(ai);
+          }
+        }
+        _access_indexed_info[index_instruction->id()] = NULL;
+      }
+      indices.clear();
+
+      if (list_constant.length() > 1) {
+        AccessIndexed *first = list_constant.at(0);
+        Instruction *insert_position = first->prev();
+        ValueStack *state = first->state_before();
+        // Load max Constant
+        Constant *constant = new Constant(new IntConstant(max_constant));
+        NOT_PRODUCT(constant->set_printable_bci(first->printable_bci()));
+        insert_position = insert_position->insert_after(constant);
+        Instruction *compare_instr = constant;
+        Value length_instr = first->length();
+        if (!length_instr) {
+          ArrayLength *length = new ArrayLength(array, state->copy());
+          length->set_exception_state(length->state_before());
+          length->set_flag(Instruction::DeoptimizeOnException, true);
+          insert_position = insert_position->insert_after_same_bci(length);
+          length_instr = length;
+        }
+        // Compare for greater than or equal to the array length
+        insert_position = predicate(compare_instr, Instruction::geq, length_instr, state, insert_position);
+        for (int j = 0; j<list_constant.length(); j++) {
+          AccessIndexed *ai = list_constant.at(j);
+          remove_range_check(ai);
+        }
+      }
+    }
+  }
+}
+
+bool RangeCheckEliminator::set_process_block_flags(BlockBegin *block) {
+  Instruction *cur = block;
+  bool process = false;
+
+  while (cur) {
+    process |= (cur->as_AccessIndexed() != NULL);
+    cur = cur->next();
+  }
+
+  BlockList *dominates = block->dominates();
+  for (int i=0; i<dominates->length(); i++) {
+    BlockBegin *next = dominates->at(i);
+    process |= set_process_block_flags(next);
+  }
+
+  if (!process) {
+    block->set(BlockBegin::donot_eliminate_range_checks);
+  }
+  return process;
+}
+
+bool RangeCheckEliminator::is_ok_for_deoptimization(Instruction *insert_position, Instruction *array_instr, Instruction *length_instr, Instruction *lower_instr, int lower, Instruction *upper_instr, int upper) {
+  bool upper_check = true;
+  assert(lower_instr || lower >= 0, "If no lower_instr is present, lower must be greater than or equal to 0");
+  assert(!lower_instr || lower_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
+  assert(!upper_instr || upper_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
+  assert(array_instr, "Array instruction must exist");
+  assert(array_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
+  assert(!length_instr || length_instr->dominator_depth() <= insert_position->dominator_depth(), "Dominator depth must be smaller");
+
+  if (upper_instr && upper_instr->as_ArrayLength() && upper_instr->as_ArrayLength()->array() == array_instr) {
+    // static check
+    if (upper >= 0) return false; // would always trigger a deopt:
+                                  // array_length + x >= array_length, x >= 0 is always true
+    upper_check = false;
+  }
+  if (lower_instr && lower_instr->as_ArrayLength() && lower_instr->as_ArrayLength()->array() == array_instr) {
+    if (lower > 0) return false;
+  }
+  if (upper_check && upper_instr && upper_instr->type()->as_ObjectType() && upper_instr == array_instr) {
+    // An object-typed upper_instr stands for the length of that object; when it is
+    // the accessed array itself, insert_deoptimization cannot handle it (see the
+    // assert there), so bail out.
+    return false;
+  }
+  return true;
+}
+
+Instruction* RangeCheckEliminator::insert_after(Instruction* insert_position, Instruction* instr, int bci) {
+  if (bci != -1) {
+    NOT_PRODUCT(instr->set_printable_bci(bci));
+    return insert_position->insert_after(instr);
+  } else {
+    return insert_position->insert_after_same_bci(instr);
+  }
+}
+
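+// Build a RangeCheckPredicate that deoptimizes when 'left cond right' holds and
+// insert it after insert_position.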
+Instruction* RangeCheckEliminator::predicate(Instruction* left, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci) {
+  RangeCheckPredicate *deoptimize = new RangeCheckPredicate(left, cond, true, right, state->copy());
+  return insert_after(insert_position, deoptimize, bci);
+}
+
+Instruction* RangeCheckEliminator::predicate_cmp_with_const(Instruction* instr, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci) {
+  Constant *const_instr = new Constant(new IntConstant(constant));
+  insert_position = insert_after(insert_position, const_instr, bci);
+  return predicate(instr, cond, const_instr, state, insert_position);
+}
+
+Instruction* RangeCheckEliminator::predicate_add(Instruction* left, int left_const, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci) {
+  Constant *constant = new Constant(new IntConstant(left_const));
+  insert_position = insert_after(insert_position, constant, bci);
+  ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, constant, left, false, NULL);
+  insert_position = insert_position->insert_after_same_bci(ao);
+  return predicate(ao, cond, right, state, insert_position);
+}
+
+Instruction* RangeCheckEliminator::predicate_add_cmp_with_const(Instruction* left, int left_const, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci) {
+  Constant *const_instr = new Constant(new IntConstant(constant));
+  insert_position = insert_after(insert_position, const_instr, bci);
+  return predicate_add(left, left_const, cond, const_instr, state, insert_position);
+}
+
+// Insert deoptimization
+void RangeCheckEliminator::insert_deoptimization(ValueStack *state, Instruction *insert_position, Instruction *array_instr, Instruction *length_instr, Instruction *lower_instr, int lower, Instruction *upper_instr, int upper, AccessIndexed *ai) {
+  assert(is_ok_for_deoptimization(insert_position, array_instr, length_instr, lower_instr, lower, upper_instr, upper), "should have been tested before");
+  bool upper_check = !(upper_instr && upper_instr->as_ArrayLength() && upper_instr->as_ArrayLength()->array() == array_instr);
+
+  int bci = NOT_PRODUCT(ai->printable_bci()) PRODUCT_ONLY(-1);
+  if (lower_instr) {
+    assert(!lower_instr->type()->as_ObjectType(), "Must not be object type");
+    if (lower == 0) {
+      // Compare for less than 0
+      insert_position = predicate_cmp_with_const(lower_instr, Instruction::lss, 0, state, insert_position, bci);
+    } else if (lower > 0) {
+      // Compare for less than 0
+      insert_position = predicate_add_cmp_with_const(lower_instr, lower, Instruction::lss, 0, state, insert_position, bci);
+    } else {
+      assert(lower < 0, "");
+      // negate and adjust by one so the test becomes lower_instr <= -(lower + 1)
+      lower++;
+      lower = -lower;
+      // Compare for less than or equal to the adjusted constant
+      insert_position = predicate_cmp_with_const(lower_instr, Instruction::leq, lower, state, insert_position, bci);
+    }
+  }
+
+  // No upper check required -> skip
+  if (!upper_check) return;
+
+  // We need to know the length of the array
+  if (!length_instr) {
+    // Load length if necessary
+    ArrayLength *length = new ArrayLength(array_instr, state->copy());
+    NOT_PRODUCT(length->set_printable_bci(ai->printable_bci()));
+    length->set_exception_state(length->state_before());
+    length->set_flag(Instruction::DeoptimizeOnException, true);
+    insert_position = insert_position->insert_after(length);
+    length_instr = length;
+  }
+
+  if (!upper_instr) {
+    // Compare for geq array.length
+    insert_position = predicate_cmp_with_const(length_instr, Instruction::leq, upper, state, insert_position, bci);
+  } else {
+    if (upper_instr->type()->as_ObjectType()) {
+      assert(state, "must not be null");
+      assert(upper_instr != array_instr, "should be");
+      ArrayLength *length = new ArrayLength(upper_instr, state->copy());
+      NOT_PRODUCT(length->set_printable_bci(ai->printable_bci()));
+      length->set_flag(Instruction::DeoptimizeOnException, true);
+      length->set_exception_state(length->state_before());
+      insert_position = insert_position->insert_after(length);
+      upper_instr = length;
+    }
+    assert(upper_instr->type()->as_IntType(), "Must not be object type!");
+
+    if (upper == 0) {
+      // Compare for geq array.length
+      insert_position = predicate(upper_instr, Instruction::geq, length_instr, state, insert_position, bci);
+    } else if (upper < 0) {
+      // Compare for geq array.length
+      insert_position = predicate_add(upper_instr, upper, Instruction::geq, length_instr, state, insert_position, bci);
+    } else {
+      assert(upper > 0, "");
+      upper = -upper;
+      // Compare for geq array.length
+      insert_position = predicate_add(length_instr, upper, Instruction::leq, upper_instr, state, insert_position, bci);
+    }
+  }
+}
+
+// Add if condition
+void RangeCheckEliminator::add_if_condition(IntegerStack &pushed, Value x, Value y, Instruction::Condition condition) {
+  if (y->as_Constant()) return;
+
+  int const_value = 0;
+  Value instr_value = x;
+  Constant *c = x->as_Constant();
+  ArithmeticOp *ao = x->as_ArithmeticOp();
+
+  if (c != NULL) {
+    const_value = c->type()->as_IntConstant()->value();
+    instr_value = NULL;
+  } else if (ao != NULL &&  (!ao->x()->as_Constant() || !ao->y()->as_Constant()) && ((ao->op() == Bytecodes::_isub && ao->y()->as_Constant()) || ao->op() == Bytecodes::_iadd)) {
+    assert(!ao->x()->as_Constant() || !ao->y()->as_Constant(), "At least one operator must be non-constant!");
+    assert(ao->op() == Bytecodes::_isub || ao->op() == Bytecodes::_iadd, "Operation has to be add or sub!");
+    c = ao->x()->as_Constant();
+    if (c != NULL) {
+      const_value = c->type()->as_IntConstant()->value();
+      instr_value = ao->y();
+    } else {
+      c = ao->y()->as_Constant();
+      if (c != NULL) {
+        const_value = c->type()->as_IntConstant()->value();
+        instr_value = ao->x();
+      }
+    }
+    if (ao->op() == Bytecodes::_isub) {
+      assert(ao->y()->as_Constant(), "1 - x not supported, only x - 1 is valid!");
+      if (const_value > min_jint) {
+        const_value = -const_value;
+      } else {
+        const_value = 0;
+        instr_value = x;
+      }
+    }
+  }
+
+  update_bound(pushed, y, condition, instr_value, const_value);
+}
+
+// Process If
+void RangeCheckEliminator::process_if(IntegerStack &pushed, BlockBegin *block, If *cond) {
+  // Only if the block is a direct true/false successor, and not both at once (which can actually occur)
+  if ((cond->tsux() == block || cond->fsux() == block) && cond->tsux() != cond->fsux()) {
+    Instruction::Condition condition = cond->cond();
+    if (cond->fsux() == block) {
+      condition = Instruction::negate(condition);
+    }
+    Value x = cond->x();
+    Value y = cond->y();
+    if (x->type()->as_IntType() && y->type()->as_IntType()) {
+      add_if_condition(pushed, y, x, condition);
+      add_if_condition(pushed, x, y, Instruction::mirror(condition));
+    }
+  }
+}
+
+// Process access indexed
+void RangeCheckEliminator::process_access_indexed(BlockBegin *loop_header, BlockBegin *block, AccessIndexed *ai) {
+  TRACE_RANGE_CHECK_ELIMINATION(
+    tty->fill_to(block->dominator_depth()*2)
+  );
+  TRACE_RANGE_CHECK_ELIMINATION(
+    tty->print_cr("Access indexed: index=%d length=%d", ai->index()->id(), (ai->length() != NULL ? ai->length()->id() :-1 ))
+  );
+
+  if (ai->check_flag(Instruction::NeedsRangeCheckFlag)) {
+    Bound *index_bound = get_bound(ai->index());
+    if (!index_bound->has_lower() || !index_bound->has_upper()) {
+      TRACE_RANGE_CHECK_ELIMINATION(
+        tty->fill_to(block->dominator_depth()*2);
+        tty->print_cr("Index instruction %d has no lower and/or no upper bound!", ai->index()->id())
+      );
+      return;
+    }
+
+    Bound *array_bound;
+    if (ai->length()) {
+      array_bound = get_bound(ai->length());
+    } else {
+      array_bound = get_bound(ai->array());
+    }
+
+    if (in_array_bound(index_bound, ai->array()) ||
+      (index_bound && array_bound && index_bound->is_smaller(array_bound) && !index_bound->lower_instr() && index_bound->lower() >= 0)) {
+        TRACE_RANGE_CHECK_ELIMINATION(
+          tty->fill_to(block->dominator_depth()*2);
+          tty->print_cr("Bounds check for instruction %d in block B%d can be fully eliminated!", ai->id(), ai->block()->block_id())
+        );
+
+        remove_range_check(ai);
+    } else if (_optimistic && loop_header) {
+      assert(ai->array(), "Array must not be null!");
+      assert(ai->index(), "Index must not be null!");
+
+      // Array instruction
+      Instruction *array_instr = ai->array();
+      if (!loop_invariant(loop_header, array_instr)) {
+        TRACE_RANGE_CHECK_ELIMINATION(
+          tty->fill_to(block->dominator_depth()*2);
+          tty->print_cr("Array %d is not loop invariant to header B%d", ai->array()->id(), loop_header->block_id())
+        );
+        return;
+      }
+
+      // Lower instruction
+      Value index_instr = ai->index();
+      Value lower_instr = index_bound->lower_instr();
+      if (!loop_invariant(loop_header, lower_instr)) {
+        TRACE_RANGE_CHECK_ELIMINATION(
+          tty->fill_to(block->dominator_depth()*2);
+          tty->print_cr("Lower instruction %d not loop invariant!", lower_instr->id())
+        );
+        return;
+      }
+      if (!lower_instr && index_bound->lower() < 0) {
+        TRACE_RANGE_CHECK_ELIMINATION(
+          tty->fill_to(block->dominator_depth()*2);
+          tty->print_cr("Lower bound smaller than 0 (%d)!", index_bound->lower())
+        );
+        return;
+      }
+
+      // Upper instruction
+      Value upper_instr = index_bound->upper_instr();
+      if (!loop_invariant(loop_header, upper_instr)) {
+        TRACE_RANGE_CHECK_ELIMINATION(
+          tty->fill_to(block->dominator_depth()*2);
+          tty->print_cr("Upper instruction %d not loop invariant!", upper_instr->id())
+        );
+        return;
+      }
+
+      // Length instruction
+      Value length_instr = ai->length();
+      if (!loop_invariant(loop_header, length_instr)) {
+        // not loop invariant: let insert_deoptimization generate a fresh length instruction
+        length_instr = NULL;
+      }
+
+      TRACE_RANGE_CHECK_ELIMINATION(
+        tty->fill_to(block->dominator_depth()*2);
+        tty->print_cr("LOOP INVARIANT access indexed %d found in block B%d!", ai->id(), ai->block()->block_id())
+      );
+
+      BlockBegin *pred_block = loop_header->dominator();
+      assert(pred_block != NULL, "Every loop header has a dominator!");
+      BlockEnd *pred_block_end = pred_block->end();
+      Instruction *insert_position = pred_block_end->prev();
+      ValueStack *state = pred_block_end->state_before();
+      if (pred_block_end->as_Goto() && state == NULL) state = pred_block_end->state();
+      assert(state, "State must not be null");
+
+      // Add deoptimization to dominator of loop header
+      TRACE_RANGE_CHECK_ELIMINATION(
+        tty->fill_to(block->dominator_depth()*2);
+        tty->print_cr("Inserting deopt at bci %d in block B%d!", state->bci(), insert_position->block()->block_id())
+      );
+
+      if (!is_ok_for_deoptimization(insert_position, array_instr, length_instr, lower_instr, index_bound->lower(), upper_instr, index_bound->upper())) {
+        TRACE_RANGE_CHECK_ELIMINATION(
+          tty->fill_to(block->dominator_depth()*2);
+          tty->print_cr("Could not eliminate because of static analysis!")
+        );
+        return;
+      }
+
+      insert_deoptimization(state, insert_position, array_instr, length_instr, lower_instr, index_bound->lower(), upper_instr, index_bound->upper(), ai);
+
+      // Finally remove the range check!
+      remove_range_check(ai);
+    }
+  }
+}
+
+void RangeCheckEliminator::remove_range_check(AccessIndexed *ai) {
+  ai->set_flag(Instruction::NeedsRangeCheckFlag, false);
+  // no range check, no need for the length instruction anymore
+  ai->clear_length();
+
+  TRACE_RANGE_CHECK_ELIMINATION(
+    tty->fill_to(ai->dominator_depth()*2);
+    tty->print_cr("Range check for instruction %d eliminated!", ai->id());
+  );
+
+  ASSERT_RANGE_CHECK_ELIMINATION(
+    Value array_length = ai->length();
+    if (!array_length) {
+      array_length = ai->array();
+      assert(array_length->type()->as_ObjectType(), "Has to be object type!");
+    }
+    int cur_constant = -1;
+    Value cur_value = array_length;
+    if (cur_value->type()->as_IntConstant()) {
+      cur_constant += cur_value->type()->as_IntConstant()->value();
+      cur_value = NULL;
+    }
+    Bound *new_index_bound = new Bound(0, NULL, cur_constant, cur_value);
+    add_assertions(new_index_bound, ai->index(), ai);
+  );
+}
+
+// Calculate bounds for instruction in this block and children blocks in the dominator tree
+void RangeCheckEliminator::calc_bounds(BlockBegin *block, BlockBegin *loop_header) {
+  // Ensures a valid loop_header
+  assert(!loop_header || loop_header->is_set(BlockBegin::linear_scan_loop_header_flag), "Loop header has to be real!");
+
+  // Tracing output
+  TRACE_RANGE_CHECK_ELIMINATION(
+    tty->fill_to(block->dominator_depth()*2);
+    tty->print_cr("Block B%d", block->block_id());
+  );
+
+  // Pushed stack for conditions
+  IntegerStack pushed;
+  // Process If
+  BlockBegin *parent = block->dominator();
+  if (parent != NULL) {
+    If *cond = parent->end()->as_If();
+    if (cond != NULL) {
+      process_if(pushed, block, cond);
+    }
+  }
+
+  // Iterate over the current block
+  InstructionList arrays;
+  AccessIndexedList accessIndexed;
+  Instruction *cur = block;
+
+  while (cur) {
+    // Ensure cur wasn't inserted during the elimination
+    if (cur->id() < this->_bounds.length()) {
+      // Process only if it is an access indexed instruction
+      AccessIndexed *ai = cur->as_AccessIndexed();
+      if (ai != NULL) {
+        process_access_indexed(loop_header, block, ai);
+        accessIndexed.append(ai);
+        if (!arrays.contains(ai->array())) {
+          arrays.append(ai->array());
+        }
+        Bound *b = get_bound(ai->index());
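+        // a completed access guarantees 0 <= index < length; push whichever part of
+        // that fact is not already known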
+        if (!b->lower_instr()) {
+          // Lower bound is constant
+          update_bound(pushed, ai->index(), Instruction::geq, NULL, 0);
+        }
+        if (!b->has_upper()) {
+          if (ai->length() && ai->length()->type()->as_IntConstant()) {
+            int value = ai->length()->type()->as_IntConstant()->value();
+            update_bound(pushed, ai->index(), Instruction::lss, NULL, value);
+          } else {
+            // No constant upper bound: use the explicit length instruction, or the
+            // array itself (standing for its length) when none is available
+            Instruction *instr = ai->length();
+            if (instr == NULL) instr = ai->array();
+            update_bound(pushed, ai->index(), Instruction::lss, instr, 0);
+          }
+        }
+      }
+    }
+    cur = cur->next();
+  }
+
+  // Output current condition stack
+  TRACE_RANGE_CHECK_ELIMINATION(dump_condition_stack(block));
+
+  // Do in block motion of range checks
+  in_block_motion(block, accessIndexed, arrays);
+
+  // Call all dominated blocks
+  for (int i=0; i<block->dominates()->length(); i++) {
+    BlockBegin *next = block->dominates()->at(i);
+    if (!next->is_set(BlockBegin::donot_eliminate_range_checks)) {
+      // if current block is a loop header and:
+      // - next block belongs to the same loop
+      // or
+      // - next block belongs to an inner loop
+      // then current block is the loop header for next block
+      if (block->is_set(BlockBegin::linear_scan_loop_header_flag) && (block->loop_index() == next->loop_index() || next->loop_depth() > block->loop_depth())) {
+        calc_bounds(next, block);
+      } else {
+        calc_bounds(next, loop_header);
+      }
+    }
+  }
+
+  // Reset stack
+  for (int i=0; i<pushed.length(); i++) {
+    _bounds[pushed[i]]->pop();
+  }
+}
+
+#ifndef PRODUCT
+// Dump condition stack
+void RangeCheckEliminator::dump_condition_stack(BlockBegin *block) {
+  for (int i=0; i<_ir->linear_scan_order()->length(); i++) {
+    BlockBegin *cur_block = _ir->linear_scan_order()->at(i);
+    Instruction *instr = cur_block;
+    for_each_phi_fun(cur_block, phi,
+                     BoundStack *bound_stack = _bounds.at(phi->id());
+                     if (bound_stack && bound_stack->length() > 0) {
+                       Bound *bound = bound_stack->top();
+                       if ((bound->has_lower() || bound->has_upper()) && (bound->lower_instr() != phi || bound->upper_instr() != phi || bound->lower() != 0 || bound->upper() != 0)) {
+                           TRACE_RANGE_CHECK_ELIMINATION(tty->fill_to(2*block->dominator_depth());
+                                                         tty->print("i%d", phi->id());
+                                                         tty->print(": ");
+                                                         bound->print();
+                                                         tty->print_cr("");
+                           );
+                         }
+                     });
+
+    while (!instr->as_BlockEnd()) {
+      if (instr->id() < _bounds.length()) {
+        BoundStack *bound_stack = _bounds.at(instr->id());
+        if (bound_stack && bound_stack->length() > 0) {
+          Bound *bound = bound_stack->top();
+          if ((bound->has_lower() || bound->has_upper()) && (bound->lower_instr() != instr || bound->upper_instr() != instr || bound->lower() != 0 || bound->upper() != 0)) {
+              TRACE_RANGE_CHECK_ELIMINATION(tty->fill_to(2*block->dominator_depth());
+                                            tty->print("i%d", instr->id());
+                                            tty->print(": ");
+                                            bound->print();
+                                            tty->print_cr("");
+              );
+          }
+        }
+      }
+      instr = instr->next();
+    }
+  }
+}
+#endif
+
+// Verification of the IR
+RangeCheckEliminator::Verification::Verification(IR *ir) : _used(BlockBegin::number_of_blocks(), false) {
+  this->_ir = ir;
+  ir->iterate_linear_scan_order(this);
+}
+
+// Verify this block
+void RangeCheckEliminator::Verification::block_do(BlockBegin *block) {
+  If *cond = block->end()->as_If();
+  // Watch out: tsux and fsux can be the same!
+  if (block->number_of_sux() > 1) {
+    for (int i=0; i<block->number_of_sux(); i++) {
+      BlockBegin *sux = block->sux_at(i);
+      BlockBegin *pred = NULL;
+      for (int j=0; j<sux->number_of_preds(); j++) {
+        BlockBegin *cur = sux->pred_at(j);
+        assert(cur != NULL, "Predecessor must not be null");
+        if (!pred) {
+          pred = cur;
+        }
+        assert(cur == pred, "Block must not have more than one predecessor if its predecessor has more than one successor");
+      }
+      assert(sux->number_of_preds() >= 1, "Block must have at least one predecessor");
+      assert(sux->pred_at(0) == block, "Wrong successor");
+    }
+  }
+
+  BlockBegin *dominator = block->dominator();
+  if (dominator) {
+    assert(block != _ir->start(), "Start block must not have a dominator!");
+    assert(can_reach(dominator, block), "Dominator cannot reach its block!");
+    assert(can_reach(_ir->start(), dominator), "Dominator is unreachable!");
+    assert(!can_reach(_ir->start(), block, dominator), "Wrong dominator! Block can be reached anyway!");
+    BlockList *all_blocks = _ir->linear_scan_order();
+    for (int i=0; i<all_blocks->length(); i++) {
+      BlockBegin *cur = all_blocks->at(i);
+      if (cur != dominator && cur != block) {
+        assert(can_reach(dominator, block, cur), "There has to be another dominator!");
+      }
+    }
+  } else {
+    assert(block == _ir->start(), "Only start block must not have a dominator");
+  }
+
+  if (block->is_set(BlockBegin::linear_scan_loop_header_flag)) {
+    int loop_index = block->loop_index();
+    BlockList *all_blocks = _ir->linear_scan_order();
+    assert(block->number_of_preds() >= 1, "Block must have at least one predecessor");
+    assert(!block->is_set(BlockBegin::exception_entry_flag), "Loop header must not be exception handler!");
+    // Sometimes, the backbranch comes from an exception handler. In
+    // this case, loop indexes/loop depths may not appear correct.
+    bool loop_through_xhandler = false;
+    for (int i = 0; i < block->number_of_exception_handlers(); i++) {
+      BlockBegin *xhandler = block->exception_handler_at(i);
+      for (int j = 0; j < block->number_of_preds(); j++) {
+        if (dominates(xhandler, block->pred_at(j)) || xhandler == block->pred_at(j)) {
+          loop_through_xhandler = true;
+        }
+      }
+    }
+
+    for (int i=0; i<block->number_of_sux(); i++) {
+      BlockBegin *sux = block->sux_at(i);
+      assert(sux->loop_depth() != block->loop_depth() || sux->loop_index() == block->loop_index() || loop_through_xhandler, "Loop index has to be same");
+      assert(sux->loop_depth() == block->loop_depth() || sux->loop_index() != block->loop_index(), "Loop index has to be different");
+    }
+
+    for (int i=0; i<all_blocks->length(); i++) {
+      BlockBegin *cur = all_blocks->at(i);
+      if (cur->loop_index() == loop_index && cur != block) {
+        assert(dominates(block->dominator(), cur), "Dominator of loop header must dominate all loop blocks");
+      }
+    }
+  }
+
+  Instruction *cur = block;
+  while (cur) {
+    assert(cur->block() == block, "Block begin has to be set correctly!");
+    cur = cur->next();
+  }
+}
+
+// Loop header must dominate all loop blocks
+bool RangeCheckEliminator::Verification::dominates(BlockBegin *dominator, BlockBegin *block) {
+  BlockBegin *cur = block->dominator();
+  while (cur && cur != dominator) {
+    cur = cur->dominator();
+  }
+  return cur == dominator;
+}
+
+// Try to reach block 'end' starting at block 'start', without passing through block 'dont_use'
+bool RangeCheckEliminator::Verification::can_reach(BlockBegin *start, BlockBegin *end, BlockBegin *dont_use /* = NULL */) {
+  if (start == end) return start != dont_use;
+  // Simple BFS from start to end
+  for (int i=0; i<_used.length(); i++) {
+    _used[i] = false;
+  }
+  _current.truncate(0);
+  _successors.truncate(0);
+  if (start != dont_use) {
+    _current.push(start);
+    _used[start->block_id()] = true;
+  }
+
+  while (_current.length() > 0) {
+    BlockBegin *cur = _current.pop();
+    // Add exception handlers to list
+    for (int i=0; i<cur->number_of_exception_handlers(); i++) {
+      BlockBegin *xhandler = cur->exception_handler_at(i);
+      _successors.push(xhandler);
+      // Add exception handlers of _successors to list
+      for (int j=0; j<xhandler->number_of_exception_handlers(); j++) {
+        BlockBegin *sux_xhandler = xhandler->exception_handler_at(j);
+        _successors.push(sux_xhandler);
+      }
+    }
+    // Add normal _successors to list
+    for (int i=0; i<cur->number_of_sux(); i++) {
+      BlockBegin *sux = cur->sux_at(i);
+      _successors.push(sux);
+      // Add exception handlers of _successors to list
+      for (int j=0; j<sux->number_of_exception_handlers(); j++) {
+        BlockBegin *xhandler = sux->exception_handler_at(j);
+        _successors.push(xhandler);
+      }
+    }
+    for (int i=0; i<_successors.length(); i++) {
+      BlockBegin *sux = _successors[i];
+      assert(sux != NULL, "Successor must not be NULL!");
+      if (sux == end) {
+        return true;
+      }
+      if (sux != dont_use && !_used[sux->block_id()]) {
+        _used[sux->block_id()] = true;
+        _current.push(sux);
+      }
+    }
+    _successors.truncate(0);
+  }
+
+  return false;
+}
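+
+// Note on usage (informal, not from the original sources): the dominator
+// verification above relies on can_reach with a block to avoid. If 'end'
+// cannot be reached once 'dont_use' is excluded, then every path from the
+// start block to 'end' runs through 'dont_use', which is exactly the
+// dominator property being checked.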
+
+// Bound
+RangeCheckEliminator::Bound::~Bound() {
+}
+
+// Bound constructor
+RangeCheckEliminator::Bound::Bound() {
+  init();
+  this->_lower = min_jint;
+  this->_upper = max_jint;
+  this->_lower_instr = NULL;
+  this->_upper_instr = NULL;
+}
+
+// Bound constructor
+RangeCheckEliminator::Bound::Bound(int lower, Value lower_instr, int upper, Value upper_instr) {
+  init();
+  assert(!lower_instr || !lower_instr->as_Constant() || !lower_instr->type()->as_IntConstant(), "Must not be constant!");
+  assert(!upper_instr || !upper_instr->as_Constant() || !upper_instr->type()->as_IntConstant(), "Must not be constant!");
+  this->_lower = lower;
+  this->_upper = upper;
+  this->_lower_instr = lower_instr;
+  this->_upper_instr = upper_instr;
+}
+
+// Bound constructor
+RangeCheckEliminator::Bound::Bound(Instruction::Condition cond, Value v, int constant) {
+  assert(!v || (v->type() && (v->type()->as_IntType() || v->type()->as_ObjectType())), "Type must be array or integer!");
+  assert(!v || !v->as_Constant() || !v->type()->as_IntConstant(), "Must not be constant!");
+
+  init();
+  if (cond == Instruction::eql) {
+    _lower = constant;
+    _lower_instr = v;
+    _upper = constant;
+    _upper_instr = v;
+  } else if (cond == Instruction::neq) {
+    _lower = min_jint;
+    _upper = max_jint;
+    _lower_instr = NULL;
+    _upper_instr = NULL;
+    if (v == NULL) {
+      if (constant == min_jint) {
+        _lower++;
+      }
+      if (constant == max_jint) {
+        _upper--;
+      }
+    }
+  } else if (cond == Instruction::geq) {
+    _lower = constant;
+    _lower_instr = v;
+    _upper = max_jint;
+    _upper_instr = NULL;
+  } else if (cond == Instruction::leq) {
+    _lower = min_jint;
+    _lower_instr = NULL;
+    _upper = constant;
+    _upper_instr = v;
+  } else {
+    ShouldNotReachHere();
+  }
+}
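+
+// Illustrative examples of the mapping above (hypothetical values, for
+// documentation only): Bound(Instruction::geq, NULL, 0) encodes
+// 0 <= x <= max_jint, and Bound(Instruction::leq, a, -1) encodes
+// min_jint <= x <= a - 1 for a non-constant instruction a.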
+
+// Set lower
+void RangeCheckEliminator::Bound::set_lower(int value, Value v) {
+  assert(!v || !v->as_Constant() || !v->type()->as_IntConstant(), "Must not be constant!");
+  this->_lower = value;
+  this->_lower_instr = v;
+}
+
+// Set upper
+void RangeCheckEliminator::Bound::set_upper(int value, Value v) {
+  assert(!v || !v->as_Constant() || !v->type()->as_IntConstant(), "Must not be constant!");
+  this->_upper = value;
+  this->_upper_instr = v;
+}
+
+// Add constant; the caller must ensure that no overflow can occur
+void RangeCheckEliminator::Bound::add_constant(int value) {
+  this->_lower += value;
+  this->_upper += value;
+}
+
+// Init
+void RangeCheckEliminator::Bound::init() {
+}
+
+// or
+void RangeCheckEliminator::Bound::or_op(Bound *b) {
+  // Watch out: the resulting bound may overflow!
+  // Update lower bound
+  if (_lower_instr != b->_lower_instr || (_lower_instr && _lower != b->_lower)) {
+    _lower_instr = NULL;
+    _lower = min_jint;
+  } else {
+    _lower = MIN2(_lower, b->_lower);
+  }
+  // Update upper bound
+  if (_upper_instr != b->_upper_instr || (_upper_instr && _upper != b->_upper)) {
+    _upper_instr = NULL;
+    _upper = max_jint;
+  } else {
+    _upper = MAX2(_upper, b->_upper);
+  }
+}
+
+// and
+void RangeCheckEliminator::Bound::and_op(Bound *b) {
+  // Update lower bound
+  if (_lower_instr == b->_lower_instr) {
+    _lower = MAX2(_lower, b->_lower);
+  }
+  if (b->has_lower()) {
+    bool set = true;
+    if (_lower_instr != NULL && b->_lower_instr != NULL) {
+      set = (_lower_instr->dominator_depth() > b->_lower_instr->dominator_depth());
+    }
+    if (set) {
+      _lower = b->_lower;
+      _lower_instr = b->_lower_instr;
+    }
+  }
+  // Update upper bound
+  if (_upper_instr == b->_upper_instr) {
+    _upper = MIN2(_upper, b->_upper);
+  }
+  if (b->has_upper()) {
+    bool set = true;
+    if (_upper_instr != NULL && b->_upper_instr != NULL) {
+      set = (_upper_instr->dominator_depth() > b->_upper_instr->dominator_depth());
+    }
+    if (set) {
+      _upper = b->_upper;
+      _upper_instr = b->_upper_instr;
+    }
+  }
+}
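+
+// Informal summary (not part of the original sources): or_op widens this
+// bound so that it covers both inputs, as needed when control flow merges;
+// and_op narrows it, preferring a bound whose defining instruction lies
+// deeper in the dominator tree and therefore carries more precise context.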
+
+// has_upper
+bool RangeCheckEliminator::Bound::has_upper() {
+  return _upper_instr != NULL || _upper < max_jint;
+}
+
+// is_smaller
+bool RangeCheckEliminator::Bound::is_smaller(Bound *b) {
+  if (b->_lower_instr != _upper_instr) {
+    return false;
+  }
+  return _upper < b->_lower;
+}
+
+// has_lower
+bool RangeCheckEliminator::Bound::has_lower() {
+  return _lower_instr != NULL || _lower > min_jint;
+}
+
+// in_array_bound
+bool RangeCheckEliminator::in_array_bound(Bound *bound, Value array) {
+  if (!bound) return false;
+  assert(array != NULL, "Must not be null!");
+  if (bound->lower() >= 0 && bound->lower_instr() == NULL && bound->upper() < 0 && bound->upper_instr() != NULL) {
+    ArrayLength *len = bound->upper_instr()->as_ArrayLength();
+    if (bound->upper_instr() == array || (len != NULL && len->array() == array)) {
+      return true;
+    }
+  }
+  return false;
+}
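+
+// Example (hypothetical): for an access a[x] with the bound 0 <= x <= i3 - 1,
+// where i3 is the ArrayLength of a, in_array_bound returns true and the
+// range check on that access can be eliminated.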
+
+// remove_lower
+void RangeCheckEliminator::Bound::remove_lower() {
+  _lower = min_jint;
+  _lower_instr = NULL;
+}
+
+// remove_upper
+void RangeCheckEliminator::Bound::remove_upper() {
+  _upper = max_jint;
+  _upper_instr = NULL;
+}
+
+// upper
+int RangeCheckEliminator::Bound::upper() {
+  return _upper;
+}
+
+// lower
+int RangeCheckEliminator::Bound::lower() {
+  return _lower;
+}
+
+// upper_instr
+Value RangeCheckEliminator::Bound::upper_instr() {
+  return _upper_instr;
+}
+
+// lower_instr
+Value RangeCheckEliminator::Bound::lower_instr() {
+  return _lower_instr;
+}
+
+// print
+void RangeCheckEliminator::Bound::print() {
+  tty->print("");
+  if (this->_lower_instr || this->_lower != min_jint) {
+    if (this->_lower_instr) {
+      tty->print("i%d", this->_lower_instr->id());
+      if (this->_lower > 0) {
+        tty->print("+%d", _lower);
+      }
+      if (this->_lower < 0) {
+        tty->print("%d", _lower);
+      }
+    } else {
+      tty->print("%d", _lower);
+    }
+    tty->print(" <= ");
+  }
+  tty->print("x");
+  if (this->_upper_instr || this->_upper != max_jint) {
+    tty->print(" <= ");
+    if (this->_upper_instr) {
+      tty->print("i%d", this->_upper_instr->id());
+      if (this->_upper > 0) {
+        tty->print("+%d", _upper);
+      }
+      if (this->_upper < 0) {
+        tty->print("%d", _upper);
+      }
+    } else {
+      tty->print("%d", _upper);
+    }
+  }
+}
+
+// Copy
+RangeCheckEliminator::Bound *RangeCheckEliminator::Bound::copy() {
+  Bound *b = new Bound();
+  b->_lower = _lower;
+  b->_lower_instr = _lower_instr;
+  b->_upper = _upper;
+  b->_upper_instr = _upper_instr;
+  return b;
+}
+
+#ifdef ASSERT
+// Add assertion
+void RangeCheckEliminator::Bound::add_assertion(Instruction *instruction, Instruction *position, int i, Value instr, Instruction::Condition cond) {
+  Instruction *result = position;
+  Instruction *compare_with = NULL;
+  ValueStack *state = position->state_before();
+  if (position->as_BlockEnd() && !position->as_Goto()) {
+    state = position->as_BlockEnd()->state_before();
+  }
+  Instruction *instruction_before = position->prev();
+  if (position->as_Return() && Compilation::current()->method()->is_synchronized() && instruction_before->as_MonitorExit()) {
+    instruction_before = instruction_before->prev();
+  }
+  result = instruction_before;
+  // Load constant only if needed
+  Constant *constant = NULL;
+  if (i != 0 || !instr) {
+    constant = new Constant(new IntConstant(i));
+    NOT_PRODUCT(constant->set_printable_bci(position->printable_bci()));
+    result = result->insert_after(constant);
+    compare_with = constant;
+  }
+
+  if (instr) {
+    assert(instr->type()->as_ObjectType() || instr->type()->as_IntType(), "Type must be array or integer!");
+    compare_with = instr;
+    // Load array length if necessary
+    Instruction *op = instr;
+    if (instr->type()->as_ObjectType()) {
+      assert(state, "must not be null");
+      ArrayLength *length = new ArrayLength(instr, state->copy());
+      NOT_PRODUCT(length->set_printable_bci(position->printable_bci()));
+      length->set_exception_state(length->state_before());
+      result = result->insert_after(length);
+      op = length;
+      compare_with = length;
+    }
+    // Add operation only if necessary
+    if (constant) {
+      ArithmeticOp *ao = new ArithmeticOp(Bytecodes::_iadd, constant, op, false, NULL);
+      NOT_PRODUCT(ao->set_printable_bci(position->printable_bci()));
+      result = result->insert_after(ao);
+      compare_with = ao;
+      // TODO: Check that add operation does not overflow!
+    }
+  }
+  assert(compare_with != NULL, "You have to compare with something!");
+  assert(instruction != NULL, "Instruction must not be null!");
+
+  if (instruction->type()->as_ObjectType()) {
+    // Load array length if necessary
+    Instruction *op = instruction;
+    assert(state, "must not be null");
+    ArrayLength *length = new ArrayLength(instruction, state->copy());
+    length->set_exception_state(length->state_before());
+    NOT_PRODUCT(length->set_printable_bci(position->printable_bci()));
+    result = result->insert_after(length);
+    instruction = length;
+  }
+
+  Assert *assert = new Assert(instruction, cond, false, compare_with);
+  NOT_PRODUCT(assert->set_printable_bci(position->printable_bci()));
+  result->insert_after(assert);
+}
+
+// Add assertions
+void RangeCheckEliminator::add_assertions(Bound *bound, Instruction *instruction, Instruction *position) {
+  // Add lower bound assertion
+  if (bound->has_lower()) {
+    bound->add_assertion(instruction, position, bound->lower(), bound->lower_instr(), Instruction::geq);
+  }
+  // Add upper bound assertion
+  if (bound->has_upper()) {
+    bound->add_assertion(instruction, position, bound->upper(), bound->upper_instr(), Instruction::leq);
+  }
+}
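+
+// For example (hypothetical IR): given the bound 0 <= x <= a.length - 1 for
+// an index x, add_assertions emits two Assert instructions in front of
+// 'position', checking x >= 0 and x <= a.length - 1. These asserts exist
+// only in debug builds and are presumably driven by the
+// AssertRangeCheckElimination flag declared in c1_globals.hpp.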
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/c1/c1_RangeCheckElimination.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
+#define SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
+
+#include "c1/c1_Instruction.hpp"
+
+// Base class for range check elimination
+class RangeCheckElimination : AllStatic {
+public:
+  static void eliminate(IR *ir);
+};
+
+// Implementation
+class RangeCheckEliminator VALUE_OBJ_CLASS_SPEC {
+private:
+  int _number_of_instructions;
+  bool _optimistic; // Insert predicates and deoptimize when they fail
+  IR *_ir;
+
+  define_array(BlockBeginArray, BlockBegin*)
+  define_stack(BlockBeginList, BlockBeginArray)
+  define_stack(IntegerStack, intArray)
+  define_array(IntegerMap, IntegerStack*)
+
+  class Verification : public _ValueObj /*VALUE_OBJ_CLASS_SPEC*/, public BlockClosure {
+  private:
+    IR *_ir;
+    boolArray _used;
+    BlockBeginList _current;
+    BlockBeginList _successors;
+
+  public:
+    Verification(IR *ir);
+    virtual void block_do(BlockBegin *block);
+    bool can_reach(BlockBegin *start, BlockBegin *end, BlockBegin *dont_use = NULL);
+    bool dominates(BlockBegin *dominator, BlockBegin *block);
+  };
+
+public:
+  // Bounds for an instruction of the form x + c, where c is an integer
+  // constant and x is another instruction
+  class Bound : public CompilationResourceObj {
+  private:
+    int _upper;
+    Value _upper_instr;
+    int _lower;
+    Value _lower_instr;
+
+  public:
+    Bound();
+    Bound(Value v);
+    Bound(Instruction::Condition cond, Value v, int constant = 0);
+    Bound(int lower, Value lower_instr, int upper, Value upper_instr);
+    ~Bound();
+
+#ifdef ASSERT
+    void add_assertion(Instruction *instruction, Instruction *position, int i, Value instr, Instruction::Condition cond);
+#endif
+    int upper();
+    Value upper_instr();
+    int lower();
+    Value lower_instr();
+    void print();
+    bool check_no_overflow(int const_value);
+    void or_op(Bound *b);
+    void and_op(Bound *b);
+    bool has_upper();
+    bool has_lower();
+    void set_upper(int upper, Value upper_instr);
+    void set_lower(int lower, Value lower_instr);
+    bool is_smaller(Bound *b);
+    void remove_upper();
+    void remove_lower();
+    void add_constant(int value);
+    Bound *copy();
+
+  private:
+    void init();
+  };
+
+
+  class Visitor : public InstructionVisitor {
+  private:
+    Bound *_bound;
+    RangeCheckEliminator *_rce;
+
+  public:
+    void set_range_check_eliminator(RangeCheckEliminator *rce) { _rce = rce; }
+    Bound *bound() const { return _bound; }
+    void clear_bound() { _bound = NULL; }
+
+  protected:
+    // visitor functions
+    void do_Constant       (Constant*        x);
+    void do_IfOp           (IfOp*            x);
+    void do_LogicOp        (LogicOp*         x);
+    void do_ArithmeticOp   (ArithmeticOp*    x);
+    void do_Phi            (Phi*             x);
+
+    void do_StoreField     (StoreField*      x) { /* nothing to do */ };
+    void do_StoreIndexed   (StoreIndexed*    x) { /* nothing to do */ };
+    void do_MonitorEnter   (MonitorEnter*    x) { /* nothing to do */ };
+    void do_MonitorExit    (MonitorExit*     x) { /* nothing to do */ };
+    void do_Invoke         (Invoke*          x) { /* nothing to do */ };
+    void do_UnsafePutRaw   (UnsafePutRaw*    x) { /* nothing to do */ };
+    void do_UnsafePutObject(UnsafePutObject* x) { /* nothing to do */ };
+    void do_Intrinsic      (Intrinsic*       x) { /* nothing to do */ };
+    void do_Local          (Local*           x) { /* nothing to do */ };
+    void do_LoadField      (LoadField*       x) { /* nothing to do */ };
+    void do_ArrayLength    (ArrayLength*     x) { /* nothing to do */ };
+    void do_LoadIndexed    (LoadIndexed*     x) { /* nothing to do */ };
+    void do_NegateOp       (NegateOp*        x) { /* nothing to do */ };
+    void do_ShiftOp        (ShiftOp*         x) { /* nothing to do */ };
+    void do_CompareOp      (CompareOp*       x) { /* nothing to do */ };
+    void do_Convert        (Convert*         x) { /* nothing to do */ };
+    void do_NullCheck      (NullCheck*       x) { /* nothing to do */ };
+    void do_TypeCast       (TypeCast*        x) { /* nothing to do */ };
+    void do_NewInstance    (NewInstance*     x) { /* nothing to do */ };
+    void do_NewTypeArray   (NewTypeArray*    x) { /* nothing to do */ };
+    void do_NewObjectArray (NewObjectArray*  x) { /* nothing to do */ };
+    void do_NewMultiArray  (NewMultiArray*   x) { /* nothing to do */ };
+    void do_CheckCast      (CheckCast*       x) { /* nothing to do */ };
+    void do_InstanceOf     (InstanceOf*      x) { /* nothing to do */ };
+    void do_BlockBegin     (BlockBegin*      x) { /* nothing to do */ };
+    void do_Goto           (Goto*            x) { /* nothing to do */ };
+    void do_If             (If*              x) { /* nothing to do */ };
+    void do_IfInstanceOf   (IfInstanceOf*    x) { /* nothing to do */ };
+    void do_TableSwitch    (TableSwitch*     x) { /* nothing to do */ };
+    void do_LookupSwitch   (LookupSwitch*    x) { /* nothing to do */ };
+    void do_Return         (Return*          x) { /* nothing to do */ };
+    void do_Throw          (Throw*           x) { /* nothing to do */ };
+    void do_Base           (Base*            x) { /* nothing to do */ };
+    void do_OsrEntry       (OsrEntry*        x) { /* nothing to do */ };
+    void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ };
+    void do_RoundFP        (RoundFP*         x) { /* nothing to do */ };
+    void do_UnsafeGetRaw   (UnsafeGetRaw*    x) { /* nothing to do */ };
+    void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ };
+    void do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) { /* nothing to do */ };
+    void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ };
+    void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ };
+    void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ };
+    void do_ProfileInvoke  (ProfileInvoke*  x)  { /* nothing to do */ };
+    void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
+    void do_MemBar         (MemBar*          x) { /* nothing to do */ };
+    void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
+    void do_Assert         (Assert*          x) { /* nothing to do */ };
+  };
+
+#ifdef ASSERT
+  void add_assertions(Bound *bound, Instruction *instruction, Instruction *position);
+#endif
+
+  define_array(BoundArray, Bound *)
+  define_stack(BoundStack, BoundArray)
+  define_array(BoundMap, BoundStack *)
+  define_array(AccessIndexedArray, AccessIndexed *)
+  define_stack(AccessIndexedList, AccessIndexedArray)
+  define_array(InstructionArray, Instruction *)
+  define_stack(InstructionList, InstructionArray)
+
+  class AccessIndexedInfo : public CompilationResourceObj  {
+  public:
+    AccessIndexedList *_list;
+    int _min;
+    int _max;
+  };
+
+  define_array(AccessIndexedInfoArray, AccessIndexedInfo *)
+  BoundMap _bounds; // Mapping from Instruction's id to current bound
+  AccessIndexedInfoArray _access_indexed_info; // Mapping from Instruction's id to AccessIndexedInfo for in block motion
+  Visitor _visitor;
+
+public:
+  RangeCheckEliminator(IR *ir);
+
+  IR *ir() const { return _ir; }
+
+  // Pass over the dominator tree to identify blocks where there's an opportunity for optimization
+  bool set_process_block_flags(BlockBegin *block);
+  // The core of the optimization work: pass over the dominator tree
+  // to propagate bound information, insert predicates out of loops,
+  // eliminate bound checks when possible and perform in-block motion
+  void calc_bounds(BlockBegin *block, BlockBegin *loop_header);
+  // reorder bound checks within a block in order to eliminate some of them
+  void in_block_motion(BlockBegin *block, AccessIndexedList &accessIndexed, InstructionList &arrays);
+
+  // update/access current bound
+  void update_bound(IntegerStack &pushed, Value v, Instruction::Condition cond, Value value, int constant);
+  void update_bound(IntegerStack &pushed, Value v, Bound *bound);
+  Bound *get_bound(Value v);
+
+  bool loop_invariant(BlockBegin *loop_header, Instruction *instruction);                                    // check for loop invariance
+  void add_access_indexed_info(InstructionList &indices, int i, Value instruction, AccessIndexed *ai); // record indexed access for in block motion
+  void remove_range_check(AccessIndexed *ai);                                                                // Mark this instruction as not needing a range check
+  void add_if_condition(IntegerStack &pushed, Value x, Value y, Instruction::Condition condition);           // Update bound for an If
+  bool in_array_bound(Bound *bound, Value array);                                                            // Check whether bound is known to fall within array
+
+  // helper functions to work with predicates
+  Instruction* insert_after(Instruction* insert_position, Instruction* instr, int bci);
+  Instruction* predicate(Instruction* left, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci=-1);
+  Instruction* predicate_cmp_with_const(Instruction* instr, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci=-1);
+  Instruction* predicate_add(Instruction* left, int left_const, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci=-1);
+  Instruction* predicate_add_cmp_with_const(Instruction* left, int left_const, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci=-1);
+
+  void insert_deoptimization(ValueStack *state, Instruction *insert_position, Instruction *array_instr,      // Add predicate
+                             Instruction *length_instruction, Instruction *lower_instr, int lower,
+                             Instruction *upper_instr, int upper, AccessIndexed *ai);
+  bool is_ok_for_deoptimization(Instruction *insert_position, Instruction *array_instr,                      // Can we safely add a predicate?
+                                Instruction *length_instr, Instruction *lower_instr,
+                                int lower, Instruction *upper_instr, int upper);
+  void process_if(IntegerStack &pushed, BlockBegin *block, If *cond);                                        // process If Instruction
+  void process_access_indexed(BlockBegin *loop_header, BlockBegin *block, AccessIndexed *ai);                // process indexed access
+
+  void dump_condition_stack(BlockBegin *cur_block);
+  static void print_statistics();
+};
+
+#endif // SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -1330,6 +1330,50 @@
   return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
 JRT_END
 
+JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))
+  ResourceMark rm;
+
+  assert(!TieredCompilation, "incompatible with tiered compilation");
+
+  RegisterMap reg_map(thread, false);
+  frame runtime_frame = thread->last_frame();
+  frame caller_frame = runtime_frame.sender(&reg_map);
+
+  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
+  assert(nm != NULL, "no more nmethod?");
+  nm->make_not_entrant();
+
+  methodHandle m(nm->method());
+  MethodData* mdo = m->method_data();
+
+  if (mdo == NULL && !HAS_PENDING_EXCEPTION) {
+    // Build an MDO.  Ignore errors like OutOfMemory;
+    // that simply means we won't have an MDO to update.
+    Method::build_interpreter_method_data(m, THREAD);
+    if (HAS_PENDING_EXCEPTION) {
+      assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
+      CLEAR_PENDING_EXCEPTION;
+    }
+    mdo = m->method_data();
+  }
+
+  if (mdo != NULL) {
+    mdo->inc_trap_count(Deoptimization::Reason_none);
+  }
+
+  if (TracePredicateFailedTraps) {
+    stringStream ss1, ss2;
+    vframeStream vfst(thread);
+    methodHandle inlinee = methodHandle(vfst.method());
+    inlinee->print_short_name(&ss1);
+    m->print_short_name(&ss2);
+    tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc %x", ss1.as_string(), vfst.bci(), ss2.as_string(), caller_frame.pc());
+  }
+
+  Deoptimization::deoptimize_frame(thread, caller_frame.id());
+
+JRT_END
 
 #ifndef PRODUCT
 void Runtime1::print_statistics() {
--- a/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -71,6 +71,7 @@
   stub(g1_post_barrier_slow)         \
   stub(fpu2long_stub)                \
   stub(counter_overflow)             \
+  stub(predicate_failed_trap)        \
   last_entry(number_of_ids)
 
 #define DECLARE_STUB_ID(x)       x ## _id ,
@@ -190,6 +191,8 @@
   static void oop_arraycopy(HeapWord* src, HeapWord* dst, int length);
   static int  is_instance_of(oopDesc* mirror, oopDesc* obj);
 
+  static void predicate_failed_trap(JavaThread* thread);
+
   static void print_statistics()                 PRODUCT_RETURN;
 };
 
--- a/hotspot/src/share/vm/c1/c1_ValueMap.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_ValueMap.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -26,9 +26,9 @@
 #include "c1/c1_Canonicalizer.hpp"
 #include "c1/c1_IR.hpp"
 #include "c1/c1_ValueMap.hpp"
+#include "c1/c1_ValueStack.hpp"
 #include "utilities/bitMap.inline.hpp"
 
-
 #ifndef PRODUCT
 
   int ValueMap::_number_of_finds = 0;
@@ -192,10 +192,6 @@
                    && lf->field()->holder() == field->holder()                           \
                    && (all_offsets || lf->field()->offset() == field->offset());
 
-#define MUST_KILL_EXCEPTION(must_kill, entry, value)                                     \
-  assert(entry->nesting() < nesting(), "must not find bigger nesting than current");     \
-  bool must_kill = (entry->nesting() == nesting() - 1);
-
 
 void ValueMap::kill_memory() {
   GENERIC_KILL_VALUE(MUST_KILL_MEMORY);
@@ -209,11 +205,6 @@
   GENERIC_KILL_VALUE(MUST_KILL_FIELD);
 }
 
-void ValueMap::kill_exception() {
-  GENERIC_KILL_VALUE(MUST_KILL_EXCEPTION);
-}
-
-
 void ValueMap::kill_map(ValueMap* map) {
   assert(is_global_value_numbering(), "only for global value numbering");
   _killed_values.set_union(&map->_killed_values);
@@ -274,6 +265,8 @@
   GlobalValueNumbering* _gvn;
   BlockList             _loop_blocks;
   bool                  _too_complicated_loop;
+  bool                  _has_field_store[T_ARRAY + 1];
+  bool                  _has_indexed_store[T_ARRAY + 1];
 
   // simplified access to methods of GlobalValueNumbering
   ValueMap* current_map()                        { return _gvn->current_map(); }
@@ -281,8 +274,16 @@
 
   // implementation for abstract methods of ValueNumberingVisitor
   void      kill_memory()                                 { _too_complicated_loop = true; }
-  void      kill_field(ciField* field, bool all_offsets)  { current_map()->kill_field(field, all_offsets); };
-  void      kill_array(ValueType* type)                   { current_map()->kill_array(type); };
+  void      kill_field(ciField* field, bool all_offsets)  {
+    current_map()->kill_field(field, all_offsets);
+    assert(field->type()->basic_type() >= 0 && field->type()->basic_type() <= T_ARRAY, "Invalid type");
+    _has_field_store[field->type()->basic_type()] = true;
+  }
+  void      kill_array(ValueType* type)                   {
+    current_map()->kill_array(type);
+    BasicType basic_type = as_BasicType(type);
+    assert(basic_type >= 0 && basic_type <= T_ARRAY, "Invalid type");
+    _has_indexed_store[basic_type] = true;
+  }
 
  public:
   ShortLoopOptimizer(GlobalValueNumbering* gvn)
@@ -290,11 +291,141 @@
     , _loop_blocks(ValueMapMaxLoopSize)
     , _too_complicated_loop(false)
   {
+    for (int i=0; i<= T_ARRAY; i++){
+      _has_field_store[i] = false;
+      _has_indexed_store[i] = false;
+    }
+  }
+
+  bool has_field_store(BasicType type) {
+    assert(type >= 0 && type <= T_ARRAY, "Invalid type");
+    return _has_field_store[type];
+  }
+
+  bool has_indexed_store(BasicType type) {
+    assert(type >= 0 && type <= T_ARRAY, "Invalid type");
+    return _has_indexed_store[type];
   }
 
   bool process(BlockBegin* loop_header);
 };
 
+class LoopInvariantCodeMotion : public StackObj  {
+ private:
+  GlobalValueNumbering* _gvn;
+  ShortLoopOptimizer*   _short_loop_optimizer;
+  Instruction*          _insertion_point;
+  ValueStack *          _state;
+
+  void set_invariant(Value v) const    { _gvn->set_processed(v); }
+  bool is_invariant(Value v) const     { return _gvn->is_processed(v); }
+
+  void process_block(BlockBegin* block);
+
+ public:
+  LoopInvariantCodeMotion(ShortLoopOptimizer *slo, GlobalValueNumbering* gvn, BlockBegin* loop_header, BlockList* loop_blocks);
+};
+
+LoopInvariantCodeMotion::LoopInvariantCodeMotion(ShortLoopOptimizer *slo, GlobalValueNumbering* gvn, BlockBegin* loop_header, BlockList* loop_blocks)
+  : _gvn(gvn), _short_loop_optimizer(slo) {
+
+  TRACE_VALUE_NUMBERING(tty->print_cr("using loop invariant code motion loop_header = %d", loop_header->block_id()));
+  TRACE_VALUE_NUMBERING(tty->print_cr("** loop invariant code motion for short loop B%d", loop_header->block_id()));
+
+  BlockBegin* insertion_block = loop_header->dominator();
+  if (insertion_block->number_of_preds() == 0) {
+    return;  // only the entry block does not have a predecessor
+  }
+
+  assert(insertion_block->end()->as_Base() == NULL, "cannot insert into entry block");
+  _insertion_point = insertion_block->end()->prev();
+
+  BlockEnd *block_end = insertion_block->end();
+  _state = block_end->state_before();
+
+  if (!_state) {
+    // If, TableSwitch and LookupSwitch always have state_before when
+    // loop invariant code motion happens.
+    assert(block_end->as_Goto(), "Block has to be goto");
+    _state = block_end->state();
+  }
+
+  // the loop_blocks are filled by going backward from the loop header, so this processing order is best
+  assert(loop_blocks->at(0) == loop_header, "loop header must be first loop block");
+  process_block(loop_header);
+  for (int i = loop_blocks->length() - 1; i >= 1; i--) {
+    process_block(loop_blocks->at(i));
+  }
+}
+
+void LoopInvariantCodeMotion::process_block(BlockBegin* block) {
+  TRACE_VALUE_NUMBERING(tty->print_cr("processing block B%d", block->block_id()));
+
+  Instruction* prev = block;
+  Instruction* cur = block->next();
+
+  while (cur != NULL) {
+
+    // determine if cur instruction is loop invariant
+    // only selected instruction types are processed here
+    bool cur_invariant = false;
+
+    if (cur->as_Constant() != NULL) {
+      cur_invariant = !cur->can_trap();
+    } else if (cur->as_ArithmeticOp() != NULL || cur->as_LogicOp() != NULL || cur->as_ShiftOp() != NULL) {
+      assert(cur->as_Op2() != NULL, "must be Op2");
+      Op2* op2 = (Op2*)cur;
+      cur_invariant = !op2->can_trap() && is_invariant(op2->x()) && is_invariant(op2->y());
+    } else if (cur->as_LoadField() != NULL) {
+      LoadField* lf = (LoadField*)cur;
+      // deoptimizes on NullPointerException
+      cur_invariant = !lf->needs_patching() && !lf->field()->is_volatile() && !_short_loop_optimizer->has_field_store(lf->field()->type()->basic_type()) && is_invariant(lf->obj());
+    } else if (cur->as_ArrayLength() != NULL) {
+      ArrayLength *length = cur->as_ArrayLength();
+      cur_invariant = is_invariant(length->array());
+    } else if (cur->as_LoadIndexed() != NULL) {
+      LoadIndexed *li = (LoadIndexed *)cur->as_LoadIndexed();
+      cur_invariant = !_short_loop_optimizer->has_indexed_store(as_BasicType(cur->type())) && is_invariant(li->array()) && is_invariant(li->index());
+    }
+
+    if (cur_invariant) {
+      // perform value numbering and mark instruction as loop-invariant
+      _gvn->substitute(cur);
+
+      if (cur->as_Constant() == NULL) {
+        // ensure that code for non-constant instructions is always generated
+        cur->pin();
+      }
+
+      // remove cur instruction from loop block and append it to block before loop
+      Instruction* next = cur->next();
+      Instruction* in = _insertion_point->next();
+      _insertion_point = _insertion_point->set_next(cur);
+      cur->set_next(in);
+
+      //  Deoptimize on exception
+      cur->set_flag(Instruction::DeoptimizeOnException, true);
+
+      //  Clear exception handlers
+      cur->set_exception_handlers(NULL);
+
+      TRACE_VALUE_NUMBERING(tty->print_cr("Instruction %c%d is loop invariant", cur->type()->tchar(), cur->id()));
+
+      if (cur->state_before() != NULL) {
+        cur->set_state_before(_state->copy());
+      }
+      if (cur->exception_state() != NULL) {
+        cur->set_exception_state(_state->copy());
+      }
+
+      cur = prev->set_next(next);
+
+    } else {
+      prev = cur;
+      cur = cur->next();
+    }
+  }
+}
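+
+// Sketch of the overall effect (hypothetical Java source, for illustration):
+// in
+//   for (int i = 0; i < n; i++) { sum += arr[k]; }
+// the ArrayLength and LoadIndexed for arr[k] are loop invariant (assuming no
+// stores of the same type inside the loop), so both are moved in front of
+// the loop and flagged DeoptimizeOnException instead of keeping explicit
+// exception handlers.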
 
 bool ShortLoopOptimizer::process(BlockBegin* loop_header) {
   TRACE_VALUE_NUMBERING(tty->print_cr("** loop header block"));
@@ -316,6 +447,10 @@
     for (int j = block->number_of_preds() - 1; j >= 0; j--) {
       BlockBegin* pred = block->pred_at(j);
 
+      if (pred->is_set(BlockBegin::osr_entry_flag)) {
+        return false;
+      }
+
       ValueMap* pred_map = value_map_of(pred);
       if (pred_map != NULL) {
         current_map()->kill_map(pred_map);
@@ -336,6 +471,12 @@
     }
   }
 
+  bool optimistic = this->_gvn->compilation()->is_optimistic();
+
+  if (UseLoopInvariantCodeMotion && optimistic) {
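+    // LoopInvariantCodeMotion is a StackObj: its constructor performs the
+    // entire transformation, so constructing it is all that happens here.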
+    LoopInvariantCodeMotion code_motion(this, _gvn, loop_header, &_loop_blocks);
+  }
+
   TRACE_VALUE_NUMBERING(tty->print_cr("** loop successfully optimized"));
   return true;
 }
@@ -344,11 +485,11 @@
 GlobalValueNumbering::GlobalValueNumbering(IR* ir)
   : _current_map(NULL)
   , _value_maps(ir->linear_scan_order()->length(), NULL)
+  , _compilation(ir->compilation())
 {
   TRACE_VALUE_NUMBERING(tty->print_cr("****** start of global value numbering"));
 
   ShortLoopOptimizer short_loop_optimizer(this);
-  int subst_count = 0;
 
   BlockList* blocks = ir->linear_scan_order();
   int num_blocks = blocks->length();
@@ -357,6 +498,12 @@
   assert(start_block == ir->start() && start_block->number_of_preds() == 0 && start_block->dominator() == NULL, "must be start block");
   assert(start_block->next()->as_Base() != NULL && start_block->next()->next() == NULL, "start block must not have instructions");
 
+  // method parameters are not linked in the instructions list, so process them separately
+  for_each_state_value(start_block->state(), value,
+     assert(value->as_Local() != NULL, "only method parameters allowed");
+     set_processed(value);
+  );
+
   // initial, empty value map with nesting 0
   set_value_map_of(start_block, new ValueMap());
 
@@ -374,7 +521,7 @@
     // create new value map with increased nesting
     _current_map = new ValueMap(value_map_of(dominator));
 
-    if (num_preds == 1) {
+    if (num_preds == 1 && !block->is_set(BlockBegin::exception_entry_flag)) {
       assert(dominator == block->pred_at(0), "dominator must be equal to predecessor");
       // nothing to do here
 
@@ -403,36 +550,41 @@
       }
     }
 
-    if (block->is_set(BlockBegin::exception_entry_flag)) {
-      current_map()->kill_exception();
-    }
+    // phi functions are not linked in the instructions list, so process them separately
+    for_each_phi_fun(block, phi,
+      set_processed(phi);
+    );
 
     TRACE_VALUE_NUMBERING(tty->print("value map before processing block: "); current_map()->print());
 
     // visit all instructions of this block
     for (Value instr = block->next(); instr != NULL; instr = instr->next()) {
-      assert(!instr->has_subst(), "substitution already set");
-
       // check if instruction kills any values
       instr->visit(this);
-
-      if (instr->hash() != 0) {
-        Value f = current_map()->find_insert(instr);
-        if (f != instr) {
-          assert(!f->has_subst(), "can't have a substitution");
-          instr->set_subst(f);
-          subst_count++;
-        }
-      }
+      // perform actual value numbering
+      substitute(instr);
     }
 
     // remember value map for successors
     set_value_map_of(block, current_map());
   }
 
-  if (subst_count != 0) {
+  if (_has_substitutions) {
     SubstitutionResolver resolver(ir);
   }
 
   TRACE_VALUE_NUMBERING(tty->print("****** end of global value numbering. "); ValueMap::print_statistics());
 }
+
+void GlobalValueNumbering::substitute(Instruction* instr) {
+  assert(!instr->has_subst(), "substitution already set");
+  Value subst = current_map()->find_insert(instr);
+  if (subst != instr) {
+    assert(!subst->has_subst(), "can't have a substitution");
+
+    TRACE_VALUE_NUMBERING(tty->print_cr("substitution for %d set to %d", instr->id(), subst->id()));
+    instr->set_subst(subst);
+    _has_substitutions = true;
+  }
+  set_processed(instr);
+}
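+
+// Example (hypothetical): if i5 and i9 compute the same expression with the
+// same operands and no intervening kill, find_insert returns i5 for i9;
+// i9 then gets set_subst(i5) and SubstitutionResolver rewires all uses of
+// i9 to i5 once the pass finishes.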
--- a/hotspot/src/share/vm/c1/c1_ValueMap.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_ValueMap.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -206,6 +206,8 @@
   void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
   void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
   void do_MemBar         (MemBar*          x) { /* nothing to do */ };
+  void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
+  void do_Assert         (Assert*          x) { /* nothing to do */ };
 };
 
 
@@ -225,15 +227,22 @@
 
 class GlobalValueNumbering: public ValueNumberingVisitor {
  private:
+  Compilation*  _compilation;     // compilation data
   ValueMap*     _current_map;     // value map of current block
   ValueMapArray _value_maps;      // list of value maps for all blocks
+  ValueSet      _processed_values;  // marker for instructions that were already processed
+  bool          _has_substitutions; // set to true when substitutions must be resolved
 
  public:
   // accessors
+  Compilation*  compilation() const              { return _compilation; }
   ValueMap*     current_map()                    { return _current_map; }
   ValueMap*     value_map_of(BlockBegin* block)  { return _value_maps.at(block->linear_scan_number()); }
   void          set_value_map_of(BlockBegin* block, ValueMap* map)   { assert(value_map_of(block) == NULL, ""); _value_maps.at_put(block->linear_scan_number(), map); }
 
+  bool          is_processed(Value v)            { return _processed_values.contains(v); }
+  void          set_processed(Value v)           { _processed_values.put(v); }
+
   // implementation for abstract methods of ValueNumberingVisitor
   void          kill_memory()                                 { current_map()->kill_memory(); }
   void          kill_field(ciField* field, bool all_offsets)  { current_map()->kill_field(field, all_offsets); }
@@ -241,6 +250,7 @@
 
   // main entry point that performs global value numbering
   GlobalValueNumbering(IR* ir);
+  void          substitute(Instruction* instr);  // substitute instruction if it is contained in current value map
 };
 
 #endif // SHARE_VM_C1_C1_VALUEMAP_HPP
--- a/hotspot/src/share/vm/c1/c1_globals.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/c1/c1_globals.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -119,6 +119,24 @@
   develop(bool, UseGlobalValueNumbering, true,                              \
           "Use Global Value Numbering (separate phase)")                    \
                                                                             \
+  product(bool, UseLoopInvariantCodeMotion, true,                           \
+          "Simple loop invariant code motion for short loops during GVN")   \
+                                                                            \
+  develop(bool, TracePredicateFailedTraps, false,                           \
+          "trace runtime traps caused by predicate failure")                \
+                                                                            \
+  develop(bool, StressLoopInvariantCodeMotion, false,                       \
+          "stress loop invariant code motion")                              \
+                                                                            \
+  develop(bool, TraceRangeCheckElimination, false,                          \
+          "Trace Range Check Elimination")                                  \
+                                                                            \
+  develop(bool, AssertRangeCheckElimination, false,                         \
+          "Assert Range Check Elimination")                                 \
+                                                                            \
+  develop(bool, StressRangeCheckElimination, false,                         \
+          "stress Range Check Elimination")                                 \
+                                                                            \
   develop(bool, PrintValueNumbering, false,                                 \
           "Print Value Numbering")                                          \
                                                                             \
--- a/hotspot/src/share/vm/ci/ciMethod.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -790,6 +790,17 @@
   return count;
 }
 
+
+// ------------------------------------------------------------------
+// ciMethod::is_special_get_caller_class_method
+//
+bool ciMethod::is_ignored_by_security_stack_walk() const {
+  check_is_loaded();
+  VM_ENTRY_MARK;
+  return get_Method()->is_ignored_by_security_stack_walk();
+}
+
+
 // ------------------------------------------------------------------
 // invokedynamic support
 
--- a/hotspot/src/share/vm/ci/ciMethod.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -166,8 +166,9 @@
   // Code size for inlining decisions.
   int code_size_for_inlining();
 
-  bool force_inline() { return get_Method()->force_inline(); }
-  bool dont_inline()  { return get_Method()->dont_inline();  }
+  bool caller_sensitive() { return get_Method()->caller_sensitive(); }
+  bool force_inline()     { return get_Method()->force_inline();     }
+  bool dont_inline()      { return get_Method()->dont_inline();      }
 
   int comp_level();
   int highest_osr_comp_level();
@@ -264,6 +265,9 @@
   int instructions_size();
   int scale_count(int count, float prof_factor = 1.);  // make MDO count commensurate with IIC
 
+  // Stack walking support
+  bool is_ignored_by_security_stack_walk() const;
+
   // JSR 292 support
   bool is_method_handle_intrinsic()  const;
   bool is_compiled_lambda_form() const;
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -1735,9 +1735,14 @@
                                                                 Symbol* name) {
   vmSymbols::SID sid = vmSymbols::find_sid(name);
   // Privileged code can use all annotations.  Other code silently drops some.
-  bool privileged = loader_data->is_the_null_class_loader_data() ||
-                    loader_data->is_anonymous();
+  const bool privileged = loader_data->is_the_null_class_loader_data() ||
+                          loader_data->is_ext_class_loader_data() ||
+                          loader_data->is_anonymous();
   switch (sid) {
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_reflect_CallerSensitive_signature):
+    if (_location != _in_method)  break;  // only allow for methods
+    if (!privileged)              break;  // only allow in privileged code
+    return _method_CallerSensitive;
   case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_ForceInline_signature):
     if (_location != _in_method)  break;  // only allow for methods
     if (!privileged)              break;  // only allow in privileged code
@@ -1775,6 +1780,8 @@
 }
 
 void ClassFileParser::MethodAnnotationCollector::apply_to(methodHandle m) {
+  if (has_annotation(_method_CallerSensitive))
+    m->set_caller_sensitive(true);
   if (has_annotation(_method_ForceInline))
     m->set_force_inline(true);
   if (has_annotation(_method_DontInline))
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -119,6 +119,7 @@
     enum Location { _in_field, _in_method, _in_class };
     enum ID {
       _unknown = 0,
+      _method_CallerSensitive,
       _method_ForceInline,
       _method_DontInline,
       _method_LambdaForm_Compiled,
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -321,6 +321,13 @@
   }
 }
 
+/**
+ * Returns true if this class loader data is for the extension class loader.
+ */
+bool ClassLoaderData::is_ext_class_loader_data() const {
+  return SystemDictionary::is_ext_class_loader(class_loader());
+}
+
 Metaspace* ClassLoaderData::metaspace_non_null() {
   assert(!DumpSharedSpaces, "wrong metaspace!");
   // If the metaspace has not been allocated, create a new one.  Might want
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -191,6 +191,7 @@
   bool is_the_null_class_loader_data() const {
     return this == _the_null_class_loader_data;
   }
+  bool is_ext_class_loader_data() const;
 
   // The Metaspace is created lazily so may be NULL.  This
   // method will allocate a Metaspace if needed.
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -1050,15 +1050,16 @@
 
   // Relevant integer codes (keep these in synch. with MethodHandleNatives.Constants):
   enum {
-    MN_IS_METHOD           = 0x00010000, // method (not constructor)
-    MN_IS_CONSTRUCTOR      = 0x00020000, // constructor
-    MN_IS_FIELD            = 0x00040000, // field
-    MN_IS_TYPE             = 0x00080000, // nested type
+    MN_IS_METHOD            = 0x00010000, // method (not constructor)
+    MN_IS_CONSTRUCTOR       = 0x00020000, // constructor
+    MN_IS_FIELD             = 0x00040000, // field
+    MN_IS_TYPE              = 0x00080000, // nested type
+    MN_CALLER_SENSITIVE     = 0x00100000, // @CallerSensitive annotation detected
     MN_REFERENCE_KIND_SHIFT = 24, // refKind
-    MN_REFERENCE_KIND_MASK = 0x0F000000 >> MN_REFERENCE_KIND_SHIFT,
+    MN_REFERENCE_KIND_MASK  = 0x0F000000 >> MN_REFERENCE_KIND_SHIFT,
     // The SEARCH_* bits are not for MN.flags but for the matchFlags argument of MHN.getMembers:
-    MN_SEARCH_SUPERCLASSES = 0x00100000, // walk super classes
-    MN_SEARCH_INTERFACES   = 0x00200000  // walk implemented interfaces
+    MN_SEARCH_SUPERCLASSES  = 0x00100000, // walk super classes
+    MN_SEARCH_INTERFACES    = 0x00200000  // walk implemented interfaces
   };
 
   // Accessors for code generation:
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -146,6 +146,17 @@
    }
    return false;
 }
+
+/**
+ * Returns true if the passed class loader is the extension class loader.
+ */
+bool SystemDictionary::is_ext_class_loader(Handle class_loader) {
+  if (class_loader.is_null()) {
+    return false;
+  }
+  return (class_loader->klass()->name() == vmSymbols::sun_misc_Launcher_ExtClassLoader());
+}
+
 // ----------------------------------------------------------------------------
 // Resolving of classes
 
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -106,6 +106,7 @@
   do_klass(ThreadDeath_klass,                           java_lang_ThreadDeath,                     Pre                 ) \
   do_klass(Exception_klass,                             java_lang_Exception,                       Pre                 ) \
   do_klass(RuntimeException_klass,                      java_lang_RuntimeException,                Pre                 ) \
+  do_klass(SecurityManager_klass,                       java_lang_SecurityManager,                 Pre                 ) \
   do_klass(ProtectionDomain_klass,                      java_security_ProtectionDomain,            Pre                 ) \
   do_klass(AccessControlContext_klass,                  java_security_AccessControlContext,        Pre                 ) \
   do_klass(ClassNotFoundException_klass,                java_lang_ClassNotFoundException,          Pre                 ) \
@@ -138,13 +139,14 @@
   /* NOTE: needed too early in bootstrapping process to have checks based on JDK version */                              \
   /* Universe::is_gte_jdk14x_version() is not set up by this point. */                                                   \
   /* It's okay if this turns out to be NULL in non-1.4 JDKs. */                                                          \
-  do_klass(lambda_MagicLambdaImpl_klass,                java_lang_invoke_MagicLambdaImpl, Opt ) \
+  do_klass(lambda_MagicLambdaImpl_klass,                java_lang_invoke_MagicLambdaImpl,          Opt                 ) \
   do_klass(reflect_MagicAccessorImpl_klass,             sun_reflect_MagicAccessorImpl,             Opt                 ) \
   do_klass(reflect_MethodAccessorImpl_klass,            sun_reflect_MethodAccessorImpl,            Opt_Only_JDK14NewRef) \
   do_klass(reflect_ConstructorAccessorImpl_klass,       sun_reflect_ConstructorAccessorImpl,       Opt_Only_JDK14NewRef) \
   do_klass(reflect_DelegatingClassLoader_klass,         sun_reflect_DelegatingClassLoader,         Opt                 ) \
   do_klass(reflect_ConstantPool_klass,                  sun_reflect_ConstantPool,                  Opt_Only_JDK15      ) \
   do_klass(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15      ) \
+  do_klass(reflect_CallerSensitive_klass,               sun_reflect_CallerSensitive,               Opt                 ) \
                                                                                                                          \
   /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */                                            \
   do_klass(MethodHandle_klass,                          java_lang_invoke_MethodHandle,             Pre_JSR292          ) \
@@ -628,12 +630,15 @@
   static bool is_parallelCapable(Handle class_loader);
   static bool is_parallelDefine(Handle class_loader);
 
+public:
+  static bool is_ext_class_loader(Handle class_loader);
+
+private:
   static Klass* find_shared_class(Symbol* class_name);
 
   // Setup link to hierarchy
   static void add_to_hierarchy(instanceKlassHandle k, TRAPS);
 
-private:
   // We pass in the hashtable index so we can calculate it outside of
   // the SystemDictionary_lock.
 
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -91,6 +91,7 @@
   template(java_lang_StringBuffer,                    "java/lang/StringBuffer")                   \
   template(java_lang_StringBuilder,                   "java/lang/StringBuilder")                  \
   template(java_lang_CharSequence,                    "java/lang/CharSequence")                   \
+  template(java_lang_SecurityManager,                 "java/lang/SecurityManager")                \
   template(java_security_AccessControlContext,        "java/security/AccessControlContext")       \
   template(java_security_ProtectionDomain,            "java/security/ProtectionDomain")           \
   template(java_io_OutputStream,                      "java/io/OutputStream")                     \
@@ -211,6 +212,8 @@
   template(sun_reflect_SerializationConstructorAccessorImpl, "sun/reflect/SerializationConstructorAccessorImpl") \
   template(sun_reflect_DelegatingClassLoader,         "sun/reflect/DelegatingClassLoader")        \
   template(sun_reflect_Reflection,                    "sun/reflect/Reflection")                   \
+  template(sun_reflect_CallerSensitive,               "sun/reflect/CallerSensitive")              \
+  template(sun_reflect_CallerSensitive_signature,     "Lsun/reflect/CallerSensitive;")            \
   template(checkedExceptions_name,                    "checkedExceptions")                        \
   template(clazz_name,                                "clazz")                                    \
   template(exceptionTypes_name,                       "exceptionTypes")                           \
@@ -343,6 +346,7 @@
   template(contextClassLoader_name,                   "contextClassLoader")                       \
   template(inheritedAccessControlContext_name,        "inheritedAccessControlContext")            \
   template(isPrivileged_name,                         "isPrivileged")                             \
+  template(getClassContext_name,                      "getClassContext")                          \
   template(wait_name,                                 "wait")                                     \
   template(checkPackageAccess_name,                   "checkPackageAccess")                       \
   template(stackSize_name,                            "stackSize")                                \
@@ -463,6 +467,7 @@
   template(void_classloader_signature,                "()Ljava/lang/ClassLoader;")                                \
   template(void_object_signature,                     "()Ljava/lang/Object;")                                     \
   template(void_class_signature,                      "()Ljava/lang/Class;")                                      \
+  template(void_class_array_signature,                "()[Ljava/lang/Class;")                                     \
   template(void_string_signature,                     "()Ljava/lang/String;")                                     \
   template(object_array_object_signature,             "([Ljava/lang/Object;)Ljava/lang/Object;")                  \
   template(object_object_array_object_signature,      "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;")\
@@ -705,9 +710,8 @@
   do_intrinsic(_getLength,                java_lang_reflect_Array, getLength_name, object_int_signature,         F_SN)  \
    do_name(     getLength_name,                                   "getLength")                                          \
                                                                                                                         \
-  do_intrinsic(_getCallerClass,           sun_reflect_Reflection, getCallerClass_name, getCallerClass_signature, F_SN)  \
+  do_intrinsic(_getCallerClass,           sun_reflect_Reflection, getCallerClass_name, void_class_signature,     F_SN)  \
    do_name(     getCallerClass_name,                             "getCallerClass")                                      \
-   do_signature(getCallerClass_signature,                        "(I)Ljava/lang/Class;")                                \
                                                                                                                         \
   do_intrinsic(_newArray,                 java_lang_reflect_Array, newArray_name, newArray_signature,            F_SN)  \
    do_name(     newArray_name,                                    "newArray")                                           \
--- a/hotspot/src/share/vm/code/codeCache.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/code/codeCache.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -156,6 +156,11 @@
   static address  low_bound()                    { return (address) _heap->low_boundary(); }
   static address  high_bound()                   { return (address) _heap->high_boundary(); }
 
+  static bool has_space(int size) {
+    // Always leave some room in the CodeCache for I2C/C2I adapters
+    return largest_free_block() > (CodeCacheMinimumFreeSpace + size);
+  }
+
   // Profiling
   static address first_address();                // first address used for CodeBlobs
   static address last_address();                 // last  address used for CodeBlobs
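The new CodeCache::has_space() guard lets allocation sites bail out before an nmethod is even constructed, keeping the CodeCacheMinimumFreeSpace reserve for I2C/C2I adapters intact; nmethod::operator new (below) can then treat an allocation failure as a hard error. A minimal standalone sketch of this check-then-allocate pattern, where the cache fields are illustrative stand-ins for largest_free_block() and CodeCacheMinimumFreeSpace rather than HotSpot API:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    static size_t largest_free_block = 4096;  // stand-in for CodeCache state
    static const size_t minimum_free = 1024;  // reserve for I2C/C2I adapters

    static bool has_space(size_t size) {
      // Always leave some room for the I2C/C2I adapters.
      return largest_free_block > minimum_free + size;
    }

    static void* cache_allocate(size_t size) {
      // Callers check has_space() first, so a failure here is a hard
      // error, mirroring the guarantee() added to nmethod::operator new.
      assert(has_space(size) && "caller must check has_space() first");
      largest_free_block -= size;
      return std::malloc(size);
    }

    int main() {
      const size_t nmethod_size = 2048;
      if (has_space(nmethod_size)) {      // as in new_native_nmethod()
        void* nm = cache_allocate(nmethod_size);
        std::printf("allocated %zu bytes\n", nmethod_size);
        std::free(nm);
      } else {
        std::printf("bailed out: code cache too full\n");
      }
      return 0;
    }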
--- a/hotspot/src/share/vm/code/nmethod.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -486,7 +486,6 @@
 #endif // def HAVE_DTRACE_H
 }
 
-
 nmethod* nmethod::new_native_nmethod(methodHandle method,
   int compile_id,
   CodeBuffer *code_buffer,
@@ -502,17 +501,19 @@
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
-    CodeOffsets offsets;
-    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
-    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
-    nm = new (native_nmethod_size)
-      nmethod(method(), native_nmethod_size, compile_id, &offsets,
-              code_buffer, frame_size,
-              basic_lock_owner_sp_offset, basic_lock_sp_offset,
-              oop_maps);
-    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
-    if (PrintAssembly && nm != NULL)
-      Disassembler::decode(nm);
+    if (CodeCache::has_space(native_nmethod_size)) {
+      CodeOffsets offsets;
+      offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
+      offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
+      nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
+                                             compile_id, &offsets,
+                                             code_buffer, frame_size,
+                                             basic_lock_owner_sp_offset,
+                                             basic_lock_sp_offset, oop_maps);
+      NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
+      if (PrintAssembly && nm != NULL)
+        Disassembler::decode(nm);
+    }
   }
   // verify nmethod
   debug_only(if (nm) nm->verify();) // might block
@@ -537,16 +538,19 @@
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
-    CodeOffsets offsets;
-    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
-    offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
-    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
+    if (CodeCache::has_space(nmethod_size)) {
+      CodeOffsets offsets;
+      offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
+      offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
+      offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 
-    nm = new (nmethod_size) nmethod(method(), nmethod_size, &offsets, code_buffer, frame_size);
+      nm = new (nmethod_size) nmethod(method(), nmethod_size,
+                                      &offsets, code_buffer, frame_size);
 
-    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
-    if (PrintAssembly && nm != NULL)
-      Disassembler::decode(nm);
+      NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
+      if (PrintAssembly && nm != NULL)
+        Disassembler::decode(nm);
+    }
   }
   // verify nmethod
   debug_only(if (nm) nm->verify();) // might block
@@ -587,7 +591,8 @@
       + round_to(handler_table->size_in_bytes(), oopSize)
       + round_to(nul_chk_table->size_in_bytes(), oopSize)
       + round_to(debug_info->data_size()       , oopSize);
-    nm = new (nmethod_size)
+    if (CodeCache::has_space(nmethod_size)) {
+      nm = new (nmethod_size)
       nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
               orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
               oop_maps,
@@ -595,6 +600,7 @@
               nul_chk_table,
               compiler,
               comp_level);
+    }
     if (nm != NULL) {
       // To make dependency checking during class loading fast, record
       // the nmethod dependencies in the classes it is dependent on.
@@ -793,9 +799,9 @@
 #endif // def HAVE_DTRACE_H
 
 void* nmethod::operator new(size_t size, int nmethod_size) {
-  // Always leave some room in the CodeCache for I2C/C2I adapters
-  if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) return NULL;
-  return CodeCache::allocate(nmethod_size);
+  void* alloc = CodeCache::allocate(nmethod_size);
+  guarantee(alloc != NULL, "CodeCache should have enough space");
+  return alloc;
 }
 
 
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -2166,6 +2166,9 @@
     comp->print_timers();
   }
   tty->cr();
+  tty->print_cr("  Total compiled methods   : %6d methods", CompileBroker::_total_compile_count);
+  tty->print_cr("    Standard compilation   : %6d methods", CompileBroker::_total_standard_compile_count);
+  tty->print_cr("    On stack replacement   : %6d methods", CompileBroker::_total_osr_compile_count);
   int tcb = CompileBroker::_sum_osr_bytes_compiled + CompileBroker::_sum_standard_bytes_compiled;
   tty->print_cr("  Total compiled bytecodes : %6d bytes", tcb);
   tty->print_cr("    Standard compilation   : %6d bytes", CompileBroker::_sum_standard_bytes_compiled);
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -2228,8 +2228,6 @@
 }
 
 void InstanceKlass::clean_method_data(BoolObjectClosure* is_alive) {
-#ifdef COMPILER2
-  // Currently only used by C2.
   for (int m = 0; m < methods()->length(); m++) {
     MethodData* mdo = methods()->at(m)->method_data();
     if (mdo != NULL) {
@@ -2240,15 +2238,6 @@
       }
     }
   }
-#else
-#ifdef ASSERT
-  // Verify that we haven't started to use MDOs for C1.
-  for (int m = 0; m < methods()->length(); m++) {
-    MethodData* mdo = methods()->at(m)->method_data();
-    assert(mdo == NULL, "Didn't expect C1 to use MDOs");
-  }
-#endif // ASSERT
-#endif // !COMPILER2
 }
 
 
--- a/hotspot/src/share/vm/oops/method.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/oops/method.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -967,6 +967,32 @@
   return false;
 }
 
+
+/**
+ *  Returns true if this is one of the specially treated methods for
+ *  security-related stack walks (like Reflection.getCallerClass).
+ */
+bool Method::is_ignored_by_security_stack_walk() const {
+  const bool use_new_reflection = JDK_Version::is_gte_jdk14x_version() && UseNewReflection;
+
+  assert(intrinsic_id() != vmIntrinsics::_invoke || Universe::reflect_invoke_cache()->is_same_method((Method*)this), "sanity");
+  if (intrinsic_id() == vmIntrinsics::_invoke) {
+    // This is Method.invoke() -- ignore it
+    return true;
+  }
+  if (use_new_reflection &&
+      method_holder()->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass())) {
+    // This is an auxiliary frame -- ignore it
+    return true;
+  }
+  if (is_method_handle_intrinsic() || is_compiled_lambda_form()) {
+    // This is an internal adapter frame for method handles -- ignore it
+    return true;
+  }
+  return false;
+}
+
+
 // Constant pool structure for invoke methods:
 enum {
   _imcp_invoke_name = 1,        // utf8: 'invokeExact', etc.
@@ -1180,13 +1206,13 @@
   // because we are not loading from core libraries
   // exception: the AES intrinsics come from lib/ext/sunjce_provider.jar
   // which does not use the class default class loader so we check for its loader here
-  if ((InstanceKlass::cast(holder)->class_loader() != NULL) &&
-       InstanceKlass::cast(holder)->class_loader()->klass()->name() != vmSymbols::sun_misc_Launcher_ExtClassLoader()) {
+  InstanceKlass* ik = InstanceKlass::cast(holder);
+  if ((ik->class_loader() != NULL) && !SystemDictionary::is_ext_class_loader(ik->class_loader())) {
     return vmSymbols::NO_SID;   // regardless of name, no intrinsics here
   }
 
   // see if the klass name is well-known:
-  Symbol* klass_name = InstanceKlass::cast(holder)->name();
+  Symbol* klass_name = ik->name();
   return vmSymbols::find_sid(klass_name);
 }
 
--- a/hotspot/src/share/vm/oops/method.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/oops/method.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -118,11 +118,12 @@
 #endif
   u2                _method_size;                // size of this object
   u1                _intrinsic_id;               // vmSymbols::intrinsic_id (0 == _none)
-  u1                _jfr_towrite  : 1,           // Flags
-                    _force_inline : 1,
-                    _hidden       : 1,
-                    _dont_inline  : 1,
-                                  : 4;
+  u1                _jfr_towrite      : 1,       // Flags
+                    _caller_sensitive : 1,
+                    _force_inline     : 1,
+                    _hidden           : 1,
+                    _dont_inline      : 1,
+                                      : 3;
   u2                _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
   u2                _number_of_breakpoints;      // fullspeed debugging support
   InvocationCounter _invocation_counter;         // Incremented before each activation of the method - used to trigger frequency-based optimizations
@@ -618,6 +619,9 @@
   // Reflection support
   bool is_overridden_in(Klass* k) const;
 
+  // Stack walking support
+  bool is_ignored_by_security_stack_walk() const;
+
   // JSR 292 support
   bool is_method_handle_intrinsic() const;          // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
   bool is_compiled_lambda_form() const;             // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
@@ -705,15 +709,16 @@
   void init_intrinsic_id();     // updates from _none if a match
   static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);
 
-  bool jfr_towrite()                 { return _jfr_towrite; }
-  void set_jfr_towrite(bool towrite) { _jfr_towrite = towrite; }
-
-  bool     force_inline()       { return _force_inline;     }
-  void set_force_inline(bool x) {        _force_inline = x; }
-  bool     dont_inline()        { return _dont_inline;      }
-  void set_dont_inline(bool x)  {        _dont_inline = x;  }
-  bool  is_hidden()             { return _hidden;           }
-  void set_hidden(bool x)       {        _hidden = x;       }
+  bool     jfr_towrite()            { return _jfr_towrite;          }
+  void set_jfr_towrite(bool x)      {        _jfr_towrite = x;      }
+  bool     caller_sensitive()       { return _caller_sensitive;     }
+  void set_caller_sensitive(bool x) {        _caller_sensitive = x; }
+  bool     force_inline()           { return _force_inline;         }
+  void set_force_inline(bool x)     {        _force_inline = x;     }
+  bool     dont_inline()            { return _dont_inline;          }
+  void set_dont_inline(bool x)      {        _dont_inline = x;      }
+  bool  is_hidden()                 { return _hidden;               }
+  void set_hidden(bool x)           {        _hidden = x;           }
   ConstMethod::MethodType method_type() const {
       return _constMethod->method_type();
   }
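The _caller_sensitive bit above is carved out of the previously unused padding, so the Method flag byte stays one byte wide and the accessors keep their get/set shape. A compilable sketch of the same bit-field layout, where MethodFlags is a hypothetical stand-in for the Method fields rather than HotSpot code:

    #include <cstdint>
    #include <cstdio>

    struct MethodFlags {
      uint8_t _jfr_towrite      : 1;
      uint8_t _caller_sensitive : 1;  // new bit; padding shrinks from 4 to 3
      uint8_t _force_inline     : 1;
      uint8_t _hidden           : 1;
      uint8_t _dont_inline      : 1;
      uint8_t                   : 3;

      bool     caller_sensitive() const  { return _caller_sensitive; }
      void set_caller_sensitive(bool x)  { _caller_sensitive = x; }
    };

    int main() {
      MethodFlags m = {};
      m.set_caller_sensitive(true);
      std::printf("size=%zu caller_sensitive=%d\n",
                  sizeof(MethodFlags), (int)m.caller_sensitive());  // size=1
      return 0;
    }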
--- a/hotspot/src/share/vm/oops/methodData.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/oops/methodData.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -392,6 +392,9 @@
 }
 
 int MethodData::bytecode_cell_count(Bytecodes::Code code) {
+#if defined(COMPILER1) && !defined(COMPILER2)
+  return no_profile_data;
+#else
   switch (code) {
   case Bytecodes::_checkcast:
   case Bytecodes::_instanceof:
@@ -438,6 +441,7 @@
     return variable_cell_count;
   }
   return no_profile_data;
+#endif
 }
 
 // Compute the size of the profiling information corresponding to
@@ -509,6 +513,9 @@
 // the segment in bytes.
 int MethodData::initialize_data(BytecodeStream* stream,
                                        int data_index) {
+#if defined(COMPILER1) && !defined(COMPILER2)
+  return 0;
+#else
   int cell_count = -1;
   int tag = DataLayout::no_tag;
   DataLayout* data_layout = data_layout_at(data_index);
@@ -587,6 +594,7 @@
     assert(!bytecode_has_profile(c), "agree w/ !BHP");
     return 0;
   }
+#endif
 }
 
 // Get the data at an arbitrary (sort of) data index.
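The #if defined(COMPILER1) && !defined(COMPILER2) guards above specialize MethodData at build time: a client-only VM allocates no per-bytecode profile cells, while C2 and tiered builds keep the full switch (consistent with InstanceKlass::clean_method_data above losing its COMPILER2-only guard). A tiny sketch of the same configuration pattern; compile with -DCOMPILER1 or -DCOMPILER2 to see the two behaviors, and note the non-zero cell count is illustrative:

    #include <cstdio>

    #if defined(COMPILER1) && !defined(COMPILER2)
    static int bytecode_cell_count() { return 0; }  // no_profile_data
    #else
    static int bytecode_cell_count() { return 2; }  // illustrative cell count
    #endif

    int main() {
      std::printf("profile cells per bytecode: %d\n", bytecode_cell_count());
      return 0;
    }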
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -3445,7 +3445,6 @@
 
 void GraphKit::final_sync(IdealKit& ideal) {
   // Final sync IdealKit and graphKit.
-  __ drain_delay_transform();
   sync_kit(ideal);
 }
 
--- a/hotspot/src/share/vm/opto/idealKit.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/opto/idealKit.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -48,9 +48,9 @@
   _cvstate = NULL;
   // We can go memory state free or else we need the entire memory state
   assert(_initial_memory == NULL || _initial_memory->Opcode() == Op_MergeMem, "memory must be pre-split");
+  assert(!_gvn.is_IterGVN(), "IdealKit can't be used during Optimize phase");
   int init_size = 5;
   _pending_cvstates = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
-  _delay_transform  = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
   DEBUG_ONLY(_state = new (C->node_arena()) GrowableArray<int>(C->node_arena(), init_size, 0, 0));
   if (!has_declarations) {
      declarations_done();
@@ -296,19 +296,16 @@
     return delay_transform(n);
   } else {
     n = gvn().transform(n);
-    if (!gvn().is_IterGVN()) {
-      C->record_for_igvn(n);
-    }
+    C->record_for_igvn(n);
     return n;
   }
 }
 
 //-----------------------------delay_transform-----------------------------------
 Node* IdealKit::delay_transform(Node* n) {
-  if (!gvn().is_IterGVN() || !gvn().is_IterGVN()->delay_transform()) {
-    gvn().set_type(n, n->bottom_type());
-  }
-  _delay_transform->push(n);
+  // Delay transform until IterativeGVN
+  gvn().set_type(n, n->bottom_type());
+  C->record_for_igvn(n);
   return n;
 }
 
@@ -332,17 +329,6 @@
   for (uint i = 0; i < m->req(); i++) m->set_req(i, NULL);
 }
 
-//-----------------------------drain_delay_transform----------------------------
-void IdealKit::drain_delay_transform() {
-  while (_delay_transform->length() > 0) {
-    Node* n = _delay_transform->pop();
-    gvn().transform(n);
-    if (!gvn().is_IterGVN()) {
-      C->record_for_igvn(n);
-    }
-  }
-}
-
 //-----------------------------IdealVariable----------------------------
 IdealVariable::IdealVariable(IdealKit &k) {
   k.declare(this);
@@ -351,9 +337,7 @@
 Node* IdealKit::memory(uint alias_idx) {
   MergeMemNode* mem = merged_memory();
   Node* p = mem->memory_at(alias_idx);
-  if (!gvn().is_IterGVN() || !gvn().is_IterGVN()->delay_transform()) {
-    _gvn.set_type(p, Type::MEMORY);  // must be mapped
-  }
+  _gvn.set_type(p, Type::MEMORY);  // must be mapped
   return p;
 }
 
--- a/hotspot/src/share/vm/opto/idealKit.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/opto/idealKit.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -102,7 +102,6 @@
   Compile * const C;
   PhaseGVN &_gvn;
   GrowableArray<Node*>* _pending_cvstates; // stack of cvstates
-  GrowableArray<Node*>* _delay_transform;  // delay invoking gvn.transform until drain
   Node* _cvstate;                          // current cvstate (control, memory and variables)
   uint _var_ct;                            // number of variables
   bool _delay_all_transforms;              // flag forcing all transforms to be delayed
@@ -121,7 +120,7 @@
   void clear(Node* m);                     // clear a cvstate
   void stop() { clear(_cvstate); }         // clear current cvstate
   Node* delay_transform(Node* n);
-  Node* transform(Node* n);                // gvn.transform or push node on delay list
+  Node* transform(Node* n);                // gvn.transform or skip it
   Node* promote_to_phi(Node* n, Node* reg);// Promote "n" to a phi on region "reg"
   bool was_promoted_to_phi(Node* n, Node* reg) {
     return (n->is_Phi() && n->in(0) == reg);
@@ -146,7 +145,6 @@
   IdealKit(GraphKit* gkit, bool delay_all_transforms = false, bool has_declarations = false);
   ~IdealKit() {
     stop();
-    drain_delay_transform();
   }
   void sync_kit(GraphKit* gkit);
 
@@ -173,7 +171,6 @@
   void bind(Node* lab);
   void goto_(Node* lab, bool bind = false);
   void declarations_done();
-  void drain_delay_transform();
 
   Node* IfTrue(IfNode* iff)  { return transform(new (C) IfTrueNode(iff)); }
   Node* IfFalse(IfNode* iff) { return transform(new (C) IfFalseNode(iff)); }
@@ -198,7 +195,11 @@
   Node* thread()  {  return gvn().transform(new (C) ThreadLocalNode()); }
 
   // Pointers
-  Node* AddP(Node *base, Node *ptr, Node *off) { return transform(new (C) AddPNode(base, ptr, off)); }
+
+  // A raw address should be transformed regardless of the 'delay_transform'
+  // flag, to produce the canonical form CastX2P(offset).
+  Node* AddP(Node *base, Node *ptr, Node *off) { return _gvn.transform(new (C) AddPNode(base, ptr, off)); }
+
   Node* CmpP(Node* l, Node* r) { return transform(new (C) CmpPNode(l, r)); }
 #ifdef _LP64
   Node* XorX(Node* l, Node* r) { return transform(new (C) XorLNode(l, r)); }
@@ -208,8 +209,6 @@
   Node* URShiftX(Node* l, Node* r) { return transform(new (C) URShiftXNode(l, r)); }
   Node* ConX(jint k) { return (Node*)gvn().MakeConX(k); }
   Node* CastPX(Node* ctl, Node* p) { return transform(new (C) CastP2XNode(ctl, p)); }
-  // Add a fixed offset to a pointer
-  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset);
 
   // Memory operations
 
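The IdealKit changes above retire the private _delay_transform list, along with its drains in the destructor and in GraphKit::final_sync: delayed nodes now go straight onto the compile-wide IGVN worklist via record_for_igvn(). A standalone sketch of that simplification, where Worklist and Kit are generic stand-ins rather than the HotSpot IGVN machinery:

    #include <cstdio>
    #include <vector>

    struct Node { int id; };

    // Shared worklist owned by the compile; it outlives any helper kit.
    struct Worklist {
      std::vector<Node*> nodes;
      void record_for_igvn(Node* n) { nodes.push_back(n); }
    };

    struct Kit {
      Worklist& igvn;
      explicit Kit(Worklist& w) : igvn(w) {}
      // Before: push onto a private list and drain it in the destructor.
      // After: hand the node straight to the shared worklist.
      Node* delay_transform(Node* n) { igvn.record_for_igvn(n); return n; }
    };

    int main() {
      Worklist igvn;
      Node a = {1}, b = {2};
      {
        Kit kit(igvn);
        kit.delay_transform(&a);
        kit.delay_transform(&b);
      }  // no drain needed when the kit goes out of scope
      std::printf("%zu nodes queued for IGVN\n", igvn.nodes.size());  // 2
      return 0;
    }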
--- a/hotspot/src/share/vm/opto/ifg.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/opto/ifg.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -37,8 +37,6 @@
 #include "opto/memnode.hpp"
 #include "opto/opcodes.hpp"
 
-#define EXACT_PRESSURE 1
-
 //=============================================================================
 //------------------------------IFG--------------------------------------------
 PhaseIFG::PhaseIFG( Arena *arena ) : Phase(Interference_Graph), _arena(arena) {
@@ -445,23 +443,15 @@
       pressure[1] -= lrg->reg_pressure();
       if( pressure[1] == (uint)FLOATPRESSURE ) {
         hrp_index[1] = where;
-#ifdef EXACT_PRESSURE
-      if( pressure[1] > b->_freg_pressure )
-        b->_freg_pressure = pressure[1]+1;
-#else
-        b->_freg_pressure = (uint)FLOATPRESSURE+1;
-#endif
+        if( pressure[1] > b->_freg_pressure )
+          b->_freg_pressure = pressure[1]+1;
       }
     } else if( lrg->mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
       pressure[0] -= lrg->reg_pressure();
       if( pressure[0] == (uint)INTPRESSURE   ) {
         hrp_index[0] = where;
-#ifdef EXACT_PRESSURE
-      if( pressure[0] > b->_reg_pressure )
-        b->_reg_pressure = pressure[0]+1;
-#else
-        b->_reg_pressure = (uint)INTPRESSURE+1;
-#endif
+        if( pressure[0] > b->_reg_pressure )
+          b->_reg_pressure = pressure[0]+1;
       }
     }
   }
@@ -526,17 +516,13 @@
       if (lrg.mask().is_UP() && lrg.mask_size()) {
         if (lrg._is_float || lrg._is_vector) {   // Count float pressure
           pressure[1] += lrg.reg_pressure();
-#ifdef EXACT_PRESSURE
           if( pressure[1] > b->_freg_pressure )
             b->_freg_pressure = pressure[1];
-#endif
           // Count int pressure, but do not count the SP, flags
         } else if( lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
           pressure[0] += lrg.reg_pressure();
-#ifdef EXACT_PRESSURE
           if( pressure[0] > b->_reg_pressure )
             b->_reg_pressure = pressure[0];
-#endif
         }
       }
     }
@@ -589,30 +575,20 @@
             RegMask itmp = lrgs(r).mask();
             itmp.AND(*Matcher::idealreg2regmask[Op_RegI]);
             int iregs = itmp.Size();
-#ifdef EXACT_PRESSURE
             if( pressure[0]+iregs > b->_reg_pressure )
               b->_reg_pressure = pressure[0]+iregs;
-#endif
             if( pressure[0]       <= (uint)INTPRESSURE &&
                 pressure[0]+iregs >  (uint)INTPRESSURE ) {
-#ifndef EXACT_PRESSURE
-              b->_reg_pressure = (uint)INTPRESSURE+1;
-#endif
               hrp_index[0] = j-1;
             }
             // Count the float-only registers
             RegMask ftmp = lrgs(r).mask();
             ftmp.AND(*Matcher::idealreg2regmask[Op_RegD]);
             int fregs = ftmp.Size();
-#ifdef EXACT_PRESSURE
             if( pressure[1]+fregs > b->_freg_pressure )
               b->_freg_pressure = pressure[1]+fregs;
-#endif
             if( pressure[1]       <= (uint)FLOATPRESSURE &&
                 pressure[1]+fregs >  (uint)FLOATPRESSURE ) {
-#ifndef EXACT_PRESSURE
-              b->_freg_pressure = (uint)FLOATPRESSURE+1;
-#endif
               hrp_index[1] = j-1;
             }
           }
@@ -769,16 +745,12 @@
             if (lrg.mask().is_UP() && lrg.mask_size()) {
               if (lrg._is_float || lrg._is_vector) {
                 pressure[1] += lrg.reg_pressure();
-#ifdef EXACT_PRESSURE
                 if( pressure[1] > b->_freg_pressure )
                   b->_freg_pressure = pressure[1];
-#endif
               } else if( lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
                 pressure[0] += lrg.reg_pressure();
-#ifdef EXACT_PRESSURE
                 if( pressure[0] > b->_reg_pressure )
                   b->_reg_pressure = pressure[0];
-#endif
               }
             }
             assert( pressure[0] == count_int_pressure  (&liveout), "" );
@@ -794,21 +766,13 @@
     // the whole block is high pressure.
     if( pressure[0] > (uint)INTPRESSURE   ) {
       hrp_index[0] = 0;
-#ifdef EXACT_PRESSURE
       if( pressure[0] > b->_reg_pressure )
         b->_reg_pressure = pressure[0];
-#else
-      b->_reg_pressure = (uint)INTPRESSURE+1;
-#endif
     }
     if( pressure[1] > (uint)FLOATPRESSURE ) {
       hrp_index[1] = 0;
-#ifdef EXACT_PRESSURE
       if( pressure[1] > b->_freg_pressure )
         b->_freg_pressure = pressure[1];
-#else
-      b->_freg_pressure = (uint)FLOATPRESSURE+1;
-#endif
     }
 
     // Compute high pressure indice; avoid landing in the middle of projnodes
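With the EXACT_PRESSURE toggle removed above, the exact bookkeeping becomes unconditional: whenever the running pressure changes, the block's recorded maximum is raised to match, instead of being pinned to INTPRESSURE+1 or FLOATPRESSURE+1 on the non-exact path. The recurring pattern reduces to one small helper; a sketch, not the HotSpot code:

    #include <cstdio>

    // Raise the block's recorded maximum whenever the running pressure
    // exceeds it; the old non-EXACT_PRESSURE path only recorded limit+1.
    static void note_pressure(unsigned pressure, unsigned& block_max) {
      if (pressure > block_max)
        block_max = pressure;
    }

    int main() {
      unsigned reg_pressure = 0, block_max = 0;
      const unsigned deltas[] = {3, 5, 2};
      for (unsigned delta : deltas) {
        reg_pressure += delta;
        note_pressure(reg_pressure, block_max);
      }
      std::printf("max register pressure in block: %u\n", block_max);  // 10
      return 0;
    }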
--- a/hotspot/src/share/vm/opto/library_call.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -231,7 +231,6 @@
   void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
   bool inline_native_clone(bool is_virtual);
   bool inline_native_Reflection_getCallerClass();
-  bool is_method_invoke_or_aux_frame(JVMState* jvms);
   // Helper function for inlining native object hash method
   bool inline_native_hashcode(bool is_virtual, bool is_static);
   bool inline_native_getClass();
@@ -393,7 +392,7 @@
   case vmIntrinsics::_getCallerClass:
     if (!UseNewReflection)  return NULL;
     if (!InlineReflectionGetCallerClass)  return NULL;
-    if (!JDK_Version::is_gte_jdk14x_version())  return NULL;
+    if (SystemDictionary::reflect_CallerSensitive_klass() == NULL)  return NULL;
     break;
 
   case vmIntrinsics::_bitCount_i:
@@ -3872,13 +3871,13 @@
 }
 
 //-----------------inline_native_Reflection_getCallerClass---------------------
-// public static native Class<?> sun.reflect.Reflection.getCallerClass(int realFramesToSkip);
+// public static native Class<?> sun.reflect.Reflection.getCallerClass();
 //
 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
 //
-// NOTE that this code must perform the same logic as
-// vframeStream::security_get_caller_frame in that it must skip
-// Method.invoke() and auxiliary frames.
+// NOTE: This code must perform the same logic as JVM_GetCallerClass
+// in that it must skip particular security frames and check for
+// caller-sensitive methods.
 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
 #ifndef PRODUCT
   if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
@@ -3886,35 +3885,6 @@
   }
 #endif
 
-  Node* caller_depth_node = argument(0);
-
-  // The depth value must be a constant in order for the runtime call
-  // to be eliminated.
-  const TypeInt* caller_depth_type = _gvn.type(caller_depth_node)->isa_int();
-  if (caller_depth_type == NULL || !caller_depth_type->is_con()) {
-#ifndef PRODUCT
-    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
-      tty->print_cr("  Bailing out because caller depth was not a constant");
-    }
-#endif
-    return false;
-  }
-  // Note that the JVM state at this point does not include the
-  // getCallerClass() frame which we are trying to inline. The
-  // semantics of getCallerClass(), however, are that the "first"
-  // frame is the getCallerClass() frame, so we subtract one from the
-  // requested depth before continuing. We don't inline requests of
-  // getCallerClass(0).
-  int caller_depth = caller_depth_type->get_con() - 1;
-  if (caller_depth < 0) {
-#ifndef PRODUCT
-    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
-      tty->print_cr("  Bailing out because caller depth was %d", caller_depth);
-    }
-#endif
-    return false;
-  }
-
   if (!jvms()->has_method()) {
 #ifndef PRODUCT
     if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
@@ -3923,95 +3893,67 @@
 #endif
     return false;
   }
-  int _depth = jvms()->depth();  // cache call chain depth
 
   // Walk back up the JVM state to find the caller at the required
-  // depth. NOTE that this code must perform the same logic as
-  // vframeStream::security_get_caller_frame in that it must skip
-  // Method.invoke() and auxiliary frames. Note also that depth is
-  // 1-based (1 is the bottom of the inlining).
-  int inlining_depth = _depth;
-  JVMState* caller_jvms = NULL;
-
-  if (inlining_depth > 0) {
-    caller_jvms = jvms();
-    assert(caller_jvms = jvms()->of_depth(inlining_depth), "inlining_depth == our depth");
-    do {
-      // The following if-tests should be performed in this order
-      if (is_method_invoke_or_aux_frame(caller_jvms)) {
-        // Skip a Method.invoke() or auxiliary frame
-      } else if (caller_depth > 0) {
-        // Skip real frame
-        --caller_depth;
-      } else {
-        // We're done: reached desired caller after skipping.
-        break;
+  // depth.
+  JVMState* caller_jvms = jvms();
+
+  // Cf. JVM_GetCallerClass
+  // NOTE: Start the loop at depth 1 because the current JVM state does
+  // not include the Reflection.getCallerClass() frame.
+  for (int n = 1; caller_jvms != NULL; caller_jvms = caller_jvms->caller(), n++) {
+    ciMethod* m = caller_jvms->method();
+    switch (n) {
+    case 0:
+      fatal("current JVM state does not include the Reflection.getCallerClass frame");
+      break;
+    case 1:
+      // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
+      if (!m->caller_sensitive()) {
+#ifndef PRODUCT
+        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+          tty->print_cr("  Bailing out: CallerSensitive annotation expected at frame %d", n);
+        }
+#endif
+        return false;  // bail-out; let JVM_GetCallerClass do the work
       }
-      caller_jvms = caller_jvms->caller();
-      --inlining_depth;
-    } while (inlining_depth > 0);
-  }
-
-  if (inlining_depth == 0) {
+      break;
+    default:
+      if (!m->is_ignored_by_security_stack_walk()) {
+        // We have reached the desired frame; return the holder class.
+        // Acquire method holder as java.lang.Class and push as constant.
+        ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
+        ciInstance* caller_mirror = caller_klass->java_mirror();
+        set_result(makecon(TypeInstPtr::make(caller_mirror)));
+
 #ifndef PRODUCT
-    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
-      tty->print_cr("  Bailing out because caller depth (%d) exceeded inlining depth (%d)", caller_depth_type->get_con(), _depth);
-      tty->print_cr("  JVM state at this point:");
-      for (int i = _depth; i >= 1; i--) {
-        ciMethod* m = jvms()->of_depth(i)->method();
-        tty->print_cr("   %d) %s.%s", i, m->holder()->name()->as_utf8(), m->name()->as_utf8());
-      }
-    }
+        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+          tty->print_cr("  Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
+          tty->print_cr("  JVM state at this point:");
+          for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
+            ciMethod* m = jvms()->of_depth(i)->method();
+            tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
+          }
+        }
 #endif
-    return false; // Reached end of inlining
+        return true;
+      }
+      break;
+    }
   }
 
-  // Acquire method holder as java.lang.Class
-  ciInstanceKlass* caller_klass  = caller_jvms->method()->holder();
-  ciInstance*      caller_mirror = caller_klass->java_mirror();
-
-  // Push this as a constant
-  set_result(makecon(TypeInstPtr::make(caller_mirror)));
-
 #ifndef PRODUCT
   if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
-    tty->print_cr("  Succeeded: caller = %s.%s, caller depth = %d, depth = %d", caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), caller_depth_type->get_con(), _depth);
+    tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
     tty->print_cr("  JVM state at this point:");
-    for (int i = _depth; i >= 1; i--) {
+    for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
       ciMethod* m = jvms()->of_depth(i)->method();
-      tty->print_cr("   %d) %s.%s", i, m->holder()->name()->as_utf8(), m->name()->as_utf8());
+      tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
     }
   }
 #endif
-  return true;
-}
-
-// Helper routine for above
-bool LibraryCallKit::is_method_invoke_or_aux_frame(JVMState* jvms) {
-  ciMethod* method = jvms->method();
-
-  // Is this the Method.invoke method itself?
-  if (method->intrinsic_id() == vmIntrinsics::_invoke)
-    return true;
-
-  // Is this a helper, defined somewhere underneath MethodAccessorImpl.
-  ciKlass* k = method->holder();
-  if (k->is_instance_klass()) {
-    ciInstanceKlass* ik = k->as_instance_klass();
-    for (; ik != NULL; ik = ik->super()) {
-      if (ik->name() == ciSymbol::sun_reflect_MethodAccessorImpl() &&
-          ik == env()->find_system_klass(ik->name())) {
-        return true;
-      }
-    }
-  }
-  else if (method->is_method_handle_intrinsic() ||
-           method->is_compiled_lambda_form()) {
-    // This is an internal adapter frame from the MethodHandleCompiler -- skip it
-    return true;
-  }
-
-  return false;
+
+  return false;  // bail-out; let JVM_GetCallerClass do the work
 }
 
 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -2251,6 +2251,11 @@
     return;
   }
 
+  // clear out the dead code after build_loop_late
+  while (_deadlist.size()) {
+    _igvn.remove_globally_dead_node(_deadlist.pop());
+  }
+
   if (stop_early) {
     assert(do_expensive_nodes, "why are we here?");
     if (process_expensive_nodes()) {
@@ -2260,9 +2265,7 @@
       // nodes again.
       C->set_major_progress();
     }
-
     _igvn.optimize();
-
     return;
   }
 
@@ -2273,11 +2276,6 @@
     eliminate_useless_predicates();
   }
 
-  // clear out the dead code
-  while(_deadlist.size()) {
-    _igvn.remove_globally_dead_node(_deadlist.pop());
-  }
-
 #ifndef PRODUCT
   C->verify_graph_edges();
   if (_verify_me) {             // Nested verify pass?
--- a/hotspot/src/share/vm/opto/output.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/opto/output.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -449,6 +449,17 @@
       int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
       if (max_loop_pad > 0) {
         assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
+        // Adjust last_call_adr and/or last_avoid_back_to_back_adr.
+        // If either is the last instruction in this block, bump by
+        // max_loop_pad in lock-step with blk_size, so sizing
+        // calculations in subsequent blocks can still conservatively
+        // detect that it may be the last instruction in this block.
+        if (last_call_adr == blk_starts[i]+blk_size) {
+          last_call_adr += max_loop_pad;
+        }
+        if (last_avoid_back_to_back_adr == blk_starts[i]+blk_size) {
+          last_avoid_back_to_back_adr += max_loop_pad;
+        }
         blk_size += max_loop_pad;
       }
     }
@@ -1193,8 +1204,6 @@
   int last_call_offset = -1;
   int last_avoid_back_to_back_offset = -1;
 #ifdef ASSERT
-  int block_alignment_padding = 0;
-
   uint* jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks);
   uint* jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
   uint* jmp_size   = NEW_RESOURCE_ARRAY(uint,nblocks);
@@ -1228,8 +1237,6 @@
   Node *delay_slot = NULL;
 
   for (uint i=0; i < nblocks; i++) {
-    guarantee(blk_starts[i] >= (uint)cb->insts_size(),"should not increase size");
-
     Block *b = _cfg->_blocks[i];
 
     Node *head = b->head();
@@ -1250,14 +1257,6 @@
     jmp_offset[i] = 0;
     jmp_size[i]   = 0;
     jmp_rule[i]   = 0;
-
-    // Maximum alignment padding for loop block was used
-    // during first round of branches shortening, as result
-    // padding for nodes (sfpt after call) was not added.
-    // Take this into account for block's size change check
-    // and allow increase block's size by the difference
-    // of maximum and actual alignment paddings.
-    int orig_blk_size = blk_starts[i+1] - blk_starts[i] + block_alignment_padding;
 #endif
     int blk_offset = current_offset;
 
@@ -1557,8 +1556,6 @@
       }
 
     } // End for all instructions in block
-    assert((uint)blk_offset <= blk_starts[i], "shouldn't increase distance");
-    blk_starts[i] = blk_offset;
 
     // If the next block is the top of a loop, pad this block out to align
     // the loop top a little. Helps prevent pipe stalls at loop back branches.
@@ -1572,16 +1569,13 @@
         nop->emit(*cb, _regalloc);
         current_offset = cb->insts_size();
       }
-#ifdef ASSERT
-      int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
-      block_alignment_padding = (max_loop_pad - padding);
-      assert(block_alignment_padding >= 0, "sanity");
-#endif
     }
     // Verify that the distance for generated before forward
     // short branches is still valid.
-    assert(orig_blk_size >= (current_offset - blk_offset), "shouldn't increase block size");
-
+    guarantee((int)(blk_starts[i+1] - blk_starts[i]) >= (current_offset - blk_offset), "shouldn't increase block size");
+
+    // Save new block start offset
+    blk_starts[i] = blk_offset;
   } // End of for all blocks
   blk_starts[nblocks] = current_offset;
 
--- a/hotspot/src/share/vm/opto/parse2.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/opto/parse2.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -104,7 +104,8 @@
     if (C->log() != NULL)   C->log()->elem("observe that='!need_range_check'");
   }
 
-  if (!arytype->klass()->is_loaded()) {
+  ciKlass * arytype_klass = arytype->klass();
+  if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
     // Only fails for some -Xcomp runs
     // The class is unloaded.  We have to run this bytecode in the interpreter.
     uncommon_trap(Deoptimization::Reason_unloaded,
@@ -1385,6 +1386,7 @@
   if (TraceOptoParse) {
     tty->print(" @");
     dump_bci(bci());
+    tty->cr();
   }
 #endif
 
--- a/hotspot/src/share/vm/opto/phaseX.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/opto/phaseX.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -1166,31 +1166,30 @@
     if (progress_state == PROCESS_INPUTS) {
       // After following inputs, continue to outputs
       _stack.set_index(PROCESS_OUTPUTS);
-      // Remove from iterative worklist
-      _worklist.remove(dead);
       if (!dead->is_Con()) { // Don't kill cons but uses
         bool recurse = false;
         // Remove from hash table
         _table.hash_delete( dead );
         // Smash all inputs to 'dead', isolating him completely
-        for( uint i = 0; i < dead->req(); i++ ) {
+        for (uint i = 0; i < dead->req(); i++) {
           Node *in = dead->in(i);
-          if( in ) {                 // Points to something?
-            dead->set_req(i,NULL);  // Kill the edge
-            if (in->outcnt() == 0 && in != C->top()) {// Made input go dead?
+          if (in != NULL && in != C->top()) {  // Points to something?
+            int nrep = dead->replace_edge(in, NULL);  // Kill edges
+            assert((nrep > 0), "sanity");
+            if (in->outcnt() == 0) { // Made input go dead?
               _stack.push(in, PROCESS_INPUTS); // Recursively remove
               recurse = true;
             } else if (in->outcnt() == 1 &&
                        in->has_special_unique_user()) {
               _worklist.push(in->unique_out());
             } else if (in->outcnt() <= 2 && dead->is_Phi()) {
-              if( in->Opcode() == Op_Region )
+              if (in->Opcode() == Op_Region) {
                 _worklist.push(in);
-              else if( in->is_Store() ) {
+              } else if (in->is_Store()) {
                 DUIterator_Fast imax, i = in->fast_outs(imax);
                 _worklist.push(in->fast_out(i));
                 i++;
-                if(in->outcnt() == 2) {
+                if (in->outcnt() == 2) {
                   _worklist.push(in->fast_out(i));
                   i++;
                 }
@@ -1209,38 +1208,42 @@
                 }
               }
             }
-          }
-        }
-        C->record_dead_node(dead->_idx);
-        if (dead->is_macro()) {
-          C->remove_macro_node(dead);
-        }
-        if (dead->is_expensive()) {
-          C->remove_expensive_node(dead);
-        }
-
+          } // if (in != NULL && in != C->top())
+        } // for (uint i = 0; i < dead->req(); i++)
         if (recurse) {
           continue;
         }
-      }
-      // Constant node that has no out-edges and has only one in-edge from
-      // root is usually dead. However, sometimes reshaping walk makes
-      // it reachable by adding use edges. So, we will NOT count Con nodes
-      // as dead to be conservative about the dead node count at any
-      // given time.
-    }
+      } // if (!dead->is_Con())
+    } // if (progress_state == PROCESS_INPUTS)
 
     // Aggressively kill globally dead uses
     // (Rather than pushing all the outs at once, we push one at a time,
     // plus the parent to resume later, because of the indefinite number
     // of edge deletions per loop trip.)
     if (dead->outcnt() > 0) {
-      // Recursively remove
+      // Recursively remove output edges
       _stack.push(dead->raw_out(0), PROCESS_INPUTS);
     } else {
+      // Finished disconnecting all input and output edges.
       _stack.pop();
+      // Remove dead node from iterative worklist
+      _worklist.remove(dead);
+      // Constant node that has no out-edges and has only one in-edge from
+      // root is usually dead. However, sometimes reshaping walk makes
+      // it reachable by adding use edges. So, we will NOT count Con nodes
+      // as dead to be conservative about the dead node count at any
+      // given time.
+      if (!dead->is_Con()) {
+        C->record_dead_node(dead->_idx);
+      }
+      if (dead->is_macro()) {
+        C->remove_macro_node(dead);
+      }
+      if (dead->is_expensive()) {
+        C->remove_expensive_node(dead);
+      }
     }
-  }
+  } // while (_stack.is_nonempty())
 }
 
 //------------------------------subsume_node-----------------------------------
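The restructured loop above defers a dead node's bookkeeping (worklist removal, dead-node accounting, macro/expensive-node deregistration) until every input and output edge has been disconnected, instead of doing it on first visit. A simplified standalone sketch of the explicit-stack sweep, restricted to input edges and using generic containers in place of the compiler's structures:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Node {
      int idx;
      std::vector<Node*> in;  // input edges
      int outcnt = 0;         // number of uses
    };

    static void kill_dead(Node* root, std::vector<Node*>& worklist) {
      std::vector<Node*> stack = {root};
      while (!stack.empty()) {
        Node* dead = stack.back();
        stack.pop_back();
        // Smash all inputs, pushing any that just lost their last use.
        for (Node* in : dead->in) {
          if (--in->outcnt == 0)
            stack.push_back(in);  // recursively remove
        }
        dead->in.clear();
        // Only now, fully disconnected, drop it from the worklist.
        worklist.erase(std::remove(worklist.begin(), worklist.end(), dead),
                       worklist.end());
        std::printf("node %d recorded dead\n", dead->idx);
      }
    }

    int main() {
      Node a = {0}, b = {1}, c = {2};
      b.in = {&a};
      c.in = {&a};
      a.outcnt = 2;                 // a is used by both b and c
      std::vector<Node*> worklist = {&a, &b, &c};
      kill_dead(&b, worklist);      // kills b; a keeps its use from c
      std::printf("worklist size: %zu\n", worklist.size());  // 2
      return 0;
    }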
--- a/hotspot/src/share/vm/prims/jvm.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -30,6 +30,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
+#include "interpreter/bytecode.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/fieldStreams.hpp"
@@ -665,8 +666,51 @@
 
 JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env, int depth))
   JVMWrapper("JVM_GetCallerClass");
-  Klass* k = thread->security_get_caller_class(depth);
-  return (k == NULL) ? NULL : (jclass) JNIHandles::make_local(env, k->java_mirror());
+
+  // Pre-JDK 8 and early builds of JDK 8 don't have a CallerSensitive annotation.
+  if (SystemDictionary::reflect_CallerSensitive_klass() == NULL) {
+    Klass* k = thread->security_get_caller_class(depth);
+    return (k == NULL) ? NULL : (jclass) JNIHandles::make_local(env, k->java_mirror());
+  } else {
+    // Basic handshaking with Java_sun_reflect_Reflection_getCallerClass
+    assert(depth == -1, "wrong handshake depth");
+  }
+
+  // Getting the class of the caller frame.
+  //
+  // The call stack at this point looks something like this:
+  //
+  // [0] [ @CallerSensitive public sun.reflect.Reflection.getCallerClass ]
+  // [1] [ @CallerSensitive API.method                                   ]
+  // [.] [ (skipped intermediate frames)                                 ]
+  // [n] [ caller                                                        ]
+  vframeStream vfst(thread);
+  // Cf. LibraryCallKit::inline_native_Reflection_getCallerClass
+  for (int n = 0; !vfst.at_end(); vfst.security_next(), n++) {
+    Method* m = vfst.method();
+    assert(m != NULL, "sanity");
+    switch (n) {
+    case 0:
+      // This must only be called from Reflection.getCallerClass
+      if (m->intrinsic_id() != vmIntrinsics::_getCallerClass) {
+        THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "JVM_GetCallerClass must only be called from Reflection.getCallerClass");
+      }
+      // fall-through
+    case 1:
+      // Frame 0 and 1 must be caller sensitive.
+      if (!m->caller_sensitive()) {
+        THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), err_msg("CallerSensitive annotation expected at frame %d", n));
+      }
+      break;
+    default:
+      if (!m->is_ignored_by_security_stack_walk()) {
+        // We have reached the desired frame; return the holder class.
+        return (jclass) JNIHandles::make_local(env, m->method_holder()->java_mirror());
+      }
+      break;
+    }
+  }
+  return NULL;
 JVM_END
 
 
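This loop and its intrinsic twin in library_call.cpp share one contract: frame 0 must be Reflection.getCallerClass itself, frame 1 must be a @CallerSensitive API method, and the first later frame that is not an ignored reflection or method-handle frame is the caller. A standalone model of that walk; Frame is a hypothetical stand-in for vframeStream/Method*, not HotSpot API:

    #include <cstdio>
    #include <stdexcept>
    #include <vector>

    struct Frame {
      const char* holder;
      bool caller_sensitive;       // @CallerSensitive present
      bool ignored_by_stack_walk;  // Method.invoke, MH adapters, ...
    };

    static const char* get_caller_class(const std::vector<Frame>& stack) {
      for (size_t n = 0; n < stack.size(); n++) {
        const Frame& m = stack[n];
        if (n <= 1) {
          // Frames 0 and 1 must both be caller sensitive.
          if (!m.caller_sensitive)
            throw std::runtime_error("CallerSensitive annotation expected");
        } else if (!m.ignored_by_stack_walk) {
          return m.holder;  // first non-ignored frame: the caller
        }
      }
      return nullptr;
    }

    int main() {
      std::vector<Frame> stack = {
        {"sun.reflect.Reflection", true,  false},  // [0] getCallerClass
        {"java.lang.Class",        true,  false},  // [1] @CallerSensitive API
        {"sun.reflect.Generated",  false, true },  // [.] skipped frame
        {"com.example.Caller",     false, false},  // [n] caller
      };
      std::printf("caller = %s\n", get_caller_class(stack));
      return 0;
    }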
@@ -3208,11 +3252,24 @@
   KlassLink* first = NULL;
   KlassLink* last  = NULL;
   int depth = 0;
-
-  for(vframeStream vfst(thread); !vfst.at_end(); vfst.security_get_caller_frame(1)) {
+  vframeStream vfst(thread);
+
+  if (SystemDictionary::reflect_CallerSensitive_klass() != NULL) {
+    // This must only be called from SecurityManager.getClassContext
+    Method* m = vfst.method();
+    if (!(m->method_holder() == SystemDictionary::SecurityManager_klass() &&
+          m->name()          == vmSymbols::getClassContext_name() &&
+          m->signature()     == vmSymbols::void_class_array_signature())) {
+      THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "JVM_GetClassContext must only be called from SecurityManager.getClassContext");
+    }
+  }
+
+  // Collect method holders
+  for (; !vfst.at_end(); vfst.security_next()) {
+    Method* m = vfst.method();
     // Native frames are not returned
-    if (!vfst.method()->is_native()) {
-      Klass* holder = vfst.method()->method_holder();
+    if (!m->is_ignored_by_security_stack_walk() && !m->is_native()) {
+      Klass* holder = m->method_holder();
       assert(holder->is_klass(), "just checking");
       depth++;
       KlassLink* l = new KlassLink(KlassHandle(thread, holder));
--- a/hotspot/src/share/vm/prims/jvmtiEventController.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiEventController.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -39,7 +39,12 @@
 #include "runtime/vm_operations.hpp"
 
 #ifdef JVMTI_TRACE
-#define EC_TRACE(out) if (JvmtiTrace::trace_event_controller()) { SafeResourceMark rm; tty->print_cr out; } while (0)
+#define EC_TRACE(out) do { \
+  if (JvmtiTrace::trace_event_controller()) { \
+    SafeResourceMark rm; \
+    tty->print_cr out; \
+  } \
+} while (0)
 #else
 #define EC_TRACE(out)
 #endif /*JVMTI_TRACE */
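The rewritten EC_TRACE (like the RC_TRACE family in the next file) wraps its body in do { ... } while (0). The old form expanded to an if-block followed by a free-standing while (0);, which compiled but left the macro unsafe before an else. A minimal illustration of the hazard; TRACE_BAD and TRACE_OK are hypothetical macros, not VM ones:

    #include <cstdio>

    #define TRACE_BAD(msg) if (1) { std::printf("%s\n", msg); } while (0)
    #define TRACE_OK(msg)  do { if (1) { std::printf("%s\n", msg); } } while (0)

    int main() {
      bool verbose = false;
      // With TRACE_BAD this would not compile: the expansion closes the
      // 'if' before the 'else', leaving the 'else' dangling.
      if (verbose)
        TRACE_OK("tracing");
      else
        std::printf("tracing disabled\n");
      return 0;
    }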
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -72,36 +72,6 @@
 //    0x20000000 |  536870912 - unused
 //    0x40000000 | 1073741824 - unused
 //    0x80000000 | 2147483648 - unused
-//
-// Note: The ResourceMark is to cleanup resource allocated args.
-//   The "while (0)" is so we can use semi-colon at end of RC_TRACE().
-#define RC_TRACE(level, args) \
-  if ((TraceRedefineClasses & level) != 0) { \
-    ResourceMark rm; \
-    tty->print("RedefineClasses-0x%x: ", level); \
-    tty->print_cr args; \
-  } while (0)
-
-#define RC_TRACE_NO_CR(level, args) \
-  if ((TraceRedefineClasses & level) != 0) { \
-    ResourceMark rm; \
-    tty->print("RedefineClasses-0x%x: ", level); \
-    tty->print args; \
-  } while (0)
-
-#define RC_TRACE_WITH_THREAD(level, thread, args) \
-  if ((TraceRedefineClasses & level) != 0) { \
-    ResourceMark rm(thread); \
-    tty->print("RedefineClasses-0x%x: ", level); \
-    tty->print_cr args; \
-  } while (0)
-
-#define RC_TRACE_MESG(args) \
-  { \
-    ResourceMark rm; \
-    tty->print("RedefineClasses: "); \
-    tty->print_cr args; \
-  } while (0)
 
 // Macro for checking if TraceRedefineClasses has a specific bit
 // enabled. Returns true if the bit specified by level is set.
@@ -120,16 +90,49 @@
 #define RC_TRACE_IN_RANGE(low, high) \
 (((TraceRedefineClasses & ((high << 1) - 1)) & ~(low - 1)) != 0)
 
-// Timer support macros. Only do timer operations if timer tracing
-// is enabled. The "while (0)" is so we can use semi-colon at end of
-// the macro.
-#define RC_TIMER_START(t) \
+// Note: The ResourceMark is to clean up resource-allocated args.
+// The "do {...} while (0)" is so we can use a semicolon at the end of RC_TRACE().
+#define RC_TRACE(level, args) do { \
+  if (RC_TRACE_ENABLED(level)) { \
+    ResourceMark rm; \
+    tty->print("RedefineClasses-0x%x: ", level); \
+    tty->print_cr args; \
+  } \
+} while (0)
+
+#define RC_TRACE_NO_CR(level, args) do { \
+  if (RC_TRACE_ENABLED(level)) { \
+    ResourceMark rm; \
+    tty->print("RedefineClasses-0x%x: ", level); \
+    tty->print args; \
+  } \
+} while (0)
+
+#define RC_TRACE_WITH_THREAD(level, thread, args) do { \
+  if (RC_TRACE_ENABLED(level)) { \
+    ResourceMark rm(thread); \
+    tty->print("RedefineClasses-0x%x: ", level); \
+    tty->print_cr args; \
+  } \
+} while (0)
+
+#define RC_TRACE_MESG(args) do { \
+  ResourceMark rm; \
+  tty->print("RedefineClasses: "); \
+  tty->print_cr args; \
+} while (0)
+
+// Timer support macros. Only do timer operations if timer tracing is enabled.
+// The "do {...} while (0)" is so we can use semi-colon at end of the macro.
+#define RC_TIMER_START(t) do { \
   if (RC_TRACE_ENABLED(0x00000004)) { \
     t.start(); \
-  } while (0)
-#define RC_TIMER_STOP(t) \
+  } \
+} while (0)
+#define RC_TIMER_STOP(t) do { \
   if (RC_TRACE_ENABLED(0x00000004)) { \
     t.stop(); \
-  } while (0)
+  } \
+} while (0)
 
 #endif // SHARE_VM_PRIMS_JVMTIREDEFINECLASSESTRACE_HPP
--- a/hotspot/src/share/vm/prims/methodHandles.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -105,14 +105,15 @@
 
 // import java_lang_invoke_MemberName.*
 enum {
-  IS_METHOD      = java_lang_invoke_MemberName::MN_IS_METHOD,
-  IS_CONSTRUCTOR = java_lang_invoke_MemberName::MN_IS_CONSTRUCTOR,
-  IS_FIELD       = java_lang_invoke_MemberName::MN_IS_FIELD,
-  IS_TYPE        = java_lang_invoke_MemberName::MN_IS_TYPE,
+  IS_METHOD            = java_lang_invoke_MemberName::MN_IS_METHOD,
+  IS_CONSTRUCTOR       = java_lang_invoke_MemberName::MN_IS_CONSTRUCTOR,
+  IS_FIELD             = java_lang_invoke_MemberName::MN_IS_FIELD,
+  IS_TYPE              = java_lang_invoke_MemberName::MN_IS_TYPE,
+  CALLER_SENSITIVE     = java_lang_invoke_MemberName::MN_CALLER_SENSITIVE,
   REFERENCE_KIND_SHIFT = java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT,
   REFERENCE_KIND_MASK  = java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK,
-  SEARCH_SUPERCLASSES = java_lang_invoke_MemberName::MN_SEARCH_SUPERCLASSES,
-  SEARCH_INTERFACES   = java_lang_invoke_MemberName::MN_SEARCH_INTERFACES,
+  SEARCH_SUPERCLASSES  = java_lang_invoke_MemberName::MN_SEARCH_SUPERCLASSES,
+  SEARCH_INTERFACES    = java_lang_invoke_MemberName::MN_SEARCH_INTERFACES,
   ALL_KINDS      = IS_METHOD | IS_CONSTRUCTOR | IS_FIELD | IS_TYPE
 };
 
@@ -207,10 +208,15 @@
     vmindex = m->vtable_index();
   }
 
-  java_lang_invoke_MemberName::set_flags(mname_oop,    flags);
+  // @CallerSensitive annotation detected
+  if (m->caller_sensitive()) {
+    flags |= CALLER_SENSITIVE;
+  }
+
+  java_lang_invoke_MemberName::set_flags(   mname_oop, flags);
   java_lang_invoke_MemberName::set_vmtarget(mname_oop, m);
-  java_lang_invoke_MemberName::set_vmindex(mname_oop,  vmindex);   // vtable/itable index
-  java_lang_invoke_MemberName::set_clazz(mname_oop,    receiver_limit->java_mirror());
+  java_lang_invoke_MemberName::set_vmindex( mname_oop, vmindex);   // vtable/itable index
+  java_lang_invoke_MemberName::set_clazz(   mname_oop, receiver_limit->java_mirror());
   // Note:  name and type can be lazily computed by resolve_MemberName,
   // if Java code needs them as resolved String and MethodType objects.
   // The clazz must be eagerly stored, because it provides a GC
@@ -940,6 +946,7 @@
     template(java_lang_invoke_MemberName,MN_IS_CONSTRUCTOR) \
     template(java_lang_invoke_MemberName,MN_IS_FIELD) \
     template(java_lang_invoke_MemberName,MN_IS_TYPE) \
+    template(java_lang_invoke_MemberName,MN_CALLER_SENSITIVE) \
     template(java_lang_invoke_MemberName,MN_SEARCH_SUPERCLASSES) \
     template(java_lang_invoke_MemberName,MN_SEARCH_INTERFACES) \
     template(java_lang_invoke_MemberName,MN_REFERENCE_KIND_SHIFT) \
--- a/hotspot/src/share/vm/prims/unsafe.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/prims/unsafe.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -868,7 +868,7 @@
   env->ThrowNew(cls, msg);
 }
 
-static jclass Unsafe_DefineClass(JNIEnv *env, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd) {
+static jclass Unsafe_DefineClass_impl(JNIEnv *env, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd) {
   {
     // Code lifted from JDK 1.3 ClassLoader.c
 
@@ -939,6 +939,15 @@
 }
 
 
+UNSAFE_ENTRY(jclass, Unsafe_DefineClass(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd))
+  UnsafeWrapper("Unsafe_DefineClass");
+  {
+    ThreadToNativeFromVM ttnfv(thread);
+    return Unsafe_DefineClass_impl(env, name, data, offset, length, loader, pd);
+  }
+UNSAFE_END
+
+
 UNSAFE_ENTRY(jclass, Unsafe_DefineClass0(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length))
   UnsafeWrapper("Unsafe_DefineClass");
   {
@@ -949,20 +958,11 @@
     jobject loader = (caller == NULL) ? NULL : JVM_GetClassLoader(env, caller);
     jobject pd     = (caller == NULL) ? NULL : JVM_GetProtectionDomain(env, caller);
 
-    return Unsafe_DefineClass(env, name, data, offset, length, loader, pd);
+    return Unsafe_DefineClass_impl(env, name, data, offset, length, loader, pd);
   }
 UNSAFE_END
 
 
-UNSAFE_ENTRY(jclass, Unsafe_DefineClass1(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd))
-  UnsafeWrapper("Unsafe_DefineClass");
-  {
-    ThreadToNativeFromVM ttnfv(thread);
-
-    return Unsafe_DefineClass(env, name, data, offset, length, loader, pd);
-  }
-UNSAFE_END
-
 #define DAC_Args CLS"[B["OBJ
 // define a class but do not make it known to the class loader or system dictionary
 // - host_class:  supplies context for linkage, access control, protection domain, and class loader
@@ -1323,7 +1323,7 @@
 #define THR LANG"Throwable;"
 
 #define DC0_Args LANG"String;[BII"
-#define DC1_Args DC0_Args LANG"ClassLoader;" "Ljava/security/ProtectionDomain;"
+#define DC_Args  DC0_Args LANG"ClassLoader;" "Ljava/security/ProtectionDomain;"
 
 #define CC (char*)  /*cast a literal from (const char*)*/
 #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
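The methods_140/methods_141/methods_15/methods_16 tables below are JNINativeMethod arrays: each row binds a Java-visible name and descriptor to a C entry point, and renaming Unsafe_DefineClass1 to Unsafe_DefineClass only changes which entry point the loader/protection-domain overload of defineClass references. A hedged sketch of how such a table is registered; the stub body and the register_unsafe helper are illustrative only:

    #include <jni.h>

    // Illustrative stub with the same shape as the patched entry point.
    static jclass Unsafe_DefineClass(JNIEnv*, jobject, jstring, jbyteArray,
                                     jint, jint, jobject, jobject) {
      return NULL;
    }

    static JNINativeMethod methods[] = {
      {(char*)"defineClass",
       (char*)"(Ljava/lang/String;[BIILjava/lang/ClassLoader;"
              "Ljava/security/ProtectionDomain;)Ljava/lang/Class;",
       (void*)&Unsafe_DefineClass},
    };

    jint register_unsafe(JNIEnv* env, jclass unsafe_cls) {
      // Bind every row of the table to its native implementation.
      return env->RegisterNatives(unsafe_cls, methods,
                                  sizeof(methods) / sizeof(methods[0]));
    }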
@@ -1352,10 +1352,8 @@
 
 
 
-// %%% These are temporarily supported until the SDK sources
-// contain the necessarily updated Unsafe.java.
+// These are the methods for 1.4.0
 static JNINativeMethod methods_140[] = {
-
     {CC"getObject",        CC"("OBJ"I)"OBJ"",   FN_PTR(Unsafe_GetObject140)},
     {CC"putObject",        CC"("OBJ"I"OBJ")V",  FN_PTR(Unsafe_SetObject140)},
 
@@ -1381,12 +1379,10 @@
 
     {CC"allocateMemory",     CC"(J)"ADR,                 FN_PTR(Unsafe_AllocateMemory)},
     {CC"reallocateMemory",   CC"("ADR"J)"ADR,            FN_PTR(Unsafe_ReallocateMemory)},
-//  {CC"setMemory",          CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
-//  {CC"copyMemory",         CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
     {CC"freeMemory",         CC"("ADR")V",               FN_PTR(Unsafe_FreeMemory)},
 
-    {CC"fieldOffset",        CC"("FLD")I",               FN_PTR(Unsafe_FieldOffset)}, //deprecated
-    {CC"staticFieldBase",    CC"("CLS")"OBJ,             FN_PTR(Unsafe_StaticFieldBaseFromClass)}, //deprecated
+    {CC"fieldOffset",        CC"("FLD")I",               FN_PTR(Unsafe_FieldOffset)},
+    {CC"staticFieldBase",    CC"("CLS")"OBJ,             FN_PTR(Unsafe_StaticFieldBaseFromClass)},
     {CC"ensureClassInitialized",CC"("CLS")V",            FN_PTR(Unsafe_EnsureClassInitialized)},
     {CC"arrayBaseOffset",    CC"("CLS")I",               FN_PTR(Unsafe_ArrayBaseOffset)},
     {CC"arrayIndexScale",    CC"("CLS")I",               FN_PTR(Unsafe_ArrayIndexScale)},
@@ -1394,16 +1390,15 @@
     {CC"pageSize",           CC"()I",                    FN_PTR(Unsafe_PageSize)},
 
     {CC"defineClass",        CC"("DC0_Args")"CLS,        FN_PTR(Unsafe_DefineClass0)},
-    {CC"defineClass",        CC"("DC1_Args")"CLS,        FN_PTR(Unsafe_DefineClass1)},
+    {CC"defineClass",        CC"("DC_Args")"CLS,         FN_PTR(Unsafe_DefineClass)},
     {CC"allocateInstance",   CC"("CLS")"OBJ,             FN_PTR(Unsafe_AllocateInstance)},
     {CC"monitorEnter",       CC"("OBJ")V",               FN_PTR(Unsafe_MonitorEnter)},
     {CC"monitorExit",        CC"("OBJ")V",               FN_PTR(Unsafe_MonitorExit)},
     {CC"throwException",     CC"("THR")V",               FN_PTR(Unsafe_ThrowException)}
 };
 
-// These are the old methods prior to the JSR 166 changes in 1.5.0
+// These are the methods prior to the JSR 166 changes in 1.5.0
 static JNINativeMethod methods_141[] = {
-
     {CC"getObject",        CC"("OBJ"J)"OBJ"",   FN_PTR(Unsafe_GetObject)},
     {CC"putObject",        CC"("OBJ"J"OBJ")V",  FN_PTR(Unsafe_SetObject)},
 
@@ -1429,8 +1424,6 @@
 
     {CC"allocateMemory",     CC"(J)"ADR,                 FN_PTR(Unsafe_AllocateMemory)},
     {CC"reallocateMemory",   CC"("ADR"J)"ADR,            FN_PTR(Unsafe_ReallocateMemory)},
-//  {CC"setMemory",          CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
-//  {CC"copyMemory",         CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
     {CC"freeMemory",         CC"("ADR")V",               FN_PTR(Unsafe_FreeMemory)},
 
     {CC"objectFieldOffset",  CC"("FLD")J",               FN_PTR(Unsafe_ObjectFieldOffset)},
@@ -1443,7 +1436,7 @@
     {CC"pageSize",           CC"()I",                    FN_PTR(Unsafe_PageSize)},
 
     {CC"defineClass",        CC"("DC0_Args")"CLS,        FN_PTR(Unsafe_DefineClass0)},
-    {CC"defineClass",        CC"("DC1_Args")"CLS,        FN_PTR(Unsafe_DefineClass1)},
+    {CC"defineClass",        CC"("DC_Args")"CLS,         FN_PTR(Unsafe_DefineClass)},
     {CC"allocateInstance",   CC"("CLS")"OBJ,             FN_PTR(Unsafe_AllocateInstance)},
     {CC"monitorEnter",       CC"("OBJ")V",               FN_PTR(Unsafe_MonitorEnter)},
     {CC"monitorExit",        CC"("OBJ")V",               FN_PTR(Unsafe_MonitorExit)},
@@ -1451,9 +1444,8 @@
 
 };
 
-// These are the old methods prior to the JSR 166 changes in 1.6.0
+// These are the methods prior to the JSR 166 changes in 1.6.0
 static JNINativeMethod methods_15[] = {
-
     {CC"getObject",        CC"("OBJ"J)"OBJ"",   FN_PTR(Unsafe_GetObject)},
     {CC"putObject",        CC"("OBJ"J"OBJ")V",  FN_PTR(Unsafe_SetObject)},
     {CC"getObjectVolatile",CC"("OBJ"J)"OBJ"",   FN_PTR(Unsafe_GetObjectVolatile)},
@@ -1482,8 +1474,6 @@
 
     {CC"allocateMemory",     CC"(J)"ADR,                 FN_PTR(Unsafe_AllocateMemory)},
     {CC"reallocateMemory",   CC"("ADR"J)"ADR,            FN_PTR(Unsafe_ReallocateMemory)},
-//  {CC"setMemory",          CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
-//  {CC"copyMemory",         CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
     {CC"freeMemory",         CC"("ADR")V",               FN_PTR(Unsafe_FreeMemory)},
 
     {CC"objectFieldOffset",  CC"("FLD")J",               FN_PTR(Unsafe_ObjectFieldOffset)},
@@ -1496,7 +1486,7 @@
     {CC"pageSize",           CC"()I",                    FN_PTR(Unsafe_PageSize)},
 
     {CC"defineClass",        CC"("DC0_Args")"CLS,        FN_PTR(Unsafe_DefineClass0)},
-    {CC"defineClass",        CC"("DC1_Args")"CLS,        FN_PTR(Unsafe_DefineClass1)},
+    {CC"defineClass",        CC"("DC_Args")"CLS,         FN_PTR(Unsafe_DefineClass)},
     {CC"allocateInstance",   CC"("CLS")"OBJ,             FN_PTR(Unsafe_AllocateInstance)},
     {CC"monitorEnter",       CC"("OBJ")V",               FN_PTR(Unsafe_MonitorEnter)},
     {CC"monitorExit",        CC"("OBJ")V",               FN_PTR(Unsafe_MonitorExit)},
@@ -1509,14 +1499,69 @@
 
 };
 
-// These are the correct methods, moving forward:
-static JNINativeMethod methods[] = {
-
+// These are the methods for 1.6.0 and 1.7.0
+static JNINativeMethod methods_16[] = {
     {CC"getObject",        CC"("OBJ"J)"OBJ"",   FN_PTR(Unsafe_GetObject)},
     {CC"putObject",        CC"("OBJ"J"OBJ")V",  FN_PTR(Unsafe_SetObject)},
     {CC"getObjectVolatile",CC"("OBJ"J)"OBJ"",   FN_PTR(Unsafe_GetObjectVolatile)},
     {CC"putObjectVolatile",CC"("OBJ"J"OBJ")V",  FN_PTR(Unsafe_SetObjectVolatile)},
 
+    DECLARE_GETSETOOP(Boolean, Z),
+    DECLARE_GETSETOOP(Byte, B),
+    DECLARE_GETSETOOP(Short, S),
+    DECLARE_GETSETOOP(Char, C),
+    DECLARE_GETSETOOP(Int, I),
+    DECLARE_GETSETOOP(Long, J),
+    DECLARE_GETSETOOP(Float, F),
+    DECLARE_GETSETOOP(Double, D),
+
+    DECLARE_GETSETNATIVE(Byte, B),
+    DECLARE_GETSETNATIVE(Short, S),
+    DECLARE_GETSETNATIVE(Char, C),
+    DECLARE_GETSETNATIVE(Int, I),
+    DECLARE_GETSETNATIVE(Long, J),
+    DECLARE_GETSETNATIVE(Float, F),
+    DECLARE_GETSETNATIVE(Double, D),
+
+    {CC"getAddress",         CC"("ADR")"ADR,             FN_PTR(Unsafe_GetNativeAddress)},
+    {CC"putAddress",         CC"("ADR""ADR")V",          FN_PTR(Unsafe_SetNativeAddress)},
+
+    {CC"allocateMemory",     CC"(J)"ADR,                 FN_PTR(Unsafe_AllocateMemory)},
+    {CC"reallocateMemory",   CC"("ADR"J)"ADR,            FN_PTR(Unsafe_ReallocateMemory)},
+    {CC"freeMemory",         CC"("ADR")V",               FN_PTR(Unsafe_FreeMemory)},
+
+    {CC"objectFieldOffset",  CC"("FLD")J",               FN_PTR(Unsafe_ObjectFieldOffset)},
+    {CC"staticFieldOffset",  CC"("FLD")J",               FN_PTR(Unsafe_StaticFieldOffset)},
+    {CC"staticFieldBase",    CC"("FLD")"OBJ,             FN_PTR(Unsafe_StaticFieldBaseFromField)},
+    {CC"ensureClassInitialized",CC"("CLS")V",            FN_PTR(Unsafe_EnsureClassInitialized)},
+    {CC"arrayBaseOffset",    CC"("CLS")I",               FN_PTR(Unsafe_ArrayBaseOffset)},
+    {CC"arrayIndexScale",    CC"("CLS")I",               FN_PTR(Unsafe_ArrayIndexScale)},
+    {CC"addressSize",        CC"()I",                    FN_PTR(Unsafe_AddressSize)},
+    {CC"pageSize",           CC"()I",                    FN_PTR(Unsafe_PageSize)},
+
+    {CC"defineClass",        CC"("DC0_Args")"CLS,        FN_PTR(Unsafe_DefineClass0)},
+    {CC"defineClass",        CC"("DC_Args")"CLS,         FN_PTR(Unsafe_DefineClass)},
+    {CC"allocateInstance",   CC"("CLS")"OBJ,             FN_PTR(Unsafe_AllocateInstance)},
+    {CC"monitorEnter",       CC"("OBJ")V",               FN_PTR(Unsafe_MonitorEnter)},
+    {CC"monitorExit",        CC"("OBJ")V",               FN_PTR(Unsafe_MonitorExit)},
+    {CC"tryMonitorEnter",    CC"("OBJ")Z",               FN_PTR(Unsafe_TryMonitorEnter)},
+    {CC"throwException",     CC"("THR")V",               FN_PTR(Unsafe_ThrowException)},
+    {CC"compareAndSwapObject", CC"("OBJ"J"OBJ""OBJ")Z",  FN_PTR(Unsafe_CompareAndSwapObject)},
+    {CC"compareAndSwapInt",  CC"("OBJ"J""I""I"")Z",      FN_PTR(Unsafe_CompareAndSwapInt)},
+    {CC"compareAndSwapLong", CC"("OBJ"J""J""J"")Z",      FN_PTR(Unsafe_CompareAndSwapLong)},
+    {CC"putOrderedObject",   CC"("OBJ"J"OBJ")V",         FN_PTR(Unsafe_SetOrderedObject)},
+    {CC"putOrderedInt",      CC"("OBJ"JI)V",             FN_PTR(Unsafe_SetOrderedInt)},
+    {CC"putOrderedLong",     CC"("OBJ"JJ)V",             FN_PTR(Unsafe_SetOrderedLong)},
+    {CC"park",               CC"(ZJ)V",                  FN_PTR(Unsafe_Park)},
+    {CC"unpark",             CC"("OBJ")V",               FN_PTR(Unsafe_Unpark)}
+};
+
+// These are the methods for 1.8.0
+static JNINativeMethod methods_18[] = {
+    {CC"getObject",        CC"("OBJ"J)"OBJ"",   FN_PTR(Unsafe_GetObject)},
+    {CC"putObject",        CC"("OBJ"J"OBJ")V",  FN_PTR(Unsafe_SetObject)},
+    {CC"getObjectVolatile",CC"("OBJ"J)"OBJ"",   FN_PTR(Unsafe_GetObjectVolatile)},
+    {CC"putObjectVolatile",CC"("OBJ"J"OBJ")V",  FN_PTR(Unsafe_SetObjectVolatile)},
 
     DECLARE_GETSETOOP(Boolean, Z),
     DECLARE_GETSETOOP(Byte, B),
@@ -1540,8 +1585,6 @@
 
     {CC"allocateMemory",     CC"(J)"ADR,                 FN_PTR(Unsafe_AllocateMemory)},
     {CC"reallocateMemory",   CC"("ADR"J)"ADR,            FN_PTR(Unsafe_ReallocateMemory)},
-//  {CC"setMemory",          CC"("ADR"JB)V",             FN_PTR(Unsafe_SetMemory)},
-//  {CC"copyMemory",         CC"("ADR ADR"J)V",          FN_PTR(Unsafe_CopyMemory)},
     {CC"freeMemory",         CC"("ADR")V",               FN_PTR(Unsafe_FreeMemory)},
 
     {CC"objectFieldOffset",  CC"("FLD")J",               FN_PTR(Unsafe_ObjectFieldOffset)},
@@ -1553,8 +1596,7 @@
     {CC"addressSize",        CC"()I",                    FN_PTR(Unsafe_AddressSize)},
     {CC"pageSize",           CC"()I",                    FN_PTR(Unsafe_PageSize)},
 
-    {CC"defineClass",        CC"("DC0_Args")"CLS,        FN_PTR(Unsafe_DefineClass0)},
-    {CC"defineClass",        CC"("DC1_Args")"CLS,        FN_PTR(Unsafe_DefineClass1)},
+    {CC"defineClass",        CC"("DC_Args")"CLS,         FN_PTR(Unsafe_DefineClass)},
     {CC"allocateInstance",   CC"("CLS")"OBJ,             FN_PTR(Unsafe_AllocateInstance)},
     {CC"monitorEnter",       CC"("OBJ")V",               FN_PTR(Unsafe_MonitorEnter)},
     {CC"monitorExit",        CC"("OBJ")V",               FN_PTR(Unsafe_MonitorExit)},
@@ -1566,23 +1608,12 @@
     {CC"putOrderedObject",   CC"("OBJ"J"OBJ")V",         FN_PTR(Unsafe_SetOrderedObject)},
     {CC"putOrderedInt",      CC"("OBJ"JI)V",             FN_PTR(Unsafe_SetOrderedInt)},
     {CC"putOrderedLong",     CC"("OBJ"JJ)V",             FN_PTR(Unsafe_SetOrderedLong)},
-    {CC"loadFence",          CC"()V",                    FN_PTR(Unsafe_LoadFence)},
-    {CC"storeFence",         CC"()V",                    FN_PTR(Unsafe_StoreFence)},
-    {CC"fullFence",          CC"()V",                    FN_PTR(Unsafe_FullFence)},
     {CC"park",               CC"(ZJ)V",                  FN_PTR(Unsafe_Park)},
     {CC"unpark",             CC"("OBJ")V",               FN_PTR(Unsafe_Unpark)}
-
-//    {CC"getLoadAverage",     CC"([DI)I",                 FN_PTR(Unsafe_Loadavg)},
-
-//    {CC"prefetchRead",       CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchRead)},
-//    {CC"prefetchWrite",      CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchWrite)}
-//    {CC"prefetchReadStatic", CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchRead)},
-//    {CC"prefetchWriteStatic",CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchWrite)}
-
 };
 
 JNINativeMethod loadavg_method[] = {
-    {CC"getLoadAverage",            CC"([DI)I",                 FN_PTR(Unsafe_Loadavg)}
+    {CC"getLoadAverage",     CC"([DI)I",                 FN_PTR(Unsafe_Loadavg)}
 };
 
 JNINativeMethod prefetch_methods[] = {
@@ -1592,7 +1623,7 @@
     {CC"prefetchWriteStatic",CC"("OBJ"J)V",              FN_PTR(Unsafe_PrefetchWrite)}
 };
 
-JNINativeMethod memcopy_methods[] = {
+JNINativeMethod memcopy_methods_17[] = {
     {CC"copyMemory",         CC"("OBJ"J"OBJ"JJ)V",       FN_PTR(Unsafe_CopyMemory2)},
     {CC"setMemory",          CC"("OBJ"JJB)V",            FN_PTR(Unsafe_SetMemory2)}
 };
@@ -1610,6 +1641,12 @@
     {CC"shouldBeInitialized",CC"("CLS")Z",               FN_PTR(Unsafe_ShouldBeInitialized)},
 };
 
+JNINativeMethod fence_methods[] = {
+    {CC"loadFence",          CC"()V",                    FN_PTR(Unsafe_LoadFence)},
+    {CC"storeFence",         CC"()V",                    FN_PTR(Unsafe_StoreFence)},
+    {CC"fullFence",          CC"()V",                    FN_PTR(Unsafe_FullFence)},
+};
+
 #undef CC
 #undef FN_PTR
 
@@ -1622,12 +1659,32 @@
 #undef MTH
 #undef THR
 #undef DC0_Args
-#undef DC1_Args
+#undef DC_Args
 
 #undef DECLARE_GETSETOOP
 #undef DECLARE_GETSETNATIVE
 
 
+/**
+ * Helper to register a table of native methods; returns false and clears any pending exception on failure, so the caller can fall back to an older table.
+ */
+static bool register_natives(const char* message, JNIEnv* env, jclass clazz, const JNINativeMethod* methods, jint nMethods) {
+  int status = env->RegisterNatives(clazz, methods, nMethods);
+  if (status < 0 || env->ExceptionOccurred()) {
+    if (PrintMiscellaneous && (Verbose || WizardMode)) {
+      tty->print_cr("Unsafe:  failed registering %s", message);
+    }
+    env->ExceptionClear();
+    return false;
+  } else {
+    if (PrintMiscellaneous && (Verbose || WizardMode)) {
+      tty->print_cr("Unsafe:  successfully registered %s", message);
+    }
+    return true;
+  }
+}
+
+
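
The boolean return is what makes the version probing below chainable; a minimal sketch of the intended call pattern (the table names are hypothetical):

    // Hypothetical tables table_new/table_old.  A failed probe has already
    // cleared the pending NoSuchMethodError, so env is clean for the retry.
    bool ok = register_natives("new table", env, clazz, table_new,
                               sizeof(table_new)/sizeof(JNINativeMethod));
    if (!ok) {
      ok = register_natives("old table", env, clazz, table_old,
                            sizeof(table_old)/sizeof(JNINativeMethod));
    }
    guarantee(ok, "register natives");  // only when the methods are mandatory
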
 // This one function is exported, used by NativeLookup.
 // The Unsafe_xxx functions above are called only from the interpreter.
 // The optimizer looks at names and signatures to recognize
@@ -1637,83 +1694,57 @@
   UnsafeWrapper("JVM_RegisterUnsafeMethods");
   {
     ThreadToNativeFromVM ttnfv(thread);
-    {
-      env->RegisterNatives(unsafecls, loadavg_method, sizeof(loadavg_method)/sizeof(JNINativeMethod));
-      if (env->ExceptionOccurred()) {
-        if (PrintMiscellaneous && (Verbose || WizardMode)) {
-          tty->print_cr("Warning:  SDK 1.6 Unsafe.loadavg not found.");
-        }
-        env->ExceptionClear();
-      }
-    }
+
+    // Unsafe methods
     {
-      env->RegisterNatives(unsafecls, prefetch_methods, sizeof(prefetch_methods)/sizeof(JNINativeMethod));
-      if (env->ExceptionOccurred()) {
-        if (PrintMiscellaneous && (Verbose || WizardMode)) {
-          tty->print_cr("Warning:  SDK 1.6 Unsafe.prefetchRead/Write not found.");
-        }
-        env->ExceptionClear();
+      bool success = false;
+      // We need to register the 1.6 methods first: the smaller 1.8 table would also register fine on 1.7 and 1.6, masking which class-library version is actually present
+      if (!success) {
+        success = register_natives("1.6 methods",   env, unsafecls, methods_16,  sizeof(methods_16)/sizeof(JNINativeMethod));
+      }
+      if (!success) {
+        success = register_natives("1.8 methods",   env, unsafecls, methods_18,  sizeof(methods_18)/sizeof(JNINativeMethod));
+      }
+      if (!success) {
+        success = register_natives("1.5 methods",   env, unsafecls, methods_15,  sizeof(methods_15)/sizeof(JNINativeMethod));
+      }
+      if (!success) {
+        success = register_natives("1.4.1 methods", env, unsafecls, methods_141, sizeof(methods_141)/sizeof(JNINativeMethod));
       }
+      if (!success) {
+        success = register_natives("1.4.0 methods", env, unsafecls, methods_140, sizeof(methods_140)/sizeof(JNINativeMethod));
+      }
+      guarantee(success, "register unsafe natives");
     }
+
+    // Unsafe.getLoadAverage
+    register_natives("1.6 loadavg method", env, unsafecls, loadavg_method, sizeof(loadavg_method)/sizeof(JNINativeMethod));
+
+    // Prefetch methods
+    register_natives("1.6 prefetch methods", env, unsafecls, prefetch_methods, sizeof(prefetch_methods)/sizeof(JNINativeMethod));
+
+    // Memory copy methods
     {
-      env->RegisterNatives(unsafecls, memcopy_methods, sizeof(memcopy_methods)/sizeof(JNINativeMethod));
-      if (env->ExceptionOccurred()) {
-        if (PrintMiscellaneous && (Verbose || WizardMode)) {
-          tty->print_cr("Warning:  SDK 1.7 Unsafe.copyMemory not found.");
-        }
-        env->ExceptionClear();
-        env->RegisterNatives(unsafecls, memcopy_methods_15, sizeof(memcopy_methods_15)/sizeof(JNINativeMethod));
-        if (env->ExceptionOccurred()) {
-          if (PrintMiscellaneous && (Verbose || WizardMode)) {
-            tty->print_cr("Warning:  SDK 1.5 Unsafe.copyMemory not found.");
-          }
-          env->ExceptionClear();
-        }
+      bool success = false;
+      if (!success) {
+        success = register_natives("1.7 memory copy methods", env, unsafecls, memcopy_methods_17, sizeof(memcopy_methods_17)/sizeof(JNINativeMethod));
+      }
+      if (!success) {
+        success = register_natives("1.5 memory copy methods", env, unsafecls, memcopy_methods_15, sizeof(memcopy_methods_15)/sizeof(JNINativeMethod));
       }
     }
+
+    // Unsafe.defineAnonymousClass
     if (EnableInvokeDynamic) {
-      env->RegisterNatives(unsafecls, anonk_methods, sizeof(anonk_methods)/sizeof(JNINativeMethod));
-      if (env->ExceptionOccurred()) {
-        if (PrintMiscellaneous && (Verbose || WizardMode)) {
-          tty->print_cr("Warning:  SDK 1.7 Unsafe.defineClass (anonymous version) not found.");
-        }
-        env->ExceptionClear();
-      }
-    }
-    if (EnableInvokeDynamic) {
-      env->RegisterNatives(unsafecls, lform_methods, sizeof(lform_methods)/sizeof(JNINativeMethod));
-      if (env->ExceptionOccurred()) {
-        if (PrintMiscellaneous && (Verbose || WizardMode)) {
-          tty->print_cr("Warning:  SDK 1.7 LambdaForm support in Unsafe not found.");
-        }
-        env->ExceptionClear();
-      }
+      register_natives("1.7 define anonymous class method", env, unsafecls, anonk_methods, sizeof(anonk_methods)/sizeof(JNINativeMethod));
     }
-    int status = env->RegisterNatives(unsafecls, methods, sizeof(methods)/sizeof(JNINativeMethod));
-    if (env->ExceptionOccurred()) {
-      if (PrintMiscellaneous && (Verbose || WizardMode)) {
-        tty->print_cr("Warning:  SDK 1.6 version of Unsafe not found.");
-      }
-      env->ExceptionClear();
-      // %%% For now, be backward compatible with an older class:
-      status = env->RegisterNatives(unsafecls, methods_15, sizeof(methods_15)/sizeof(JNINativeMethod));
+
+    // Unsafe.shouldBeInitialized
+    if (EnableInvokeDynamic) {
+      register_natives("1.7 LambdaForm support", env, unsafecls, lform_methods, sizeof(lform_methods)/sizeof(JNINativeMethod));
     }
-    if (env->ExceptionOccurred()) {
-      if (PrintMiscellaneous && (Verbose || WizardMode)) {
-        tty->print_cr("Warning:  SDK 1.5 version of Unsafe not found.");
-      }
-      env->ExceptionClear();
-      // %%% For now, be backward compatible with an older class:
-      status = env->RegisterNatives(unsafecls, methods_141, sizeof(methods_141)/sizeof(JNINativeMethod));
-    }
-    if (env->ExceptionOccurred()) {
-      if (PrintMiscellaneous && (Verbose || WizardMode)) {
-        tty->print_cr("Warning:  SDK 1.4.1 version of Unsafe not found.");
-      }
-      env->ExceptionClear();
-      // %%% For now, be backward compatible with an older class:
-      status = env->RegisterNatives(unsafecls, methods_140, sizeof(methods_140)/sizeof(JNINativeMethod));
-    }
-    guarantee(status == 0, "register unsafe natives");
+
+    // Fence methods
+    register_natives("1.8 fence methods", env, unsafecls, fence_methods, sizeof(fence_methods)/sizeof(JNINativeMethod));
   }
 JVM_END
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -2528,7 +2528,7 @@
           "disable locking assertions (for speed)")                         \
                                                                             \
   product(bool, RangeCheckElimination, true,                                \
-          "Split loop iterations to eliminate range checks")                \
+          "Eliminate range checks")                                         \
                                                                             \
   develop_pd(bool, UncommonNullCast,                                        \
           "track occurrences of null in casts; adjust compiler tactics")    \
--- a/hotspot/src/share/vm/runtime/vframe.cpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/runtime/vframe.cpp	Thu Apr 04 17:01:34 2013 -0700
@@ -391,40 +391,27 @@
 // Step back n frames, skip any pseudo frames in between.
 // This function is used in Class.forName, Class.newInstance, Method.Invoke,
 // AccessController.doPrivileged.
-//
-// NOTE that in JDK 1.4 this has been exposed to Java as
-// sun.reflect.Reflection.getCallerClass(), which can be inlined.
-// Inlined versions must match this routine's logic.
-// Native method prefixing logic does not need to match since
-// the method names don't match and inlining will not occur.
-// See, for example,
-// Parse::inline_native_Reflection_getCallerClass in
-// opto/library_call.cpp.
 void vframeStreamCommon::security_get_caller_frame(int depth) {
-  bool use_new_reflection = JDK_Version::is_gte_jdk14x_version() && UseNewReflection;
+  assert(depth >= 0, err_msg("invalid depth: %d", depth));
+  for (int n = 0; !at_end(); security_next()) {
+    if (!method()->is_ignored_by_security_stack_walk()) {
+      if (n == depth) {
+        // We have reached the desired depth; return.
+        return;
+      }
+      n++;  // this is a non-skipped frame; count it against the depth
+    }
+  }
+  // NOTE: At this point there were not enough frames on the stack
+  // to reach the requested depth.  Callers of this method must check at_end().
+}
 
-  while (!at_end()) {
-    if (Universe::reflect_invoke_cache()->is_same_method(method())) {
-      // This is Method.invoke() -- skip it
-    } else if (use_new_reflection &&
-              method()->method_holder()
-                 ->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass())) {
-      // This is an auxilary frame -- skip it
-    } else if (method()->is_method_handle_intrinsic() ||
-               method()->is_compiled_lambda_form()) {
-      // This is an internal adapter frame for method handles -- skip it
-    } else {
-      // This is non-excluded frame, we need to count it against the depth
-      if (depth-- <= 0) {
-        // we have reached the desired depth, we are done
-        break;
-      }
-    }
-    if (method()->is_prefixed_native()) {
-      skip_prefixed_method_and_wrappers();
-    } else {
-      next();
-    }
+
+void vframeStreamCommon::security_next() {
+  if (method()->is_prefixed_native()) {
+    skip_prefixed_method_and_wrappers();  // calls next()
+  } else {
+    next();
   }
 }
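
With the bounds handling moved out of the loop, every caller must now test at_end() itself. A minimal caller-side sketch (the wrapper function is hypothetical; the stream API is the one used here):

    // Hypothetical helper honoring the new contract.
    static Method* caller_at_depth(JavaThread* thread, int depth) {
      vframeStream vfst(thread);
      vfst.security_get_caller_frame(depth);
      if (vfst.at_end()) {
        return NULL;  // fewer than depth + 1 eligible frames were on the stack
      }
      return vfst.method();  // the frame at the requested depth
    }
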
 
--- a/hotspot/src/share/vm/runtime/vframe.hpp	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/src/share/vm/runtime/vframe.hpp	Thu Apr 04 17:01:34 2013 -0700
@@ -336,6 +336,7 @@
       _frame = _frame.sender(&_reg_map);
     } while (!fill_from_frame());
   }
+  void security_next();
 
   bool at_end() const { return _mode == at_end_mode; }
 
--- a/hotspot/test/compiler/8009761/Test8009761.java	Thu Apr 04 19:07:32 2013 +0200
+++ b/hotspot/test/compiler/8009761/Test8009761.java	Thu Apr 04 17:01:34 2013 -0700
@@ -25,7 +25,7 @@
  * @test
  * @bug 8009761
  * @summary Deoptimization on sparc doesn't set Llast_SP correctly in the interpreter frames it creates
- * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation Test8009761
+ * @run main/othervm -Xmixed -XX:-UseOnStackReplacement -XX:-BackgroundCompilation Test8009761
  *
  */