Merge
authordsamersoff
Sat, 05 Apr 2014 20:59:37 +0000
changeset 23869 41e28d356a5b
parent 23867 f144d430b1cf (current diff)
parent 23868 ac74f81a5157 (diff)
child 23870 2c9e85bccffb
Merge
--- a/.hgtags	Sat Apr 05 21:33:11 2014 +0200
+++ b/.hgtags	Sat Apr 05 20:59:37 2014 +0000
@@ -248,3 +248,4 @@
 7f655f31f9bcee618cf832f08176ad8c1ed3fdd3 jdk9-b03
 099891b1d86f3719e116ac717ffdafc90d037fb7 jdk9-b04
 dd311791ad6895a3989020dd6c6c46db87972ab8 jdk9-b05
+85dbdc227c5e11429b4fc4a8ba763f50107edd6e jdk9-b06
--- a/.hgtags-top-repo	Sat Apr 05 21:33:11 2014 +0200
+++ b/.hgtags-top-repo	Sat Apr 05 20:59:37 2014 +0000
@@ -248,3 +248,4 @@
 fd8d51bdf9aadf7ae83e65e8655c53581017c363 jdk9-b03
 cb4c3440bc2748101923e2488506e61009ab1bf5 jdk9-b04
 8c63f0b6ada282f27e3a80125e53c3be603f9af7 jdk9-b05
+d0b525cd31b87abeb6d5b7e3516953eeb13b323c jdk9-b06
--- a/corba/.hgtags	Sat Apr 05 21:33:11 2014 +0200
+++ b/corba/.hgtags	Sat Apr 05 20:59:37 2014 +0000
@@ -248,3 +248,4 @@
 d338b892a13db19b093f85cf5f949a4504e4d31f jdk9-b03
 1ed19de263e1e0772da0269118cdd9deeb9fff04 jdk9-b04
 167c39eb44731a5d66770d0f00e231164653a2ff jdk9-b05
+a4bf701ac316946c2e5e83138ad8e687da6a4b30 jdk9-b06
--- a/hotspot/.hgtags	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/.hgtags	Sat Apr 05 20:59:37 2014 +0000
@@ -408,3 +408,4 @@
 b2fee789d23f3cdabb3db4e51af43038e5692d3a jdk9-b03
 3812c088b9456ee22c933e88aee1ece71f4e783a jdk9-b04
 bdc5311e1db7598589b77015119b821bf8c828bd jdk9-b05
+52377a30a3f87b62d6135706997b8c7a47366e37 jdk9-b06
--- a/hotspot/make/linux/Makefile	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/make/linux/Makefile	Sat Apr 05 20:59:37 2014 +0000
@@ -66,8 +66,8 @@
     FORCE_TIERED=1
   endif
 endif
-# C1 is not ported on ppc64(le), so we cannot build a tiered VM:
-ifneq (,$(filter $(ARCH),ppc64 pp64le))
+# C1 is not ported on ppc64, so we cannot build a tiered VM:
+ifeq ($(ARCH),ppc64)
   FORCE_TIERED=0
 endif
 
--- a/hotspot/make/linux/makefiles/defs.make	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/make/linux/makefiles/defs.make	Sat Apr 05 20:59:37 2014 +0000
@@ -33,6 +33,11 @@
 # ARCH can be set explicitly in spec.gmk
 ifndef ARCH
   ARCH := $(shell uname -m)
+  # Fold little endian PowerPC64 into big-endian (if ARCH is set in
+  # hotspot-spec.gmk, this will be done by the configure script).
+  ifeq ($(ARCH),ppc64le)
+    ARCH := ppc64
+  endif
 endif
 
 PATH_SEP ?= :
--- a/hotspot/make/linux/makefiles/gcc.make	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/make/linux/makefiles/gcc.make	Sat Apr 05 20:59:37 2014 +0000
@@ -337,56 +337,20 @@
 ifeq ($(DEBUG_BINARIES), true)
   CFLAGS += -g
 else
-  # Use the stabs format for debugging information (this is the default
-  # on gcc-2.91). It's good enough, has all the information about line
-  # numbers and local variables, and libjvm.so is only about 16M.
-  # Change this back to "-g" if you want the most expressive format.
-  # (warning: that could easily inflate libjvm.so to 150M!)
-  # Note: The Itanium gcc compiler crashes when using -gstabs.
-  DEBUG_CFLAGS/ia64  = -g
-  DEBUG_CFLAGS/amd64 = -g
-  DEBUG_CFLAGS/arm   = -g
-  DEBUG_CFLAGS/ppc   = -g
-  DEBUG_CFLAGS/ppc64 = -g
   DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
   ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
-      ifeq ($(USE_CLANG), true)
-        # Clang doesn't understand -gstabs
-        DEBUG_CFLAGS += -g
-      else
-        DEBUG_CFLAGS += -gstabs
-      endif
+    DEBUG_CFLAGS += -g
   endif
-  
+
   ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-    FASTDEBUG_CFLAGS/ia64  = -g
-    FASTDEBUG_CFLAGS/amd64 = -g
-    FASTDEBUG_CFLAGS/arm   = -g
-    FASTDEBUG_CFLAGS/ppc   = -g
-    FASTDEBUG_CFLAGS/ppc64 = -g
-    FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
+    FASTDEBUG_CFLAGS += $(FASTDEBUG_CFLAGS/$(BUILDARCH))
     ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
-      ifeq ($(USE_CLANG), true)
-        # Clang doesn't understand -gstabs
-        FASTDEBUG_CFLAGS += -g
-      else
-        FASTDEBUG_CFLAGS += -gstabs
-      endif
+      FASTDEBUG_CFLAGS += -g
     endif
-  
-    OPT_CFLAGS/ia64  = -g
-    OPT_CFLAGS/amd64 = -g
-    OPT_CFLAGS/arm   = -g
-    OPT_CFLAGS/ppc   = -g
-    OPT_CFLAGS/ppc64 = -g
+
     OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
     ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
-      ifeq ($(USE_CLANG), true)
-        # Clang doesn't understand -gstabs
-        OPT_CFLAGS += -g
-      else
-        OPT_CFLAGS += -gstabs
-      endif
+      OPT_CFLAGS += -g
     endif
   endif
 endif
--- a/hotspot/make/linux/makefiles/ppc64.make	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/make/linux/makefiles/ppc64.make	Sat Apr 05 20:59:37 2014 +0000
@@ -26,14 +26,26 @@
 # make c code know it is on a 64 bit platform.
 CFLAGS += -D_LP64=1
 
-# fixes `relocation truncated to fit' error for gcc 4.1.
-CFLAGS += -mminimal-toc
+ifeq ($(origin OPENJDK_TARGET_CPU_ENDIAN),undefined)
+  # This can happen during hotspot standalone build. Set endianness from
+  # uname. We assume build and target machines are the same.
+  OPENJDK_TARGET_CPU_ENDIAN:=$(if $(filter ppc64le,$(shell uname -m)),little,big)
+endif
 
-# finds use ppc64 instructions, but schedule for power5
-CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+ifeq ($(filter $(OPENJDK_TARGET_CPU_ENDIAN),big little),)
+  $(error OPENJDK_TARGET_CPU_ENDIAN value should be 'big' or 'little')
+endif
 
-# let linker find external 64 bit libs.
-LFLAGS_VM += -L/lib64
+ifeq ($(OPENJDK_TARGET_CPU_ENDIAN),big)
+  # fixes `relocation truncated to fit' error for gcc 4.1.
+  CFLAGS += -mminimal-toc
 
-# specify lib format.
-LFLAGS_VM +=  -Wl,-melf64ppc
+  # finds use ppc64 instructions, but schedule for power5
+  CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+else
+  # Little endian machine uses ELFv2 ABI.
+  CFLAGS += -DVM_LITTLE_ENDIAN -DABI_ELFv2
+
+  # Power8 is the first CPU to support PPC64 LE with ELFv2 ABI; schedule for power7, tune for power8.
+  CFLAGS += -mcpu=power7 -mtune=power8 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+endif
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -1025,15 +1025,14 @@
   }
 
   static void set_imm(int* instr, short s) {
-    short* p = ((short *)instr) + 1;
-    *p = s;
+    // imm is always in the lower 16 bits of the instruction,
+    // so this is endian-neutral. Same for the get_imm below.
+    uint32_t w = *(uint32_t *)instr;
+    *instr = (int)((w & ~0x0000FFFF) | (s & 0x0000FFFF));
   }
 
   static int get_imm(address a, int instruction_number) {
-    short imm;
-    short *p =((short *)a)+2*instruction_number+1;
-    imm = *p;
-    return (int)imm;
+    return (short)((int *)a)[instruction_number];
   }
 
   static inline int hi16_signed(  int x) { return (int)(int16_t)(x >> 16); }
--- a/hotspot/src/cpu/ppc/vm/bytes_ppc.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/cpu/ppc/vm/bytes_ppc.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -35,6 +35,126 @@
 
   // Can I count on address always being a pointer to an unsigned char? Yes.
 
+#if defined(VM_LITTLE_ENDIAN)
+
+  // Returns true if the byte ordering used by Java is different from the native byte
+  // ordering of the underlying machine. For example, true for Intel x86, false for SPARC.
+  static inline bool is_Java_byte_ordering_different() { return true; }
+
+  // Forward declarations of the compiler-dependent implementation
+  static inline u2 swap_u2(u2 x);
+  static inline u4 swap_u4(u4 x);
+  static inline u8 swap_u8(u8 x);
+
+  static inline u2   get_native_u2(address p) {
+    return (intptr_t(p) & 1) == 0
+             ?   *(u2*)p
+             :   ( u2(p[1]) << 8 )
+               | ( u2(p[0])      );
+  }
+
+  static inline u4   get_native_u4(address p) {
+    switch (intptr_t(p) & 3) {
+     case 0:  return *(u4*)p;
+
+     case 2:  return (  u4( ((u2*)p)[1] ) << 16  )
+                   | (  u4( ((u2*)p)[0] )        );
+
+    default:  return ( u4(p[3]) << 24 )
+                   | ( u4(p[2]) << 16 )
+                   | ( u4(p[1]) <<  8 )
+                   |   u4(p[0]);
+    }
+  }
+
+  static inline u8   get_native_u8(address p) {
+    switch (intptr_t(p) & 7) {
+      case 0:  return *(u8*)p;
+
+      case 4:  return (  u8( ((u4*)p)[1] ) << 32  )
+                    | (  u8( ((u4*)p)[0] )        );
+
+      case 2:  return (  u8( ((u2*)p)[3] ) << 48  )
+                    | (  u8( ((u2*)p)[2] ) << 32  )
+                    | (  u8( ((u2*)p)[1] ) << 16  )
+                    | (  u8( ((u2*)p)[0] )        );
+
+     default:  return ( u8(p[7]) << 56 )
+                    | ( u8(p[6]) << 48 )
+                    | ( u8(p[5]) << 40 )
+                    | ( u8(p[4]) << 32 )
+                    | ( u8(p[3]) << 24 )
+                    | ( u8(p[2]) << 16 )
+                    | ( u8(p[1]) <<  8 )
+                    |   u8(p[0]);
+    }
+  }
+
+
+
+  static inline void put_native_u2(address p, u2 x) {
+    if ( (intptr_t(p) & 1) == 0 )  *(u2*)p = x;
+    else {
+      p[1] = x >> 8;
+      p[0] = x;
+    }
+  }
+
+  static inline void put_native_u4(address p, u4 x) {
+    switch ( intptr_t(p) & 3 ) {
+    case 0:  *(u4*)p = x;
+              break;
+
+    case 2:  ((u2*)p)[1] = x >> 16;
+             ((u2*)p)[0] = x;
+             break;
+
+    default: ((u1*)p)[3] = x >> 24;
+             ((u1*)p)[2] = x >> 16;
+             ((u1*)p)[1] = x >>  8;
+             ((u1*)p)[0] = x;
+             break;
+    }
+  }
+
+  static inline void put_native_u8(address p, u8 x) {
+    switch ( intptr_t(p) & 7 ) {
+    case 0:  *(u8*)p = x;
+             break;
+
+    case 4:  ((u4*)p)[1] = x >> 32;
+             ((u4*)p)[0] = x;
+             break;
+
+    case 2:  ((u2*)p)[3] = x >> 48;
+             ((u2*)p)[2] = x >> 32;
+             ((u2*)p)[1] = x >> 16;
+             ((u2*)p)[0] = x;
+             break;
+
+    default: ((u1*)p)[7] = x >> 56;
+             ((u1*)p)[6] = x >> 48;
+             ((u1*)p)[5] = x >> 40;
+             ((u1*)p)[4] = x >> 32;
+             ((u1*)p)[3] = x >> 24;
+             ((u1*)p)[2] = x >> 16;
+             ((u1*)p)[1] = x >>  8;
+             ((u1*)p)[0] = x;
+    }
+  }
+
+  // Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering)
+  // (byte-order reversal via swap_u2/u4/u8 is required here because this target is little-endian).
+  static inline u2   get_Java_u2(address p) { return swap_u2(get_native_u2(p)); }
+  static inline u4   get_Java_u4(address p) { return swap_u4(get_native_u4(p)); }
+  static inline u8   get_Java_u8(address p) { return swap_u8(get_native_u8(p)); }
+
+  static inline void put_Java_u2(address p, u2 x)     { put_native_u2(p, swap_u2(x)); }
+  static inline void put_Java_u4(address p, u4 x)     { put_native_u4(p, swap_u4(x)); }
+  static inline void put_Java_u8(address p, u8 x)     { put_native_u8(p, swap_u8(x)); }
+
+#else // !defined(VM_LITTLE_ENDIAN)
+
   // Returns true, if the byte ordering used by Java is different from the nativ byte ordering
   // of the underlying machine. For example, true for Intel x86, False, for Solaris on Sparc.
   static inline bool is_Java_byte_ordering_different() { return false; }
@@ -150,6 +270,12 @@
   static inline void put_Java_u2(address p, u2 x)     { put_native_u2(p, x); }
   static inline void put_Java_u4(address p, u4 x)     { put_native_u4(p, x); }
   static inline void put_Java_u8(address p, u8 x)     { put_native_u8(p, x); }
+
+#endif // VM_LITTLE_ENDIAN
 };
 
+#if defined(TARGET_OS_ARCH_linux_ppc)
+#include "bytes_linux_ppc.inline.hpp"
+#endif
+
 #endif // CPU_PPC_VM_BYTES_PPC_HPP
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -799,7 +799,13 @@
         if (UseCompressedOops && !wide) {
           __ movl(as_Address(addr), (int32_t)NULL_WORD);
         } else {
+#ifdef _LP64
+          __ xorptr(rscratch1, rscratch1);
+          null_check_here = code_offset();
+          __ movptr(as_Address(addr), rscratch1);
+#else
           __ movptr(as_Address(addr), NULL_WORD);
+#endif
         }
       } else {
         if (is_literal_address(addr)) {
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -59,9 +59,9 @@
 static const int stub_size = 600;
 
 extern "C" {
-  typedef void (*getPsrInfo_stub_t)(void*);
+  typedef void (*get_cpu_info_stub_t)(void*);
 }
-static getPsrInfo_stub_t getPsrInfo_stub = NULL;
+static get_cpu_info_stub_t get_cpu_info_stub = NULL;
 
 
 class VM_Version_StubGenerator: public StubCodeGenerator {
@@ -69,7 +69,7 @@
 
   VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
 
-  address generate_getPsrInfo() {
+  address generate_get_cpu_info() {
     // Flags to test CPU type.
     const uint32_t HS_EFL_AC           = 0x40000;
     const uint32_t HS_EFL_ID           = 0x200000;
@@ -81,13 +81,13 @@
     Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
     Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done;
 
-    StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
+    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
 #   define __ _masm->
 
     address start = __ pc();
 
     //
-    // void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
+    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
     //
     // LP64: rcx and rdx are first and second argument registers on windows
 
@@ -385,6 +385,14 @@
 };
 
 
+void VM_Version::get_cpu_info_wrapper() {
+  get_cpu_info_stub(&_cpuid_info);
+}
+
+#ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
+  #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
+#endif
+
 void VM_Version::get_processor_features() {
 
   _cpu = 4; // 486 by default
@@ -395,7 +403,11 @@
 
   if (!Use486InstrsOnly) {
     // Get raw processor info
-    getPsrInfo_stub(&_cpuid_info);
+
+    // Some platforms (like Win*) need a wrapper around here
+    // in order to properly handle SEGV for YMM registers test.
+    CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(get_cpu_info_wrapper);
+
     assert_is_initialized();
     _cpu = extended_cpu_family();
     _model = extended_cpu_model();
@@ -986,14 +998,14 @@
   ResourceMark rm;
   // Making this stub must be FIRST use of assembler
 
-  stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size);
+  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
   if (stub_blob == NULL) {
-    vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
+    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
   }
   CodeBuffer c(stub_blob);
   VM_Version_StubGenerator g(&c);
-  getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t,
-                                   g.generate_getPsrInfo());
+  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
+                                     g.generate_get_cpu_info());
 
   get_processor_features();
 }
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -507,6 +507,7 @@
   // The value used to check ymm register after signal handle
   static int ymm_test_value()    { return 0xCAFEBABE; }
 
+  static void get_cpu_info_wrapper();
   static void set_cpuinfo_segv_addr(address pc) { _cpuinfo_segv_addr = pc; }
   static bool  is_cpuinfo_segv_addr(address pc) { return _cpuinfo_segv_addr == pc; }
   static void set_cpuinfo_cont_addr(address pc) { _cpuinfo_cont_addr = pc; }
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -1932,7 +1932,11 @@
     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
+#if defined(VM_LITTLE_ENDIAN)
+    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64"},
+#else
     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
+#endif
     {EM_ARM,         EM_ARM,     ELFCLASS32,   ELFDATA2LSB, (char*)"ARM"},
     {EM_S390,        EM_S390,    ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
     {EM_ALPHA,       EM_ALPHA,   ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -2702,7 +2702,6 @@
 }
 #endif
 
-#ifndef PRODUCT
 void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
   // Install a win32 structured exception handler around the test
   // function call so the VM can generate an error dump if needed.
@@ -2713,7 +2712,6 @@
     // Nothing to do.
   }
 }
-#endif
 
 // Virtual Memory
 
--- a/hotspot/src/os/windows/vm/os_windows.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -101,9 +101,7 @@
   static address fast_jni_accessor_wrapper(BasicType);
 #endif
 
-#ifndef PRODUCT
   static void call_test_func_with_wrapper(void (*funcPtr)(void));
-#endif
 
   // filter function to ignore faults on serializations page
   static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e);
--- a/hotspot/src/os/windows/vm/os_windows.inline.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.inline.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -108,9 +108,7 @@
   return win32::_has_performance_count;
 }
 
-#ifndef PRODUCT
-  #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
-            os::win32::call_test_func_with_wrapper(f)
-#endif
+#define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
+        os::win32::call_test_func_with_wrapper(f)
 
 #endif // OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/linux_ppc/vm/bytes_linux_ppc.inline.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2014 Google Inc.  All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
+#define OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
+
+#if defined(VM_LITTLE_ENDIAN)
+#include <byteswap.h>
+
+// Efficient swapping of data bytes from Java byte
+// ordering to native byte ordering and vice versa.
+inline u2 Bytes::swap_u2(u2 x) { return bswap_16(x); }
+inline u4 Bytes::swap_u4(u4 x) { return bswap_32(x); }
+inline u8 Bytes::swap_u8(u8 x) { return bswap_64(x); }
+#endif // VM_LITTLE_ENDIAN
+
+#endif // OS_CPU_LINUX_PPC_VM_BYTES_LINUX_PPC_INLINE_HPP
--- a/hotspot/src/share/vm/adlc/main.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/adlc/main.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -243,7 +243,6 @@
   AD.addInclude(AD._CPP_file, "vmreg_arm.inline.hpp");
 #endif
 #ifdef TARGET_ARCH_ppc
-  AD.addInclude(AD._CPP_file, "assembler_ppc.inline.hpp");
   AD.addInclude(AD._CPP_file, "nativeInst_ppc.hpp");
   AD.addInclude(AD._CPP_file, "vmreg_ppc.inline.hpp");
 #endif
@@ -274,6 +273,7 @@
   AD.addInclude(AD._DFA_file, "opto/cfgnode.hpp");  // Use PROB_MAX in predicate.
   AD.addInclude(AD._DFA_file, "opto/matcher.hpp");
   AD.addInclude(AD._DFA_file, "opto/opcodes.hpp");
+  AD.addInclude(AD._DFA_file, "opto/convertnode.hpp");
   // Make sure each .cpp file starts with include lines:
   // files declaring and defining generators for Mach* Objects (hpp,cpp)
   // Generate the result files:
--- a/hotspot/src/share/vm/ci/ciMethod.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -581,14 +581,14 @@
  * Check whether profiling provides a type for the argument i to the
  * call at bci bci
  *
- * @param bci  bci of the call
- * @param i    argument number
- * @return     profiled type
+ * @param[in]  bci        bci of the call
+ * @param[in]  i          argument number
+ * @param[out] type       profiled type of argument, NULL if none
+ * @param[out] maybe_null true if null was seen for argument
+ * @return                true if profiling exists
  *
- * If the profile reports that the argument may be null, return false
- * at least for now.
  */
-ciKlass* ciMethod::argument_profiled_type(int bci, int i) {
+bool ciMethod::argument_profiled_type(int bci, int i, ciKlass*& type, bool& maybe_null) {
   if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) {
     ciProfileData* data = method_data()->bci_to_data(bci);
     if (data != NULL) {
@@ -596,82 +596,77 @@
         assert_virtual_call_type_ok(bci);
         ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData();
         if (i >= call->number_of_arguments()) {
-          return NULL;
+          return false;
         }
-        ciKlass* type = call->valid_argument_type(i);
-        if (type != NULL && !call->argument_maybe_null(i)) {
-          return type;
-        }
+        type = call->valid_argument_type(i);
+        maybe_null = call->argument_maybe_null(i);
+        return true;
       } else if (data->is_CallTypeData()) {
         assert_call_type_ok(bci);
         ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData();
         if (i >= call->number_of_arguments()) {
-          return NULL;
+          return false;
         }
-        ciKlass* type = call->valid_argument_type(i);
-        if (type != NULL && !call->argument_maybe_null(i)) {
-          return type;
-        }
+        type = call->valid_argument_type(i);
+        maybe_null = call->argument_maybe_null(i);
+        return true;
       }
     }
   }
-  return NULL;
+  return false;
 }
 
 /**
  * Check whether profiling provides a type for the return value from
  * the call at bci bci
  *
- * @param bci  bci of the call
- * @return     profiled type
+ * @param[in]  bci        bci of the call
+ * @param[out] type       profiled type of the return value, NULL if none
+ * @param[out] maybe_null true if null was seen for the return value
+ * @return                true if profiling exists
  *
- * If the profile reports that the argument may be null, return false
- * at least for now.
  */
-ciKlass* ciMethod::return_profiled_type(int bci) {
+bool ciMethod::return_profiled_type(int bci, ciKlass*& type, bool& maybe_null) {
   if (MethodData::profile_return() && method_data() != NULL && method_data()->is_mature()) {
     ciProfileData* data = method_data()->bci_to_data(bci);
     if (data != NULL) {
       if (data->is_VirtualCallTypeData()) {
         assert_virtual_call_type_ok(bci);
         ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData();
-        ciKlass* type = call->valid_return_type();
-        if (type != NULL && !call->return_maybe_null()) {
-          return type;
-        }
+        type = call->valid_return_type();
+        maybe_null = call->return_maybe_null();
+        return true;
       } else if (data->is_CallTypeData()) {
         assert_call_type_ok(bci);
         ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData();
-        ciKlass* type = call->valid_return_type();
-        if (type != NULL && !call->return_maybe_null()) {
-          return type;
-        }
+        type = call->valid_return_type();
+        maybe_null = call->return_maybe_null();
+        return true;
       }
     }
   }
-  return NULL;
+  return false;
 }
 
 /**
  * Check whether profiling provides a type for the parameter i
  *
- * @param i    parameter number
- * @return     profiled type
+ * @param[in]  i          parameter number
+ * @param[out] type       profiled type of parameter, NULL if none
+ * @param[out] maybe_null true if null was seen for parameter
+ * @return                true if profiling exists
  *
- * If the profile reports that the argument may be null, return false
- * at least for now.
  */
-ciKlass* ciMethod::parameter_profiled_type(int i) {
+bool ciMethod::parameter_profiled_type(int i, ciKlass*& type, bool& maybe_null) {
   if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) {
     ciParametersTypeData* parameters = method_data()->parameters_type_data();
     if (parameters != NULL && i < parameters->number_of_parameters()) {
-      ciKlass* type = parameters->valid_parameter_type(i);
-      if (type != NULL && !parameters->parameter_maybe_null(i)) {
-        return type;
-      }
+      type = parameters->valid_parameter_type(i);
+      maybe_null = parameters->parameter_maybe_null(i);
+      return true;
     }
   }
-  return NULL;
+  return false;
 }
 
 
--- a/hotspot/src/share/vm/ci/ciMethod.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -234,10 +234,10 @@
   ciCallProfile call_profile_at_bci(int bci);
   int           interpreter_call_site_count(int bci);
 
-  // Does type profiling provide a useful type at this point?
-  ciKlass*      argument_profiled_type(int bci, int i);
-  ciKlass*      parameter_profiled_type(int i);
-  ciKlass*      return_profiled_type(int bci);
+  // Does type profiling provide any useful information at this point?
+  bool          argument_profiled_type(int bci, int i, ciKlass*& type, bool& maybe_null);
+  bool          parameter_profiled_type(int i, ciKlass*& type, bool& maybe_null);
+  bool          return_profiled_type(int bci, ciKlass*& type, bool& maybe_null);
 
   ciField*      get_field_at_bci( int bci, bool &will_link);
   ciMethod*     get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature);
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -135,6 +135,14 @@
   }
 }
 
+void ClassLoaderData::methods_do(void f(Method*)) {
+  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+    if (k->oop_is_instance()) {
+      InstanceKlass::cast(k)->methods_do(f);
+    }
+  }
+}
+
 void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
   // Lock to avoid classes being modified/added/removed during iteration
   MutexLockerEx ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
@@ -624,6 +632,12 @@
   }
 }
 
+void ClassLoaderDataGraph::methods_do(void f(Method*)) {
+  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+    cld->methods_do(f);
+  }
+}
+
 void ClassLoaderDataGraph::loaded_classes_do(KlassClosure* klass_closure) {
   for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
     cld->loaded_classes_do(klass_closure);
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -78,6 +78,7 @@
   static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
   static void classes_do(KlassClosure* klass_closure);
   static void classes_do(void f(Klass* const));
+  static void methods_do(void f(Method*));
   static void loaded_classes_do(KlassClosure* klass_closure);
   static void classes_unloading_do(void f(Klass* const));
   static bool do_unloading(BoolObjectClosure* is_alive);
@@ -189,6 +190,7 @@
   void classes_do(void f(Klass*));
   void loaded_classes_do(KlassClosure* klass_closure);
   void classes_do(void f(InstanceKlass*));
+  void methods_do(void f(Method*));
 
   // Deallocate free list during class unloading.
   void free_deallocate_list();
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -116,10 +116,6 @@
   MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
-
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_read;
-  }
 };
 
 class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {
@@ -132,10 +128,6 @@
   Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
-
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_read;
-  }
 };
 
 // A variant of the above used in certain kinds of CMS
@@ -152,10 +144,6 @@
                             CMSBitMap* cms_bm);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
-
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_read;
-  }
 };
 
 // The non-parallel version (the parallel version appears further below).
@@ -181,10 +169,6 @@
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
-
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_read;
-  }
 };
 
 // In the parallel case, the bit map and the
@@ -211,10 +195,6 @@
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
-
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_read;
-  }
 };
 
 // The non-parallel version (the parallel version appears further below).
@@ -245,9 +225,6 @@
   inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
 
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_read;
-  }
   void set_freelistLock(Mutex* m) {
     _freelistLock = m;
   }
@@ -282,9 +259,6 @@
   inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
 
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_read;
-  }
   void trim_queue(uint size);
 };
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -851,42 +851,60 @@
                                                   UpwardsObjectClosure* cl) {
   assert_locked(freelistLock());
   NOT_PRODUCT(verify_objects_initialized());
-  Space::object_iterate_mem(mr, cl);
+  assert(!mr.is_empty(), "Should be non-empty");
+  // We use MemRegion(bottom(), end()) rather than used_region() below
+  // because the two are not necessarily equal for some kinds of
+  // spaces, in particular, certain kinds of free list spaces.
+  // We could use the more complicated but more precise:
+  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
+  // but the slight imprecision seems acceptable in the assertion check.
+  assert(MemRegion(bottom(), end()).contains(mr),
+         "Should be within used space");
+  HeapWord* prev = cl->previous();   // max address from last time
+  if (prev >= mr.end()) { // nothing to do
+    return;
+  }
+  // This assert will not work when we go from cms space to perm
+  // space, and use same closure. Easy fix deferred for later. XXX YSR
+  // assert(prev == NULL || contains(prev), "Should be within space");
+
+  bool last_was_obj_array = false;
+  HeapWord *blk_start_addr, *region_start_addr;
+  if (prev > mr.start()) {
+    region_start_addr = prev;
+    blk_start_addr    = prev;
+    // The previous invocation may have pushed "prev" beyond the
+    // last allocated block yet there may be still be blocks
+    // in this region due to a particular coalescing policy.
+    // Relax the assertion so that the case where the unallocated
+    // block is maintained and "prev" is beyond the unallocated
+    // block does not cause the assertion to fire.
+    assert((BlockOffsetArrayUseUnallocatedBlock &&
+            (!is_in(prev))) ||
+           (blk_start_addr == block_start(region_start_addr)), "invariant");
+  } else {
+    region_start_addr = mr.start();
+    blk_start_addr    = block_start(region_start_addr);
+  }
+  HeapWord* region_end_addr = mr.end();
+  MemRegion derived_mr(region_start_addr, region_end_addr);
+  while (blk_start_addr < region_end_addr) {
+    const size_t size = block_size(blk_start_addr);
+    if (block_is_obj(blk_start_addr)) {
+      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
+    } else {
+      last_was_obj_array = false;
+    }
+    blk_start_addr += size;
+  }
+  if (!last_was_obj_array) {
+    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
+           "Should be within (closed) used space");
+    assert(blk_start_addr > prev, "Invariant");
+    cl->set_previous(blk_start_addr); // min address for next time
+  }
 }
 
-// Callers of this iterator beware: The closure application should
-// be robust in the face of uninitialized objects and should (always)
-// return a correct size so that the next addr + size below gives us a
-// valid block boundary. [See for instance,
-// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
-// in ConcurrentMarkSweepGeneration.cpp.]
-HeapWord*
-CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
-  assert_lock_strong(freelistLock());
-  HeapWord *addr, *last;
-  size_t size;
-  for (addr = bottom(), last  = end();
-       addr < last; addr += size) {
-    FreeChunk* fc = (FreeChunk*)addr;
-    if (fc->is_free()) {
-      // Since we hold the free list lock, which protects direct
-      // allocation in this generation by mutators, a free object
-      // will remain free throughout this iteration code.
-      size = fc->size();
-    } else {
-      // Note that the object need not necessarily be initialized,
-      // because (for instance) the free list lock does NOT protect
-      // object initialization. The closure application below must
-      // therefore be correct in the face of uninitialized objects.
-      size = cl->do_object_careful(oop(addr));
-      if (size == 0) {
-        // An unparsable object found. Signal early termination.
-        return addr;
-      }
-    }
-  }
-  return NULL;
-}
 
 // Callers of this iterator beware: The closure application should
 // be robust in the face of uninitialized objects and should (always)
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -338,10 +338,6 @@
                      unallocated_block() : end());
   }
 
-  bool is_in(const void* p) const {
-    return used_region().contains(p);
-  }
-
   virtual bool is_free_block(const HeapWord* p) const;
 
   // Resizing support
@@ -363,6 +359,12 @@
   // obj_is_alive() to determine whether it is safe to iterate of
   // an object.
   void safe_object_iterate(ObjectClosure* blk);
+
+  // Iterate over all objects that intersect with mr, calling "cl->do_object"
+  // on each.  There is an exception to this: if this closure has already
+  // been invoked on an object, it may skip such objects in some cases.  This is
+  // Most likely to happen in an "upwards" (ascending address) iteration of
+  // MemRegions.
   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
 
   // Requires that "mr" be entirely within the space.
@@ -371,11 +373,8 @@
   // terminate the iteration and return the address of the start of the
   // subregion that isn't done.  Return of "NULL" indicates that the
   // iteration completed.
-  virtual HeapWord*
-       object_iterate_careful_m(MemRegion mr,
-                                ObjectClosureCareful* cl);
-  virtual HeapWord*
-       object_iterate_careful(ObjectClosureCareful* cl);
+  HeapWord* object_iterate_careful_m(MemRegion mr,
+                                     ObjectClosureCareful* cl);
 
   // Override: provides a DCTO_CL specific to this kind of space.
   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -1498,6 +1498,19 @@
   }
 };
 
+// A version of ObjectClosure with "memory" (see _previous_address below)
+class UpwardsObjectClosure: public BoolObjectClosure {
+  HeapWord* _previous_address;
+ public:
+  UpwardsObjectClosure() : _previous_address(NULL) { }
+  void set_previous(HeapWord* addr) { _previous_address = addr; }
+  HeapWord* previous()              { return _previous_address; }
+  // A return value of "true" can be used by the caller to decide
+  // if this object's end should *NOT* be recorded in
+  // _previous_address above.
+  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
+};
+
 // This closure is used during the second checkpointing phase
 // to rescan the marked objects on the dirty cards in the mod
 // union table and the card table proper. It's invoked via
--- a/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc_implementation/g1/dirtyCardQueue.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -3529,6 +3529,29 @@
   }
 };
 
+bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
+                                       const HeapRegion* hr,
+                                       const VerifyOption vo) const {
+  switch (vo) {
+  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
+  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
+  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  default:                            ShouldNotReachHere();
+  }
+  return false; // keep some compilers happy
+}
+
+bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
+                                       const VerifyOption vo) const {
+  switch (vo) {
+  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
+  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
+  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  default:                            ShouldNotReachHere();
+  }
+  return false; // keep some compilers happy
+}
+
 void G1CollectedHeap::print_on(outputStream* st) const {
   st->print(" %-20s", "garbage-first heap");
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
@@ -6598,13 +6621,13 @@
     if (hr->is_young()) {
       // TODO
     } else if (hr->startsHumongous()) {
-      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->region_num()));
+      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index()));
       _humongous_count.increment(1u, hr->capacity());
     } else if (hr->is_empty()) {
-      assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->region_num()));
+      assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
       _free_count.increment(1u, hr->capacity());
     } else {
-      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->region_num()));
+      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index()));
       _old_count.increment(1u, hr->capacity());
     }
     return false;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -706,19 +706,7 @@
   // This is a fast test on whether a reference points into the
   // collection set or not. Assume that the reference
   // points into the heap.
-  bool in_cset_fast_test(oop obj) {
-    assert(_in_cset_fast_test != NULL, "sanity");
-    assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
-    // no need to subtract the bottom of the heap from obj,
-    // _in_cset_fast_test is biased
-    uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
-    bool ret = _in_cset_fast_test[index];
-    // let's make sure the result is consistent with what the slower
-    // test returns
-    assert( ret || !obj_in_cs(obj), "sanity");
-    assert(!ret ||  obj_in_cs(obj), "sanity");
-    return ret;
-  }
+  inline bool in_cset_fast_test(oop obj);
 
   void clear_cset_fast_test() {
     assert(_in_cset_fast_test_base != NULL, "sanity");
@@ -1250,9 +1238,7 @@
     }
   }
 
-  void old_set_remove(HeapRegion* hr) {
-    _old_set.remove(hr);
-  }
+  inline void old_set_remove(HeapRegion* hr);
 
   size_t non_young_capacity_bytes() {
     return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
@@ -1343,7 +1329,7 @@
   void heap_region_iterate(HeapRegionClosure* blk) const;
 
   // Return the region with the given index. It assumes the index is valid.
-  HeapRegion* region_at(uint index) const { return _hrs.at(index); }
+  inline HeapRegion* region_at(uint index) const;
 
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
@@ -1472,10 +1458,7 @@
     return true;
   }
 
-  bool is_in_young(const oop obj) {
-    HeapRegion* hr = heap_region_containing(obj);
-    return hr != NULL && hr->is_young();
-  }
+  inline bool is_in_young(const oop obj);
 
 #ifdef ASSERT
   virtual bool is_in_partial_collection(const void* p);
@@ -1488,9 +1471,7 @@
   // pre-value that needs to be remembered; for the remembered-set
   // update logging post-barrier, we don't maintain remembered set
   // information for young gen objects.
-  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
-    return is_in_young(new_obj);
-  }
+  virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
 
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
@@ -1584,23 +1565,9 @@
 
   // Added if it is NULL it isn't dead.
 
-  bool is_obj_dead(const oop obj) const {
-    const HeapRegion* hr = heap_region_containing(obj);
-    if (hr == NULL) {
-      if (obj == NULL) return false;
-      else return true;
-    }
-    else return is_obj_dead(obj, hr);
-  }
+  inline bool is_obj_dead(const oop obj) const;
 
-  bool is_obj_ill(const oop obj) const {
-    const HeapRegion* hr = heap_region_containing(obj);
-    if (hr == NULL) {
-      if (obj == NULL) return false;
-      else return true;
-    }
-    else return is_obj_ill(obj, hr);
-  }
+  inline bool is_obj_ill(const oop obj) const;
 
   bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
   HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
@@ -1694,26 +1661,10 @@
 
   bool is_obj_dead_cond(const oop obj,
                         const HeapRegion* hr,
-                        const VerifyOption vo) const {
-    switch (vo) {
-    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
-    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
-    case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
-    default:                            ShouldNotReachHere();
-    }
-    return false; // keep some compilers happy
-  }
+                        const VerifyOption vo) const;
 
   bool is_obj_dead_cond(const oop obj,
-                        const VerifyOption vo) const {
-    switch (vo) {
-    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
-    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
-    case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
-    default:                            ShouldNotReachHere();
-    }
-    return false; // keep some compilers happy
-  }
+                        const VerifyOption vo) const;
 
   // Printing
 
@@ -1807,11 +1758,7 @@
   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
   G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
 
-  template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
-    if (!from->is_survivor()) {
-      _g1_rem->par_write_ref(from, p, tid);
-    }
-  }
+  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
 
   template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
     // If the new value of the field points to the same region or
@@ -1853,13 +1800,7 @@
     refs()->push(ref);
   }
 
-  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
-    if (G1DeferredRSUpdate) {
-      deferred_rs_update(from, p, tid);
-    } else {
-      immediate_rs_update(from, p, tid);
-    }
-  }
+  template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
 
   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
     HeapWord* obj = NULL;
@@ -1983,54 +1924,7 @@
     return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
   }
 
-  void do_oop_partial_array(oop* p) {
-    assert(has_partial_array_mask(p), "invariant");
-    oop from_obj = clear_partial_array_mask(p);
-
-    assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
-    assert(from_obj->is_objArray(), "must be obj array");
-    objArrayOop from_obj_array = objArrayOop(from_obj);
-    // The from-space object contains the real length.
-    int length                 = from_obj_array->length();
-
-    assert(from_obj->is_forwarded(), "must be forwarded");
-    oop to_obj                 = from_obj->forwardee();
-    assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
-    objArrayOop to_obj_array   = objArrayOop(to_obj);
-    // We keep track of the next start index in the length field of the
-    // to-space object.
-    int next_index             = to_obj_array->length();
-    assert(0 <= next_index && next_index < length,
-           err_msg("invariant, next index: %d, length: %d", next_index, length));
-
-    int start                  = next_index;
-    int end                    = length;
-    int remainder              = end - start;
-    // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
-    if (remainder > 2 * ParGCArrayScanChunk) {
-      end = start + ParGCArrayScanChunk;
-      to_obj_array->set_length(end);
-      // Push the remainder before we process the range in case another
-      // worker has run out of things to do and can steal it.
-      oop* from_obj_p = set_partial_array_mask(from_obj);
-      push_on_queue(from_obj_p);
-    } else {
-      assert(length == end, "sanity");
-      // We'll process the final range for this object. Restore the length
-      // so that the heap remains parsable in case of evacuation failure.
-      to_obj_array->set_length(end);
-    }
-    _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
-    // Process indexes [start,end). It will also process the header
-    // along with the first chunk (i.e., the chunk with start == 0).
-    // Note that at this point the length field of to_obj_array is not
-    // correct given that we are using it to keep track of the next
-    // start index. oop_iterate_range() (thankfully!) ignores the length
-    // field and only relies on the start / end parameters.  It does
-    // however return the size of the object which will be incorrect. So
-    // we have to ignore it even if we wanted to use it.
-    to_obj_array->oop_iterate_range(&_scanner, start, end);
-  }
+  inline void do_oop_partial_array(oop* p);
 
   // This method is applied to the fields of the objects that have just been copied.
   template <class T> void do_oop_evac(T* p, HeapRegion* from) {
@@ -2060,26 +1954,9 @@
 
   oop copy_to_survivor_space(oop const obj);
 
-  template <class T> void deal_with_reference(T* ref_to_scan) {
-    if (!has_partial_array_mask(ref_to_scan)) {
-      // Note: we can use "raw" versions of "region_containing" because
-      // "obj_to_scan" is definitely in the heap, and is not in a
-      // humongous region.
-      HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-      do_oop_evac(ref_to_scan, r);
-    } else {
-      do_oop_partial_array((oop*)ref_to_scan);
-    }
-  }
+  template <class T> inline void deal_with_reference(T* ref_to_scan);
 
-  void deal_with_reference(StarTask ref) {
-    assert(verify_task(ref), "sanity");
-    if (ref.is_narrow()) {
-      deal_with_reference((narrowOop*)ref);
-    } else {
-      deal_with_reference((oop*)ref);
-    }
-  }
+  inline void deal_with_reference(StarTask ref);
 
 public:
   void trim_queue();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
@@ -36,6 +37,9 @@
 
 // Inline functions for G1CollectedHeap
 
+// Return the region with the given index. It assumes the index is valid.
+inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }
+
 template <class T>
 inline HeapRegion*
 G1CollectedHeap::heap_region_containing(const T addr) const {
@@ -55,6 +59,10 @@
   return res;
 }
 
+inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
+  _old_set.remove(hr);
+}
+
 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
   HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
   return r != NULL && r->in_collection_set();
@@ -151,6 +159,24 @@
   return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
 }
 
+
+// This is a fast test on whether a reference points into the
+// collection set or not. Assume that the reference
+// points into the heap.
+inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
+  assert(_in_cset_fast_test != NULL, "sanity");
+  assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
+  // no need to subtract the bottom of the heap from obj,
+  // _in_cset_fast_test is biased
+  uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
+  bool ret = _in_cset_fast_test[index];
+  // let's make sure the result is consistent with what the slower
+  // test returns
+  assert( ret || !obj_in_cs(obj), "sanity");
+  assert(!ret ||  obj_in_cs(obj), "sanity");
+  return ret;
+}
+
 #ifndef PRODUCT
 // Support for G1EvacuationFailureALot
 
@@ -224,4 +250,121 @@
 }
 #endif  // #ifndef PRODUCT
 
+inline bool G1CollectedHeap::is_in_young(const oop obj) {
+  HeapRegion* hr = heap_region_containing(obj);
+  return hr != NULL && hr->is_young();
+}
+
+// We don't need barriers for initializing stores to objects
+// in the young gen: for the SATB pre-barrier, there is no
+// pre-value that needs to be remembered; for the remembered-set
+// update logging post-barrier, we don't maintain remembered set
+// information for young gen objects.
+inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
+  return is_in_young(new_obj);
+}
+
+inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
+  const HeapRegion* hr = heap_region_containing(obj);
+  if (hr == NULL) {
+    if (obj == NULL) return false;
+    else return true;
+  }
+  else return is_obj_dead(obj, hr);
+}
+
+inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
+  const HeapRegion* hr = heap_region_containing(obj);
+  if (hr == NULL) {
+    if (obj == NULL) return false;
+    else return true;
+  }
+  else return is_obj_ill(obj, hr);
+}
+
+template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
+  if (!from->is_survivor()) {
+    _g1_rem->par_write_ref(from, p, tid);
+  }
+}
+
+template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
+  if (G1DeferredRSUpdate) {
+    deferred_rs_update(from, p, tid);
+  } else {
+    immediate_rs_update(from, p, tid);
+  }
+}
+
+
+inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
+  assert(has_partial_array_mask(p), "invariant");
+  oop from_obj = clear_partial_array_mask(p);
+
+  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
+  assert(from_obj->is_objArray(), "must be obj array");
+  objArrayOop from_obj_array = objArrayOop(from_obj);
+  // The from-space object contains the real length.
+  int length                 = from_obj_array->length();
+
+  assert(from_obj->is_forwarded(), "must be forwarded");
+  oop to_obj                 = from_obj->forwardee();
+  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
+  objArrayOop to_obj_array   = objArrayOop(to_obj);
+  // We keep track of the next start index in the length field of the
+  // to-space object.
+  int next_index             = to_obj_array->length();
+  assert(0 <= next_index && next_index < length,
+         err_msg("invariant, next index: %d, length: %d", next_index, length));
+
+  int start                  = next_index;
+  int end                    = length;
+  int remainder              = end - start;
+  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
+  if (remainder > 2 * ParGCArrayScanChunk) {
+    end = start + ParGCArrayScanChunk;
+    to_obj_array->set_length(end);
+    // Push the remainder before we process the range in case another
+    // worker has run out of things to do and can steal it.
+    oop* from_obj_p = set_partial_array_mask(from_obj);
+    push_on_queue(from_obj_p);
+  } else {
+    assert(length == end, "sanity");
+    // We'll process the final range for this object. Restore the length
+    // so that the heap remains parsable in case of evacuation failure.
+    to_obj_array->set_length(end);
+  }
+  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
+  // Process indexes [start,end). It will also process the header
+  // along with the first chunk (i.e., the chunk with start == 0).
+  // Note that at this point the length field of to_obj_array is not
+  // correct given that we are using it to keep track of the next
+  // start index. oop_iterate_range() (thankfully!) ignores the length
+  // field and only relies on the start / end parameters.  It does
+  // however return the size of the object which will be incorrect. So
+  // we have to ignore it even if we wanted to use it.
+  to_obj_array->oop_iterate_range(&_scanner, start, end);
+}
+
+template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
+  if (!has_partial_array_mask(ref_to_scan)) {
+    // Note: we can use "raw" versions of "region_containing" because
+    // "obj_to_scan" is definitely in the heap, and is not in a
+    // humongous region.
+    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
+    do_oop_evac(ref_to_scan, r);
+  } else {
+    do_oop_partial_array((oop*)ref_to_scan);
+  }
+}
+
+inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
+  assert(verify_task(ref), "sanity");
+  if (ref.is_narrow()) {
+    deal_with_reference((narrowOop*)ref);
+  } else {
+    deal_with_reference((oop*)ref);
+  }
+}
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -472,9 +472,6 @@
     } else if (!g1h->is_obj_dead(obj)) {
       cl->do_object(obj);
     }
-    if (cl->abort()) return cur;
-    // The check above must occur before the operation below, since an
-    // abort might invalidate the "size" operation.
     cur += obj->size();
   }
   return NULL;
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
 
-#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "memory/allocation.hpp"
 #include "memory/cardTableModRefBS.hpp"
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -89,6 +89,15 @@
   assert(((_gc_cause != GCCause::_no_gc) &&
           (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");
 
+  // To be able to handle a GC the VM initialization needs to be completed.
+  if (!is_init_completed()) {
+    vm_exit_during_initialization(
+      err_msg("GC triggered before VM initialization completed. Try increasing "
+              "NewSize, current value " UINTX_FORMAT "%s.",
+              byte_size_in_proper_unit(NewSize),
+              proper_unit_for_byte_size(NewSize)));
+  }
+
   acquire_pending_list_lock();
   // If the GC count has changed someone beat us to the collection
   // Get the Heap_lock after the pending_list_lock.
--- a/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -35,8 +35,6 @@
 #include "runtime/timer.hpp"
 
 
-#ifndef PRODUCT
-
 // Standard closure for BytecodeTracer: prints the current bytecode
 // and its attributes using bytecode-specific information.
 
@@ -600,4 +598,3 @@
     }
   }
 }
-#endif // PRODUCT
--- a/hotspot/src/share/vm/interpreter/bytecodeTracer.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeTracer.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -34,8 +34,7 @@
 // By specialising the BytecodeClosure, all kinds of bytecode traces can
 // be done.
 
-#ifndef PRODUCT
-// class BytecodeTracer is only used by TraceBytecodes option
+// class BytecodeTracer is used by TraceBytecodes option and PrintMethodData
 
 class BytecodeClosure;
 class BytecodeTracer: AllStatic {
@@ -60,6 +59,4 @@
   virtual void trace(methodHandle method, address bcp, outputStream* st) = 0;
 };
 
-#endif // !PRODUCT
-
 #endif // SHARE_VM_INTERPRETER_BYTECODETRACER_HPP
--- a/hotspot/src/share/vm/memory/allocation.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/memory/allocation.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -748,6 +748,12 @@
   bool _use_malloc;
   size_t _size;
   bool _free_in_destructor;
+
+  static bool should_use_malloc(size_t size) {
+    return size < ArrayAllocatorMallocLimit;
+  }
+
+  static char* allocate_inner(size_t& size, bool& use_malloc);
  public:
   ArrayAllocator(bool free_in_destructor = true) :
     _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }
@@ -759,6 +765,7 @@
   }
 
   E* allocate(size_t length);
+  E* reallocate(size_t new_length);
   void free();
 };
 
--- a/hotspot/src/share/vm/memory/allocation.inline.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/memory/allocation.inline.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -122,35 +122,57 @@
 }
 
 template <class E, MEMFLAGS F>
+char* ArrayAllocator<E, F>::allocate_inner(size_t &size, bool &use_malloc) {
+  char* addr = NULL;
+
+  if (use_malloc) {
+    addr = AllocateHeap(size, F);
+    if (addr == NULL && size >= (size_t)os::vm_allocation_granularity()) {
+      // malloc failed let's try with mmap instead
+      use_malloc = false;
+    } else {
+      return addr;
+    }
+  }
+
+  int alignment = os::vm_allocation_granularity();
+  size = align_size_up(size, alignment);
+
+  addr = os::reserve_memory(size, NULL, alignment, F);
+  if (addr == NULL) {
+    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
+  }
+
+  os::commit_memory_or_exit(addr, size, !ExecMem, "Allocator (commit)");
+  return addr;
+}
+
+template <class E, MEMFLAGS F>
 E* ArrayAllocator<E, F>::allocate(size_t length) {
   assert(_addr == NULL, "Already in use");
 
   _size = sizeof(E) * length;
-  _use_malloc = _size < ArrayAllocatorMallocLimit;
-
-  if (_use_malloc) {
-    _addr = AllocateHeap(_size, F);
-    if (_addr == NULL && _size >=  (size_t)os::vm_allocation_granularity()) {
-      // malloc failed let's try with mmap instead
-      _use_malloc = false;
-    } else {
-      return (E*)_addr;
-    }
-  }
-
-  int alignment = os::vm_allocation_granularity();
-  _size = align_size_up(_size, alignment);
-
-  _addr = os::reserve_memory(_size, NULL, alignment, F);
-  if (_addr == NULL) {
-    vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
-  }
-
-  os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");
+  _use_malloc = should_use_malloc(_size);
+  _addr = allocate_inner(_size, _use_malloc);
 
   return (E*)_addr;
 }
 
+template <class E, MEMFLAGS F>
+E* ArrayAllocator<E, F>::reallocate(size_t new_length) {
+  size_t new_size = sizeof(E) * new_length;
+  bool use_malloc = should_use_malloc(new_size);
+  char* new_addr = allocate_inner(new_size, use_malloc);
+
+  memcpy(new_addr, _addr, MIN2(new_size, _size));
+
+  free();
+  _size = new_size;
+  _use_malloc = use_malloc;
+  _addr = new_addr;
+  return (E*)new_addr;
+}
+
 template<class E, MEMFLAGS F>
 void ArrayAllocator<E, F>::free() {
   if (_addr != NULL) {
--- a/hotspot/src/share/vm/memory/gcLocker.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/memory/gcLocker.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -28,7 +28,6 @@
 #include "memory/sharedHeap.hpp"
 
 volatile jint GC_locker::_jni_lock_count = 0;
-volatile jint GC_locker::_lock_count     = 0;
 volatile bool GC_locker::_needs_gc       = false;
 volatile bool GC_locker::_doing_gc       = false;
 
@@ -102,7 +101,7 @@
   // We check that at least one thread is in a critical region before
   // blocking because blocked threads are woken up by a thread exiting
   // a JNI critical region.
-  while ((needs_gc() && is_jni_active()) || _doing_gc) {
+  while (is_active_and_needs_gc() || _doing_gc) {
     JNICritical_lock->wait();
   }
   thread->enter_critical();
@@ -116,27 +115,20 @@
   _jni_lock_count--;
   decrement_debug_jni_lock_count();
   thread->exit_critical();
-  if (needs_gc() && !is_jni_active()) {
+  if (needs_gc() && !is_active_internal()) {
     // We're the last thread out. Cause a GC to occur.
-    // GC will also check is_active, so this check is not
-    // strictly needed. It's added here to make it clear that
-    // the GC will NOT be performed if any other caller
-    // of GC_locker::lock() still needs GC locked.
-    if (!is_active_internal()) {
-      _doing_gc = true;
-      {
-        // Must give up the lock while at a safepoint
-        MutexUnlocker munlock(JNICritical_lock);
-        if (PrintJNIGCStalls && PrintGCDetails) {
-          ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
-          gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
-                                 gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
-        }
-        Universe::heap()->collect(GCCause::_gc_locker);
+    _doing_gc = true;
+    {
+      // Must give up the lock while at a safepoint
+      MutexUnlocker munlock(JNICritical_lock);
+      if (PrintJNIGCStalls && PrintGCDetails) {
+        ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
+        gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
+            gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
       }
-      _doing_gc = false;
+      Universe::heap()->collect(GCCause::_gc_locker);
     }
-
+    _doing_gc = false;
     _needs_gc = false;
     JNICritical_lock->notify_all();
   }
--- a/hotspot/src/share/vm/memory/gcLocker.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/memory/gcLocker.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -54,8 +54,6 @@
   // safepointing and decremented during the slow path of GC_locker
   // unlocking.
   static volatile jint _jni_lock_count;  // number of jni active instances.
-
-  static volatile jint _lock_count;      // number of other active instances
   static volatile bool _needs_gc;        // heap is filling, we need a GC
                                          // note: bool is typedef'd as jint
   static volatile bool _doing_gc;        // unlock_critical() is doing a GC
@@ -66,12 +64,6 @@
   static volatile jint _debug_jni_lock_count;
 #endif
 
-  // Accessors
-  static bool is_jni_active() {
-    assert(_needs_gc, "only valid when _needs_gc is set");
-    return _jni_lock_count > 0;
-  }
-
   // At a safepoint, visit all threads and count the number of active
   // critical sections.  This is used to ensure that all active
   // critical sections are exited before a new one is started.
@@ -82,7 +74,7 @@
 
   static bool is_active_internal() {
     verify_critical_count();
-    return _lock_count > 0 || _jni_lock_count > 0;
+    return _jni_lock_count > 0;
   }
 
  public:
@@ -132,10 +124,6 @@
   // not a stable predicate.
   static void stall_until_clear();
 
-  // Non-structured GC locking: currently needed for JNI. Use with care!
-  static void lock();
-  static void unlock();
-
   // The following two methods are used for JNI critical regions.
   // If we find that we failed to perform a GC because the GC_locker
   // was active, arrange for one as soon as possible by allowing
--- a/hotspot/src/share/vm/memory/gcLocker.inline.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/memory/gcLocker.inline.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -27,22 +27,6 @@
 
 #include "memory/gcLocker.hpp"
 
-inline void GC_locker::lock() {
-  // cast away volatile
-  Atomic::inc(&_lock_count);
-  CHECK_UNHANDLED_OOPS_ONLY(
-    if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count++; })
-  assert(Universe::heap() == NULL ||
-         !Universe::heap()->is_gc_active(), "locking failed");
-}
-
-inline void GC_locker::unlock() {
-  // cast away volatile
-  Atomic::dec(&_lock_count);
-  CHECK_UNHANDLED_OOPS_ONLY(
-    if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count--; })
-}
-
 inline void GC_locker::lock_critical(JavaThread* thread) {
   if (!thread->in_critical()) {
     if (needs_gc()) {
--- a/hotspot/src/share/vm/memory/genOopClosures.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/memory/genOopClosures.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -115,9 +115,6 @@
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p);
   inline void do_oop_nv(narrowOop* p);
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_write;
-  }
 };
 
 // Closure for scanning DefNewGeneration.
@@ -137,9 +134,6 @@
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p);
   inline void do_oop_nv(narrowOop* p);
-  Prefetch::style prefetch_style() {
-    return Prefetch::do_write;
-  }
 };
 
 class KlassScanClosure: public KlassClosure {
--- a/hotspot/src/share/vm/memory/iterator.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/memory/iterator.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -27,11 +27,8 @@
 
 #include "memory/allocation.hpp"
 #include "memory/memRegion.hpp"
-#include "runtime/prefetch.hpp"
 #include "utilities/top.hpp"
 
-// The following classes are C++ `closures` for iterating over objects, roots and spaces
-
 class CodeBlob;
 class nmethod;
 class ReferenceProcessor;
@@ -39,22 +36,11 @@
 class KlassClosure;
 class ClassLoaderData;
 
-// Closure provides abortability.
+// The following classes are C++ `closures` for iterating over objects, roots and spaces
 
-class Closure : public StackObj {
- protected:
-  bool _abort;
-  void set_abort() { _abort = true; }
- public:
-  Closure() : _abort(false) {}
-  // A subtype can use this mechanism to indicate to some iterator mapping
-  // functions that the iteration should cease.
-  bool abort() { return _abort; }
-  void clear_abort() { _abort = false; }
-};
+class Closure : public StackObj { };
 
 // OopClosure is used for iterating through references to Java objects.
-
 class OopClosure : public Closure {
  public:
   virtual void do_oop(oop* o) = 0;
@@ -97,11 +83,6 @@
 
   virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
 
-  // Controls how prefetching is done for invocations of this closure.
-  Prefetch::style prefetch_style() { // Note that this is non-virtual.
-    return Prefetch::do_none;
-  }
-
   // True iff this closure may be safely applied more than once to an oop
   // location without an intervening "major reset" (like the end of a GC).
   virtual bool idempotent() { return false; }
@@ -177,19 +158,6 @@
   ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
 };
 
-// A version of ObjectClosure with "memory" (see _previous_address below)
-class UpwardsObjectClosure: public BoolObjectClosure {
-  HeapWord* _previous_address;
- public:
-  UpwardsObjectClosure() : _previous_address(NULL) { }
-  void set_previous(HeapWord* addr) { _previous_address = addr; }
-  HeapWord* previous()              { return _previous_address; }
-  // A return value of "true" can be used by the caller to decide
-  // if this object's end should *NOT* be recorded in
-  // _previous_address above.
-  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
-};
-
 // A version of ObjectClosure that is expected to be robust
 // in the face of possibly uninitialized objects.
 class ObjectClosureCareful : public ObjectClosure {
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -645,9 +645,6 @@
   TraceTime timer("Dump Shared Spaces", TraceStartupTime);
   ResourceMark rm;
 
-  // Lock out GC - is it necessary? I don't think we care.
-  No_GC_Verifier no_gc;
-
   // Preload classes to be shared.
   // Should use some os:: method rather than fopen() here. aB.
   // Construct the path to the class list (in jre/lib)
--- a/hotspot/src/share/vm/memory/space.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/memory/space.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -302,10 +302,6 @@
   CompactibleSpace::clear(mangle_space);
 }
 
-bool ContiguousSpace::is_in(const void* p) const {
-  return _bottom <= p && p < _top;
-}
-
 bool ContiguousSpace::is_free_block(const HeapWord* p) const {
   return p >= _top;
 }
@@ -547,115 +543,11 @@
   object_iterate(&blk2);
 }
 
-HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
-  guarantee(false, "NYI");
-  return bottom();
-}
-
-HeapWord* Space::object_iterate_careful_m(MemRegion mr,
-                                          ObjectClosureCareful* cl) {
-  guarantee(false, "NYI");
-  return bottom();
-}
-
-
-void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
-  assert(!mr.is_empty(), "Should be non-empty");
-  // We use MemRegion(bottom(), end()) rather than used_region() below
-  // because the two are not necessarily equal for some kinds of
-  // spaces, in particular, certain kinds of free list spaces.
-  // We could use the more complicated but more precise:
-  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
-  // but the slight imprecision seems acceptable in the assertion check.
-  assert(MemRegion(bottom(), end()).contains(mr),
-         "Should be within used space");
-  HeapWord* prev = cl->previous();   // max address from last time
-  if (prev >= mr.end()) { // nothing to do
-    return;
-  }
-  // This assert will not work when we go from cms space to perm
-  // space, and use same closure. Easy fix deferred for later. XXX YSR
-  // assert(prev == NULL || contains(prev), "Should be within space");
-
-  bool last_was_obj_array = false;
-  HeapWord *blk_start_addr, *region_start_addr;
-  if (prev > mr.start()) {
-    region_start_addr = prev;
-    blk_start_addr    = prev;
-    // The previous invocation may have pushed "prev" beyond the
-    // last allocated block yet there may be still be blocks
-    // in this region due to a particular coalescing policy.
-    // Relax the assertion so that the case where the unallocated
-    // block is maintained and "prev" is beyond the unallocated
-    // block does not cause the assertion to fire.
-    assert((BlockOffsetArrayUseUnallocatedBlock &&
-            (!is_in(prev))) ||
-           (blk_start_addr == block_start(region_start_addr)), "invariant");
-  } else {
-    region_start_addr = mr.start();
-    blk_start_addr    = block_start(region_start_addr);
-  }
-  HeapWord* region_end_addr = mr.end();
-  MemRegion derived_mr(region_start_addr, region_end_addr);
-  while (blk_start_addr < region_end_addr) {
-    const size_t size = block_size(blk_start_addr);
-    if (block_is_obj(blk_start_addr)) {
-      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
-    } else {
-      last_was_obj_array = false;
-    }
-    blk_start_addr += size;
-  }
-  if (!last_was_obj_array) {
-    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
-           "Should be within (closed) used space");
-    assert(blk_start_addr > prev, "Invariant");
-    cl->set_previous(blk_start_addr); // min address for next time
-  }
-}
-
 bool Space::obj_is_alive(const HeapWord* p) const {
   assert (block_is_obj(p), "The address should point to an object");
   return true;
 }
 
-void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
-  assert(!mr.is_empty(), "Should be non-empty");
-  assert(used_region().contains(mr), "Should be within used space");
-  HeapWord* prev = cl->previous();   // max address from last time
-  if (prev >= mr.end()) { // nothing to do
-    return;
-  }
-  // See comment above (in more general method above) in case you
-  // happen to use this method.
-  assert(prev == NULL || is_in_reserved(prev), "Should be within space");
-
-  bool last_was_obj_array = false;
-  HeapWord *obj_start_addr, *region_start_addr;
-  if (prev > mr.start()) {
-    region_start_addr = prev;
-    obj_start_addr    = prev;
-    assert(obj_start_addr == block_start(region_start_addr), "invariant");
-  } else {
-    region_start_addr = mr.start();
-    obj_start_addr    = block_start(region_start_addr);
-  }
-  HeapWord* region_end_addr = mr.end();
-  MemRegion derived_mr(region_start_addr, region_end_addr);
-  while (obj_start_addr < region_end_addr) {
-    oop obj = oop(obj_start_addr);
-    const size_t size = obj->size();
-    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
-    obj_start_addr += size;
-  }
-  if (!last_was_obj_array) {
-    assert((bottom() <= obj_start_addr)  && (obj_start_addr <= end()),
-           "Should be within (closed) used space");
-    assert(obj_start_addr > prev, "Invariant");
-    cl->set_previous(obj_start_addr); // min address for next time
-  }
-}
-
 #if INCLUDE_ALL_GCS
 #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                             \
--- a/hotspot/src/share/vm/memory/space.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/memory/space.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -120,6 +120,12 @@
 
   void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }
 
+  // Returns true if this object has been allocated since a
+  // generation's "save_marks" call.
+  virtual bool obj_allocated_since_save_marks(const oop obj) const {
+    return (HeapWord*)obj >= saved_mark_word();
+  }
+
   MemRegionClosure* preconsumptionDirtyCardClosure() const {
     return _preconsumptionDirtyCardClosure;
   }
@@ -127,9 +133,9 @@
     _preconsumptionDirtyCardClosure = cl;
   }
 
-  // Returns a subregion of the space containing all the objects in
+  // Returns a subregion of the space containing only the allocated objects in
   // the space.
-  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }
+  virtual MemRegion used_region() const = 0;
 
   // Returns a region that is guaranteed to contain (at least) all objects
   // allocated at the time of the last call to "save_marks".  If the space
@@ -139,7 +145,7 @@
   // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
   // the space must distinguish between objects in the region allocated before
   // and after the call to save marks.
-  virtual MemRegion used_region_at_save_marks() const {
+  MemRegion used_region_at_save_marks() const {
     return MemRegion(bottom(), saved_mark_word());
   }
 
@@ -172,7 +178,9 @@
   // expensive operation. To prevent performance problems
   // on account of its inadvertent use in product jvm's,
   // we restrict its use to assertion checks only.
-  virtual bool is_in(const void* p) const = 0;
+  bool is_in(const void* p) const {
+    return used_region().contains(p);
+  }
 
   // Returns true iff the given reserved memory of the space contains the
   // given address.
@@ -204,24 +212,6 @@
   // objects whose internal references point to objects in the space.
   virtual void safe_object_iterate(ObjectClosure* blk) = 0;
 
-  // Iterate over all objects that intersect with mr, calling "cl->do_object"
-  // on each.  There is an exception to this: if this closure has already
-  // been invoked on an object, it may skip such objects in some cases.  This is
-  // Most likely to happen in an "upwards" (ascending address) iteration of
-  // MemRegions.
-  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
-
-  // Iterate over as many initialized objects in the space as possible,
-  // calling "cl.do_object_careful" on each. Return NULL if all objects
-  // in the space (at the start of the iteration) were iterated over.
-  // Return an address indicating the extent of the iteration in the
-  // event that the iteration had to return because of finding an
-  // uninitialized object in the space, or if the closure "cl"
-  // signaled early termination.
-  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
-  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
-                                             ObjectClosureCareful* cl);
-
   // Create and return a new dirty card to oop closure. Can be
   // overridden to return the appropriate type of closure
   // depending on the type of space in which the closure will
@@ -262,10 +252,6 @@
   // Allocation (return NULL if full).  Enforces mutual exclusion internally.
   virtual HeapWord* par_allocate(size_t word_size) = 0;
 
-  // Returns true if this object has been allocated since a
-  // generation's "save_marks" call.
-  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;
-
   // Mark-sweep-compact support: all spaces can update pointers to objects
   // moving as a part of compaction.
   virtual void adjust_pointers();
@@ -397,7 +383,7 @@
 
   // Perform operations on the space needed after a compaction
   // has been performed.
-  virtual void reset_after_compaction() {}
+  virtual void reset_after_compaction() = 0;
 
   // Returns the next space (in the current generation) to be compacted in
   // the global compaction order.  Also is used to select the next
@@ -462,7 +448,7 @@
   HeapWord* _end_of_live;
 
   // Minimum size of a free block.
-  virtual size_t minimum_free_block_size() const = 0;
+  virtual size_t minimum_free_block_size() const { return 0; }
 
   // This the function is invoked when an allocation of an object covering
   // "start" to "end occurs crosses the threshold; returns the next
@@ -778,7 +764,7 @@
   HeapWord* top() const            { return _top;    }
   void set_top(HeapWord* value)    { _top = value; }
 
-  virtual void set_saved_mark()    { _saved_mark_word = top();    }
+  void set_saved_mark()            { _saved_mark_word = top();    }
   void reset_saved_mark()          { _saved_mark_word = bottom(); }
 
   WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
@@ -813,35 +799,30 @@
   size_t used() const            { return byte_size(bottom(), top()); }
   size_t free() const            { return byte_size(top(),    end()); }
 
-  // Override from space.
-  bool is_in(const void* p) const;
-
   virtual bool is_free_block(const HeapWord* p) const;
 
   // In a contiguous space we have a more obvious bound on what parts
   // contain objects.
   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 
-  MemRegion used_region_at_save_marks() const {
-    return MemRegion(bottom(), saved_mark_word());
-  }
-
   // Allocation (return NULL if full)
   virtual HeapWord* allocate(size_t word_size);
   virtual HeapWord* par_allocate(size_t word_size);
 
-  virtual bool obj_allocated_since_save_marks(const oop obj) const {
-    return (HeapWord*)obj >= saved_mark_word();
-  }
-
   // Iteration
   void oop_iterate(ExtendedOopClosure* cl);
   void object_iterate(ObjectClosure* blk);
   // For contiguous spaces this method will iterate safely over objects
   // in the space (i.e., between bottom and top) when at a safepoint.
   void safe_object_iterate(ObjectClosure* blk);
-  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
-  // iterates on objects up to the safe limit
+
+  // Iterate over as many initialized objects in the space as possible,
+  // calling "cl.do_object_careful" on each. Return NULL if all objects
+  // in the space (at the start of the iteration) were iterated over.
+  // Return an address indicating the extent of the iteration in the
+  // event that the iteration had to return because of finding an
+  // uninitialized object in the space, or if the closure "cl"
+  // signaled early termination.
   HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
   HeapWord* concurrent_iteration_safe_limit() {
     assert(_concurrent_iteration_safe_limit <= top(),
@@ -872,7 +853,6 @@
     // set new iteration safe limit
     set_concurrent_iteration_safe_limit(compaction_top());
   }
-  virtual size_t minimum_free_block_size() const { return 0; }
 
   // Override.
   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
--- a/hotspot/src/share/vm/memory/universe.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/memory/universe.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -632,7 +632,6 @@
   guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
             "oop size is not not a multiple of HeapWord size");
   TraceTime timer("Genesis", TraceStartupTime);
-  GC_locker::lock();  // do not allow gc during bootstrapping
   JavaClasses::compute_hard_coded_offsets();
 
   jint status = Universe::initialize_heap();
@@ -1164,8 +1163,6 @@
 
   MemoryService::add_metaspace_memory_pools();
 
-  GC_locker::unlock();  // allow gc after bootstrapping
-
   MemoryService::set_universe_heap(Universe::_collectedHeap);
   return true;
 }
--- a/hotspot/src/share/vm/oops/method.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/oops/method.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -329,14 +329,12 @@
   }
 }
 
-#ifndef PRODUCT
 void Method::print_invocation_count() {
   if (is_static()) tty->print("static ");
   if (is_final()) tty->print("final ");
   if (is_synchronized()) tty->print("synchronized ");
   if (is_native()) tty->print("native ");
-  method_holder()->name()->print_symbol_on(tty);
-  tty->print(".");
+  tty->print("%s::", method_holder()->external_name());
   name()->print_symbol_on(tty);
   signature()->print_symbol_on(tty);
 
@@ -349,12 +347,12 @@
   tty->print_cr ("  interpreter_invocation_count: %8d ", interpreter_invocation_count());
   tty->print_cr ("  invocation_counter:           %8d ", invocation_count());
   tty->print_cr ("  backedge_counter:             %8d ", backedge_count());
+#ifndef PRODUCT
   if (CountCompiledCalls) {
     tty->print_cr ("  compiled_invocation_count: %8d ", compiled_invocation_count());
   }
-
+#endif
 }
-#endif
 
 // Build a MethodData* object to hold information about this method
 // collected in the interpreter.
@@ -1443,10 +1441,6 @@
 #endif // !PRODUCT || INCLUDE_JVMTI
 
 
-//-----------------------------------------------------------------------------------
-// Non-product code
-
-#ifndef PRODUCT
 void Method::print_codes_on(outputStream* st) const {
   print_codes_on(0, code_size(), st);
 }
@@ -1460,7 +1454,6 @@
   BytecodeTracer::set_closure(BytecodeTracer::std_closure());
   while (s.next() >= 0) BytecodeTracer::trace(mh, s.bcp(), st);
 }
-#endif // not PRODUCT
 
 
 // Simple compression of line number tables. We use a regular compressed stream, except that we compress deltas
--- a/hotspot/src/share/vm/oops/method.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/oops/method.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -394,6 +394,9 @@
 #ifndef PRODUCT
   int  compiled_invocation_count() const         { return _compiled_invocation_count;  }
   void set_compiled_invocation_count(int count)  { _compiled_invocation_count = count; }
+#else
+  // for PrintMethodData in a product build
+  int  compiled_invocation_count() const         { return 0;  }
 #endif // not PRODUCT
 
   // Clear (non-shared space) pointers which could not be relevant
@@ -462,10 +465,8 @@
   // Interpreter oopmap support
   void mask_for(int bci, InterpreterOopMap* mask);
 
-#ifndef PRODUCT
   // operations on invocation counter
   void print_invocation_count();
-#endif
 
   // byte codes
   void    set_code(address code)      { return constMethod()->set_code(code); }
@@ -474,8 +475,8 @@
 
   // prints byte codes
   void print_codes() const            { print_codes_on(tty); }
-  void print_codes_on(outputStream* st) const                      PRODUCT_RETURN;
-  void print_codes_on(int from, int to, outputStream* st) const    PRODUCT_RETURN;
+  void print_codes_on(outputStream* st) const;
+  void print_codes_on(int from, int to, outputStream* st) const;
 
   // method parameters
   bool has_method_parameters() const
--- a/hotspot/src/share/vm/oops/methodData.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/oops/methodData.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -115,7 +115,6 @@
   print_data_on(st, print_data_on_helper(md));
 }
 
-#ifndef PRODUCT
 void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
   st->print("bci: %d", bci());
   st->fill_to(tab_width_one);
@@ -138,7 +137,6 @@
 void ProfileData::tab(outputStream* st, bool first) const {
   st->fill_to(first ? tab_width_one : tab_width_two);
 }
-#endif // !PRODUCT
 
 // ==================================================================
 // BitData
@@ -147,23 +145,19 @@
 // whether a checkcast bytecode has seen a null value.
 
 
-#ifndef PRODUCT
 void BitData::print_data_on(outputStream* st, const char* extra) const {
   print_shared(st, "BitData", extra);
 }
-#endif // !PRODUCT
 
 // ==================================================================
 // CounterData
 //
 // A CounterData corresponds to a simple counter.
 
-#ifndef PRODUCT
 void CounterData::print_data_on(outputStream* st, const char* extra) const {
   print_shared(st, "CounterData", extra);
   st->print_cr("count(%u)", count());
 }
-#endif // !PRODUCT
 
 // ==================================================================
 // JumpData
@@ -188,12 +182,10 @@
   set_displacement(offset);
 }
 
-#ifndef PRODUCT
 void JumpData::print_data_on(outputStream* st, const char* extra) const {
   print_shared(st, "JumpData", extra);
   st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
 }
-#endif // !PRODUCT
 
 int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
   // Parameter profiling include the receiver
@@ -342,7 +334,6 @@
   return MethodData::profile_arguments();
 }
 
-#ifndef PRODUCT
 void TypeEntries::print_klass(outputStream* st, intptr_t k) {
   if (is_type_none(k)) {
     st->print("none");
@@ -398,7 +389,6 @@
     _ret.print_data_on(st);
   }
 }
-#endif
 
 // ==================================================================
 // ReceiverTypeData
@@ -417,7 +407,6 @@
   }
 }
 
-#ifndef PRODUCT
 void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
   uint row;
   int entries = 0;
@@ -447,7 +436,6 @@
   print_shared(st, "VirtualCallData", extra);
   print_receiver_data_on(st);
 }
-#endif // !PRODUCT
 
 // ==================================================================
 // RetData
@@ -499,7 +487,6 @@
 }
 #endif // CC_INTERP
 
-#ifndef PRODUCT
 void RetData::print_data_on(outputStream* st, const char* extra) const {
   print_shared(st, "RetData", extra);
   uint row;
@@ -516,7 +503,6 @@
     }
   }
 }
-#endif // !PRODUCT
 
 // ==================================================================
 // BranchData
@@ -534,7 +520,6 @@
   set_displacement(offset);
 }
 
-#ifndef PRODUCT
 void BranchData::print_data_on(outputStream* st, const char* extra) const {
   print_shared(st, "BranchData", extra);
   st->print_cr("taken(%u) displacement(%d)",
@@ -542,7 +527,6 @@
   tab(st);
   st->print_cr("not taken(%u)", not_taken());
 }
-#endif
 
 // ==================================================================
 // MultiBranchData
@@ -608,7 +592,6 @@
   }
 }
 
-#ifndef PRODUCT
 void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
   print_shared(st, "MultiBranchData", extra);
   st->print_cr("default_count(%u) displacement(%d)",
@@ -620,9 +603,7 @@
                  count_at(i), displacement_at(i));
   }
 }
-#endif
 
-#ifndef PRODUCT
 void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
   print_shared(st, "ArgInfoData", extra);
   int nargs = number_of_args();
@@ -632,8 +613,6 @@
   st->cr();
 }
 
-#endif
-
 int ParametersTypeData::compute_cell_count(Method* m) {
   if (!MethodData::profile_parameters_for_method(m)) {
     return 0;
@@ -654,7 +633,6 @@
   return MethodData::profile_parameters();
 }
 
-#ifndef PRODUCT
 void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
   st->print("parameter types", extra);
   _parameters.print_data_on(st);
@@ -666,7 +644,6 @@
   method()->print_short_name(st);
   st->cr();
 }
-#endif
 
 // ==================================================================
 // MethodData*
@@ -801,6 +778,8 @@
   case Bytecodes::_invokeinterface:
   case Bytecodes::_if_acmpeq:
   case Bytecodes::_if_acmpne:
+  case Bytecodes::_ifnull:
+  case Bytecodes::_ifnonnull:
   case Bytecodes::_invokestatic:
 #ifdef COMPILER2
     return UseTypeSpeculation;
@@ -1357,8 +1336,6 @@
 
 // Printing
 
-#ifndef PRODUCT
-
 void MethodData::print_on(outputStream* st) const {
   assert(is_methodData(), "should be method data");
   st->print("method data for ");
@@ -1367,15 +1344,12 @@
   print_data_on(st);
 }
 
-#endif //PRODUCT
-
 void MethodData::print_value_on(outputStream* st) const {
   assert(is_methodData(), "should be method data");
   st->print("method data for ");
   method()->print_value_on(st);
 }
 
-#ifndef PRODUCT
 void MethodData::print_data_on(outputStream* st) const {
   ResourceMark rm;
   ProfileData* data = first_data();
@@ -1416,7 +1390,6 @@
     if (dp >= end) return;
   }
 }
-#endif
 
 #if INCLUDE_SERVICES
 // Size Statistics
--- a/hotspot/src/share/vm/oops/methodData.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/oops/methodData.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -280,12 +280,10 @@
   friend class ReturnTypeEntry;
   friend class TypeStackSlotEntries;
 private:
-#ifndef PRODUCT
   enum {
     tab_width_one = 16,
     tab_width_two = 36
   };
-#endif // !PRODUCT
 
   // This is a pointer to a section of profiling data.
   DataLayout* _data;
@@ -521,10 +519,8 @@
 
   void print_data_on(outputStream* st, const MethodData* md) const;
 
-#ifndef PRODUCT
   void print_shared(outputStream* st, const char* name, const char* extra) const;
   void tab(outputStream* st, bool first = false) const;
-#endif
 };
 
 // BitData
@@ -583,9 +579,7 @@
   }
 #endif // CC_INTERP
 
-#ifndef PRODUCT
   void print_data_on(outputStream* st, const char* extra = NULL) const;
-#endif
 };
 
 // CounterData
@@ -646,9 +640,7 @@
   }
 #endif // CC_INTERP
 
-#ifndef PRODUCT
   void print_data_on(outputStream* st, const char* extra = NULL) const;
-#endif
 };
 
 // JumpData
@@ -733,9 +725,7 @@
   // Specific initialization.
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
-#ifndef PRODUCT
   void print_data_on(outputStream* st, const char* extra = NULL) const;
-#endif
 };
 
 // Entries in a ProfileData object to record types: it can either be
@@ -808,9 +798,7 @@
     return with_status((intptr_t)k, in);
   }
 
-#ifndef PRODUCT
   static void print_klass(outputStream* st, intptr_t k);
-#endif
 
   // GC support
   static bool is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p);
@@ -919,9 +907,7 @@
   // GC support
   void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
 
-#ifndef PRODUCT
   void print_data_on(outputStream* st) const;
-#endif
 };
 
 // Type entry used for return from a call. A single cell to record the
@@ -964,9 +950,7 @@
   // GC support
   void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
 
-#ifndef PRODUCT
   void print_data_on(outputStream* st) const;
-#endif
 };
 
 // Entries to collect type information at a call: contains arguments
@@ -1144,9 +1128,7 @@
     }
   }
 
-#ifndef PRODUCT
   virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
-#endif
 };
 
 // ReceiverTypeData
@@ -1288,10 +1270,8 @@
   }
 #endif // CC_INTERP
 
-#ifndef PRODUCT
   void print_receiver_data_on(outputStream* st) const;
   void print_data_on(outputStream* st, const char* extra = NULL) const;
-#endif
 };
 
 // VirtualCallData
@@ -1332,9 +1312,7 @@
   }
 #endif // CC_INTERP
 
-#ifndef PRODUCT
   void print_data_on(outputStream* st, const char* extra = NULL) const;
-#endif
 };
 
 // VirtualCallTypeData
@@ -1458,9 +1436,7 @@
     }
   }
 
-#ifndef PRODUCT
   virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
-#endif
 };
 
 // RetData
@@ -1561,9 +1537,7 @@
   // Specific initialization.
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
-#ifndef PRODUCT
   void print_data_on(outputStream* st, const char* extra = NULL) const;
-#endif
 };
 
 // BranchData
@@ -1639,9 +1613,7 @@
   // Specific initialization.
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
-#ifndef PRODUCT
   void print_data_on(outputStream* st, const char* extra = NULL) const;
-#endif
 };
 
 // ArrayData
@@ -1832,9 +1804,7 @@
   // Specific initialization.
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
-#ifndef PRODUCT
   void print_data_on(outputStream* st, const char* extra = NULL) const;
-#endif
 };
 
 class ArgInfoData : public ArrayData {
@@ -1859,9 +1829,7 @@
     array_set_int_at(arg, val);
   }
 
-#ifndef PRODUCT
   void print_data_on(outputStream* st, const char* extra = NULL) const;
-#endif
 };
 
 // ParametersTypeData
@@ -1920,9 +1888,7 @@
     _parameters.clean_weak_klass_links(is_alive_closure);
   }
 
-#ifndef PRODUCT
   virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
-#endif
 
   static ByteSize stack_slot_offset(int i) {
     return cell_offset(stack_slot_local_offset(i));
@@ -1976,9 +1942,7 @@
     set_intptr_at(method_offset, (intptr_t)m);
   }
 
-#ifndef PRODUCT
   virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
-#endif
 };
 
 // MethodData*
@@ -2052,7 +2016,7 @@
 
   // Whole-method sticky bits and flags
   enum {
-    _trap_hist_limit    = 19,   // decoupled from Deoptimization::Reason_LIMIT
+    _trap_hist_limit    = 20,   // decoupled from Deoptimization::Reason_LIMIT
     _trap_hist_mask     = max_jubyte,
     _extra_data_count   = 4     // extra DataLayout headers, for trap history
   }; // Public flag values
@@ -2457,15 +2421,11 @@
   void set_size(int object_size_in_bytes) { _size = object_size_in_bytes; }
 
   // Printing
-#ifndef PRODUCT
   void print_on      (outputStream* st) const;
-#endif
   void print_value_on(outputStream* st) const;
 
-#ifndef PRODUCT
   // printing support for method data
   void print_data_on(outputStream* st) const;
-#endif
 
   const char* internal_name() const { return "{method data}"; }
 
--- a/hotspot/src/share/vm/opto/addnode.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/addnode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "memory/allocation.inline.hpp"
 #include "opto/addnode.hpp"
+#include "opto/castnode.hpp"
 #include "opto/cfgnode.hpp"
 #include "opto/connode.hpp"
 #include "opto/machnode.hpp"
--- a/hotspot/src/share/vm/opto/callGenerator.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -33,8 +33,8 @@
 #include "opto/addnode.hpp"
 #include "opto/callGenerator.hpp"
 #include "opto/callnode.hpp"
+#include "opto/castnode.hpp"
 #include "opto/cfgnode.hpp"
-#include "opto/connode.hpp"
 #include "opto/parse.hpp"
 #include "opto/rootnode.hpp"
 #include "opto/runtime.hpp"
--- a/hotspot/src/share/vm/opto/callnode.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/callnode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -27,6 +27,7 @@
 #include "compiler/oopMap.hpp"
 #include "opto/callGenerator.hpp"
 #include "opto/callnode.hpp"
+#include "opto/castnode.hpp"
 #include "opto/escape.hpp"
 #include "opto/locknode.hpp"
 #include "opto/machnode.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/castnode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "opto/addnode.hpp"
+#include "opto/castnode.hpp"
+#include "opto/connode.hpp"
+#include "opto/matcher.hpp"
+#include "opto/phaseX.hpp"
+#include "opto/subnode.hpp"
+#include "opto/type.hpp"
+
+//=============================================================================
+// If input is already higher or equal to cast type, then this is an identity.
+Node *ConstraintCastNode::Identity( PhaseTransform *phase ) {
+  return phase->type(in(1))->higher_equal_speculative(_type) ? in(1) : this;
+}
+
+//------------------------------Value------------------------------------------
+// Take 'join' of input and cast-up type
+const Type *ConstraintCastNode::Value( PhaseTransform *phase ) const {
+  if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
+  const Type* ft = phase->type(in(1))->filter_speculative(_type);
+
+#ifdef ASSERT
+  // Previous versions of this function had some special case logic,
+  // which is no longer necessary.  Make sure of the required effects.
+  switch (Opcode()) {
+    case Op_CastII:
+    {
+      const Type* t1 = phase->type(in(1));
+      if( t1 == Type::TOP )  assert(ft == Type::TOP, "special case #1");
+      const Type* rt = t1->join_speculative(_type);
+      if (rt->empty())       assert(ft == Type::TOP, "special case #2");
+      break;
+    }
+    case Op_CastPP:
+    if (phase->type(in(1)) == TypePtr::NULL_PTR &&
+        _type->isa_ptr() && _type->is_ptr()->_ptr == TypePtr::NotNull)
+    assert(ft == Type::TOP, "special case #3");
+    break;
+  }
+#endif //ASSERT
+
+  return ft;
+}
+
+//------------------------------Ideal------------------------------------------
+// Return a node which is more "ideal" than the current node.  Strip out
+// control copies
+Node *ConstraintCastNode::Ideal(PhaseGVN *phase, bool can_reshape){
+  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
+}
+
+//------------------------------Ideal_DU_postCCP-------------------------------
+// Throw away cast after constant propagation
+Node *ConstraintCastNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
+  const Type *t = ccp->type(in(1));
+  ccp->hash_delete(this);
+  set_type(t);                   // Turn into ID function
+  ccp->hash_insert(this);
+  return this;
+}
+
+
+//=============================================================================
+
+//------------------------------Ideal_DU_postCCP-------------------------------
+// If not converting int->oop, throw away cast after constant propagation
+Node *CastPPNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
+  const Type *t = ccp->type(in(1));
+  if (!t->isa_oop_ptr() || ((in(1)->is_DecodeN()) && Matcher::gen_narrow_oop_implicit_null_checks())) {
+    return NULL; // do not transform raw pointers or narrow oops
+  }
+  return ConstraintCastNode::Ideal_DU_postCCP(ccp);
+}
+
+
+
+//=============================================================================
+//------------------------------Identity---------------------------------------
+// If input is already higher or equal to cast type, then this is an identity.
+Node *CheckCastPPNode::Identity( PhaseTransform *phase ) {
+  // Toned down to rescue meeting at a Phi 3 different oops all implementing
+  // the same interface.  CompileTheWorld starting at 502, kd12rc1.zip.
+  return (phase->type(in(1)) == phase->type(this)) ? in(1) : this;
+}
+
+//------------------------------Value------------------------------------------
+// Take 'join' of input and cast-up type, unless working with an Interface
+const Type *CheckCastPPNode::Value( PhaseTransform *phase ) const {
+  if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
+
+  const Type *inn = phase->type(in(1));
+  if( inn == Type::TOP ) return Type::TOP;  // No information yet
+
+  const TypePtr *in_type   = inn->isa_ptr();
+  const TypePtr *my_type   = _type->isa_ptr();
+  const Type *result = _type;
+  if( in_type != NULL && my_type != NULL ) {
+    TypePtr::PTR   in_ptr    = in_type->ptr();
+    if( in_ptr == TypePtr::Null ) {
+      result = in_type;
+    } else if( in_ptr == TypePtr::Constant ) {
+      // Casting a constant oop to an interface?
+      // (i.e., a String to a Comparable?)
+      // Then return the interface.
+      const TypeOopPtr *jptr = my_type->isa_oopptr();
+      assert( jptr, "" );
+      result =  (jptr->klass()->is_interface() || !in_type->higher_equal(_type))
+      ? my_type->cast_to_ptr_type( TypePtr::NotNull )
+      : in_type;
+    } else {
+      result =  my_type->cast_to_ptr_type( my_type->join_ptr(in_ptr) );
+    }
+  }
+
+  // This is the code from TypePtr::xmeet() that prevents us from
+  // having 2 ways to represent the same type. We have to replicate it
+  // here because we don't go through meet/join.
+  if (result->remove_speculative() == result->speculative()) {
+    result = result->remove_speculative();
+  }
+
+  // Same as above: because we don't go through meet/join, remove the
+  // speculative type if we know we won't use it.
+  return result->cleanup_speculative();
+
+  // JOIN NOT DONE HERE BECAUSE OF INTERFACE ISSUES.
+  // FIX THIS (DO THE JOIN) WHEN UNION TYPES APPEAR!
+
+  //
+  // Remove this code after overnight run indicates no performance
+  // loss from not performing JOIN at CheckCastPPNode
+  //
+  // const TypeInstPtr *in_oop = in->isa_instptr();
+  // const TypeInstPtr *my_oop = _type->isa_instptr();
+  // // If either input is an 'interface', return destination type
+  // assert (in_oop == NULL || in_oop->klass() != NULL, "");
+  // assert (my_oop == NULL || my_oop->klass() != NULL, "");
+  // if( (in_oop && in_oop->klass()->is_interface())
+  //   ||(my_oop && my_oop->klass()->is_interface()) ) {
+  //   TypePtr::PTR  in_ptr = in->isa_ptr() ? in->is_ptr()->_ptr : TypePtr::BotPTR;
+  //   // Preserve cast away nullness for interfaces
+  //   if( in_ptr == TypePtr::NotNull && my_oop && my_oop->_ptr == TypePtr::BotPTR ) {
+  //     return my_oop->cast_to_ptr_type(TypePtr::NotNull);
+  //   }
+  //   return _type;
+  // }
+  //
+  // // Neither the input nor the destination type is an interface,
+  //
+  // // history: JOIN used to cause weird corner case bugs
+  // //          return (in == TypeOopPtr::NULL_PTR) ? in : _type;
+  // // JOIN picks up NotNull in common instance-of/check-cast idioms, both oops.
+  // // JOIN does not preserve NotNull in other cases, e.g. RawPtr vs InstPtr
+  // const Type *join = in->join(_type);
+  // // Check if join preserved NotNull'ness for pointers
+  // if( join->isa_ptr() && _type->isa_ptr() ) {
+  //   TypePtr::PTR join_ptr = join->is_ptr()->_ptr;
+  //   TypePtr::PTR type_ptr = _type->is_ptr()->_ptr;
+  //   // If there isn't any NotNull'ness to preserve
+  //   // OR if join preserved NotNull'ness then return it
+  //   if( type_ptr == TypePtr::BotPTR  || type_ptr == TypePtr::Null ||
+  //       join_ptr == TypePtr::NotNull || join_ptr == TypePtr::Constant ) {
+  //     return join;
+  //   }
+  //   // ELSE return same old type as before
+  //   return _type;
+  // }
+  // // Not joining two pointers
+  // return join;
+}
+
+//------------------------------Ideal------------------------------------------
+// Return a node which is more "ideal" than the current node.  Strip out
+// control copies
+Node *CheckCastPPNode::Ideal(PhaseGVN *phase, bool can_reshape){
+  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
+}
+
+//=============================================================================
+//------------------------------Value------------------------------------------
+const Type *CastX2PNode::Value( PhaseTransform *phase ) const {
+  const Type* t = phase->type(in(1));
+  if (t == Type::TOP) return Type::TOP;
+  if (t->base() == Type_X && t->singleton()) {
+    uintptr_t bits = (uintptr_t) t->is_intptr_t()->get_con();
+    if (bits == 0)   return TypePtr::NULL_PTR;
+    return TypeRawPtr::make((address) bits);
+  }
+  return CastX2PNode::bottom_type();
+}
+
+//------------------------------Idealize---------------------------------------
+static inline bool fits_in_int(const Type* t, bool but_not_min_int = false) {
+  if (t == Type::TOP)  return false;
+  const TypeX* tl = t->is_intptr_t();
+  jint lo = min_jint;
+  jint hi = max_jint;
+  if (but_not_min_int)  ++lo;  // caller wants to negate the value w/o overflow
+  return (tl->_lo >= lo) && (tl->_hi <= hi);
+}
+
+static inline Node* addP_of_X2P(PhaseGVN *phase,
+                                Node* base,
+                                Node* dispX,
+                                bool negate = false) {
+  if (negate) {
+    dispX = new (phase->C) SubXNode(phase->MakeConX(0), phase->transform(dispX));
+  }
+  return new (phase->C) AddPNode(phase->C->top(),
+                                 phase->transform(new (phase->C) CastX2PNode(base)),
+                                 phase->transform(dispX));
+}
+
+Node *CastX2PNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  // convert CastX2P(AddX(x, y)) to AddP(CastX2P(x), y) if y fits in an int
+  int op = in(1)->Opcode();
+  Node* x;
+  Node* y;
+  switch (op) {
+    case Op_SubX:
+    x = in(1)->in(1);
+    // Avoid ideal transformations ping-pong between this and AddP for raw pointers.
+    if (phase->find_intptr_t_con(x, -1) == 0)
+    break;
+    y = in(1)->in(2);
+    if (fits_in_int(phase->type(y), true)) {
+      return addP_of_X2P(phase, x, y, true);
+    }
+    break;
+    case Op_AddX:
+    x = in(1)->in(1);
+    y = in(1)->in(2);
+    if (fits_in_int(phase->type(y))) {
+      return addP_of_X2P(phase, x, y);
+    }
+    if (fits_in_int(phase->type(x))) {
+      return addP_of_X2P(phase, y, x);
+    }
+    break;
+  }
+  return NULL;
+}
+
+//------------------------------Identity---------------------------------------
+Node *CastX2PNode::Identity( PhaseTransform *phase ) {
+  if (in(1)->Opcode() == Op_CastP2X)  return in(1)->in(1);
+  return this;
+}
+
+//=============================================================================
+//------------------------------Value------------------------------------------
+const Type *CastP2XNode::Value( PhaseTransform *phase ) const {
+  const Type* t = phase->type(in(1));
+  if (t == Type::TOP) return Type::TOP;
+  if (t->base() == Type::RawPtr && t->singleton()) {
+    uintptr_t bits = (uintptr_t) t->is_rawptr()->get_con();
+    return TypeX::make(bits);
+  }
+  return CastP2XNode::bottom_type();
+}
+
+Node *CastP2XNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
+}
+
+//------------------------------Identity---------------------------------------
+Node *CastP2XNode::Identity( PhaseTransform *phase ) {
+  if (in(1)->Opcode() == Op_CastX2P)  return in(1)->in(1);
+  return this;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/castnode.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OPTO_CASTNODE_HPP
+#define SHARE_VM_OPTO_CASTNODE_HPP
+
+#include "opto/node.hpp"
+#include "opto/opcodes.hpp"
+
+
+//------------------------------ConstraintCastNode-----------------------------
+// cast to a different range
+class ConstraintCastNode: public TypeNode {
+  public:
+  ConstraintCastNode (Node *n, const Type *t ): TypeNode(t,2) {
+    init_class_id(Class_ConstraintCast);
+    init_req(1, n);
+  }
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual int Opcode() const;
+  virtual uint ideal_reg() const = 0;
+  virtual Node *Ideal_DU_postCCP( PhaseCCP * );
+};
+
+//------------------------------CastIINode-------------------------------------
+// cast integer to integer (different range)
+class CastIINode: public ConstraintCastNode {
+  public:
+  CastIINode (Node *n, const Type *t ): ConstraintCastNode(n,t) {}
+  virtual int Opcode() const;
+  virtual uint ideal_reg() const { return Op_RegI; }
+};
+
+//------------------------------CastPPNode-------------------------------------
+// cast pointer to pointer (different type)
+class CastPPNode: public ConstraintCastNode {
+  public:
+  CastPPNode (Node *n, const Type *t ): ConstraintCastNode(n, t) {}
+  virtual int Opcode() const;
+  virtual uint ideal_reg() const { return Op_RegP; }
+  virtual Node *Ideal_DU_postCCP( PhaseCCP * );
+};
+
+//------------------------------CheckCastPPNode--------------------------------
+// for _checkcast, cast pointer to pointer (different type), without JOIN,
+class CheckCastPPNode: public TypeNode {
+  public:
+  CheckCastPPNode( Node *c, Node *n, const Type *t ) : TypeNode(t,2) {
+    init_class_id(Class_CheckCastPP);
+    init_req(0, c);
+    init_req(1, n);
+  }
+
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual int   Opcode() const;
+  virtual uint  ideal_reg() const { return Op_RegP; }
+  // No longer remove CheckCast after CCP as it gives me a place to hang
+  // the proper address type - which is required to compute anti-deps.
+  //virtual Node *Ideal_DU_postCCP( PhaseCCP * );
+};
+
+
+//------------------------------CastX2PNode-------------------------------------
+// convert a machine-pointer-sized integer to a raw pointer
+class CastX2PNode : public Node {
+  public:
+  CastX2PNode( Node *n ) : Node(NULL, n) {}
+  virtual int Opcode() const;
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual uint ideal_reg() const { return Op_RegP; }
+  virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
+};
+
+//------------------------------CastP2XNode-------------------------------------
+// Used in both 32-bit and 64-bit land.
+// Used for card-marks and unsafe pointer math.
+class CastP2XNode : public Node {
+  public:
+  CastP2XNode( Node *ctrl, Node *n ) : Node(ctrl, n) {}
+  virtual int Opcode() const;
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual uint ideal_reg() const { return Op_RegX; }
+  virtual const Type *bottom_type() const { return TypeX_X; }
+  // Return false to keep node from moving away from an associated card mark.
+  virtual bool depends_only_on_test() const { return false; }
+};
+
+
+
+#endif // SHARE_VM_OPTO_CASTNODE_HPP
--- a/hotspot/src/share/vm/opto/cfgnode.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -29,8 +29,11 @@
 #include "opto/addnode.hpp"
 #include "opto/cfgnode.hpp"
 #include "opto/connode.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/loopnode.hpp"
 #include "opto/machnode.hpp"
+#include "opto/movenode.hpp"
+#include "opto/narrowptrnode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/phaseX.hpp"
 #include "opto/regmask.hpp"
--- a/hotspot/src/share/vm/opto/chaitin.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/chaitin.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -37,6 +37,7 @@
 #include "opto/indexSet.hpp"
 #include "opto/machnode.hpp"
 #include "opto/memnode.hpp"
+#include "opto/movenode.hpp"
 #include "opto/opcodes.hpp"
 #include "opto/rootnode.hpp"
 
--- a/hotspot/src/share/vm/opto/classes.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/classes.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -25,17 +25,24 @@
 #include "precompiled.hpp"
 #include "opto/addnode.hpp"
 #include "opto/callnode.hpp"
+#include "opto/castnode.hpp"
 #include "opto/cfgnode.hpp"
 #include "opto/connode.hpp"
+#include "opto/convertnode.hpp"
+#include "opto/countbitsnode.hpp"
 #include "opto/divnode.hpp"
+#include "opto/intrinsicnode.hpp"
 #include "opto/locknode.hpp"
 #include "opto/loopnode.hpp"
 #include "opto/machnode.hpp"
 #include "opto/memnode.hpp"
 #include "opto/mathexactnode.hpp"
+#include "opto/movenode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/multnode.hpp"
+#include "opto/narrowptrnode.hpp"
 #include "opto/node.hpp"
+#include "opto/opaquenode.hpp"
 #include "opto/rootnode.hpp"
 #include "opto/subnode.hpp"
 #include "opto/vectornode.hpp"
--- a/hotspot/src/share/vm/opto/compile.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/compile.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -51,6 +51,7 @@
 #include "opto/mathexactnode.hpp"
 #include "opto/memnode.hpp"
 #include "opto/mulnode.hpp"
+#include "opto/narrowptrnode.hpp"
 #include "opto/node.hpp"
 #include "opto/opcodes.hpp"
 #include "opto/output.hpp"
--- a/hotspot/src/share/vm/opto/connode.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/connode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -66,1313 +66,4 @@
   return NULL;
 }
 
-//=============================================================================
-/*
-The major change is for CMoveP and StrComp.  They have related but slightly
-different problems.  They both take in TWO oops which are both null-checked
-independently before the using Node.  After CCP removes the CastPP's they need
-to pick up the guarding test edge - in this case TWO control edges.  I tried
-various solutions, all have problems:
 
-(1) Do nothing.  This leads to a bug where we hoist a Load from a CMoveP or a
-StrComp above a guarding null check.  I've seen both cases in normal -Xcomp
-testing.
-
-(2) Plug the control edge from 1 of the 2 oops in.  Apparent problem here is
-to figure out which test post-dominates.  The real problem is that it doesn't
-matter which one you pick.  After you pick up, the dominating-test elider in
-IGVN can remove the test and allow you to hoist up to the dominating test on
-the chosen oop bypassing the test on the not-chosen oop.  Seen in testing.
-Oops.
-
-(3) Leave the CastPP's in.  This makes the graph more accurate in some sense;
-we get to keep around the knowledge that an oop is not-null after some test.
-Alas, the CastPP's interfere with GVN (some values are the regular oop, some
-are the CastPP of the oop, all merge at Phi's which cannot collapse, etc).
-This cost us 10% on SpecJVM, even when I removed some of the more trivial
-cases in the optimizer.  Removing more useless Phi's started allowing Loads to
-illegally float above null checks.  I gave up on this approach.
-
-(4) Add BOTH control edges to both tests.  Alas, too much code knows that
-control edges are in slot-zero ONLY.  Many quick asserts fail; no way to do
-this one.  Note that I really want to allow the CMoveP to float and add both
-control edges to the dependent Load op - meaning I can select early but I
-cannot Load until I pass both tests.
-
-(5) Do not hoist CMoveP and StrComp.  To this end I added the v-call
-depends_only_on_test().  No obvious performance loss on Spec, but we are
-clearly conservative on CMoveP (also so on StrComp but that's unlikely to
-matter ever).
-
-*/
-
-
-//------------------------------Ideal------------------------------------------
-// Return a node which is more "ideal" than the current node.
-// Move constants to the right.
-Node *CMoveNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  if( in(0) && remove_dead_region(phase, can_reshape) ) return this;
-  // Don't bother trying to transform a dead node
-  if( in(0) && in(0)->is_top() )  return NULL;
-  assert( !phase->eqv(in(Condition), this) &&
-          !phase->eqv(in(IfFalse), this) &&
-          !phase->eqv(in(IfTrue), this), "dead loop in CMoveNode::Ideal" );
-  if( phase->type(in(Condition)) == Type::TOP )
-    return NULL; // return NULL when Condition is dead
-
-  if( in(IfFalse)->is_Con() && !in(IfTrue)->is_Con() ) {
-    if( in(Condition)->is_Bool() ) {
-      BoolNode* b  = in(Condition)->as_Bool();
-      BoolNode* b2 = b->negate(phase);
-      return make( phase->C, in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type );
-    }
-  }
-  return NULL;
-}
-
-//------------------------------is_cmove_id------------------------------------
-// Helper function to check for CMOVE identity.  Shared with PhiNode::Identity
-Node *CMoveNode::is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f, BoolNode *b ) {
-  // Check for Cmp'ing and CMove'ing same values
-  if( (phase->eqv(cmp->in(1),f) &&
-       phase->eqv(cmp->in(2),t)) ||
-      // Swapped Cmp is OK
-      (phase->eqv(cmp->in(2),f) &&
-       phase->eqv(cmp->in(1),t)) ) {
-    // Give up this identity check for floating points because it may choose incorrect
-    // value around 0.0 and -0.0
-    if ( cmp->Opcode()==Op_CmpF || cmp->Opcode()==Op_CmpD )
-      return NULL;
-    // Check for "(t==f)?t:f;" and replace with "f"
-    if( b->_test._test == BoolTest::eq )
-      return f;
-    // Allow the inverted case as well
-    // Check for "(t!=f)?t:f;" and replace with "t"
-    if( b->_test._test == BoolTest::ne )
-      return t;
-  }
-  return NULL;
-}
-
-//------------------------------Identity---------------------------------------
-// Conditional-move is an identity if both inputs are the same, or the test
-// true or false.
-Node *CMoveNode::Identity( PhaseTransform *phase ) {
-  if( phase->eqv(in(IfFalse),in(IfTrue)) ) // C-moving identical inputs?
-    return in(IfFalse);         // Then it doesn't matter
-  if( phase->type(in(Condition)) == TypeInt::ZERO )
-    return in(IfFalse);         // Always pick left(false) input
-  if( phase->type(in(Condition)) == TypeInt::ONE )
-    return in(IfTrue);          // Always pick right(true) input
-
-  // Check for CMove'ing a constant after comparing against the constant.
-  // Happens all the time now, since if we compare equality vs a constant in
-  // the parser, we "know" the variable is constant on one path and we force
-  // it.  Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
-  // conditional move: "x = (x==0)?0:x;".  Yucko.  This fix is slightly more
-  // general in that we don't need constants.
-  if( in(Condition)->is_Bool() ) {
-    BoolNode *b = in(Condition)->as_Bool();
-    Node *cmp = b->in(1);
-    if( cmp->is_Cmp() ) {
-      Node *id = is_cmove_id( phase, cmp, in(IfTrue), in(IfFalse), b );
-      if( id ) return id;
-    }
-  }
-
-  return this;
-}
-
-//------------------------------Value------------------------------------------
-// Result is the meet of inputs
-const Type *CMoveNode::Value( PhaseTransform *phase ) const {
-  if( phase->type(in(Condition)) == Type::TOP )
-    return Type::TOP;
-  return phase->type(in(IfFalse))->meet_speculative(phase->type(in(IfTrue)));
-}
-
-//------------------------------make-------------------------------------------
-// Make a correctly-flavored CMove.  Since _type is directly determined
-// from the inputs we do not need to specify it here.
-CMoveNode *CMoveNode::make( Compile *C, Node *c, Node *bol, Node *left, Node *right, const Type *t ) {
-  switch( t->basic_type() ) {
-  case T_INT:     return new (C) CMoveINode( bol, left, right, t->is_int() );
-  case T_FLOAT:   return new (C) CMoveFNode( bol, left, right, t );
-  case T_DOUBLE:  return new (C) CMoveDNode( bol, left, right, t );
-  case T_LONG:    return new (C) CMoveLNode( bol, left, right, t->is_long() );
-  case T_OBJECT:  return new (C) CMovePNode( c, bol, left, right, t->is_oopptr() );
-  case T_ADDRESS: return new (C) CMovePNode( c, bol, left, right, t->is_ptr() );
-  case T_NARROWOOP: return new (C) CMoveNNode( c, bol, left, right, t );
-  default:
-    ShouldNotReachHere();
-    return NULL;
-  }
-}
-
-//=============================================================================
-//------------------------------Ideal------------------------------------------
-// Return a node which is more "ideal" than the current node.
-// Check for conversions to boolean
-Node *CMoveINode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  // Try generic ideal's first
-  Node *x = CMoveNode::Ideal(phase, can_reshape);
-  if( x ) return x;
-
-  // If zero is on the left (false-case, no-move-case) it must mean another
-  // constant is on the right (otherwise the shared CMove::Ideal code would
-  // have moved the constant to the right).  This situation is bad for Intel
-  // and a don't-care for Sparc.  It's bad for Intel because the zero has to
-  // be manifested in a register with a XOR which kills flags, which are live
-  // on input to the CMoveI, leading to a situation which causes excessive
-  // spilling on Intel.  For Sparc, if the zero in on the left the Sparc will
-  // zero a register via G0 and conditionally-move the other constant.  If the
-  // zero is on the right, the Sparc will load the first constant with a
-  // 13-bit set-lo and conditionally move G0.  See bug 4677505.
-  if( phase->type(in(IfFalse)) == TypeInt::ZERO && !(phase->type(in(IfTrue)) == TypeInt::ZERO) ) {
-    if( in(Condition)->is_Bool() ) {
-      BoolNode* b  = in(Condition)->as_Bool();
-      BoolNode* b2 = b->negate(phase);
-      return make( phase->C, in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type );
-    }
-  }
-
-  // Now check for booleans
-  int flip = 0;
-
-  // Check for picking from zero/one
-  if( phase->type(in(IfFalse)) == TypeInt::ZERO && phase->type(in(IfTrue)) == TypeInt::ONE ) {
-    flip = 1 - flip;
-  } else if( phase->type(in(IfFalse)) == TypeInt::ONE && phase->type(in(IfTrue)) == TypeInt::ZERO ) {
-  } else return NULL;
-
-  // Check for eq/ne test
-  if( !in(1)->is_Bool() ) return NULL;
-  BoolNode *bol = in(1)->as_Bool();
-  if( bol->_test._test == BoolTest::eq ) {
-  } else if( bol->_test._test == BoolTest::ne ) {
-    flip = 1-flip;
-  } else return NULL;
-
-  // Check for vs 0 or 1
-  if( !bol->in(1)->is_Cmp() ) return NULL;
-  const CmpNode *cmp = bol->in(1)->as_Cmp();
-  if( phase->type(cmp->in(2)) == TypeInt::ZERO ) {
-  } else if( phase->type(cmp->in(2)) == TypeInt::ONE ) {
-    // Allow cmp-vs-1 if the other input is bounded by 0-1
-    if( phase->type(cmp->in(1)) != TypeInt::BOOL )
-      return NULL;
-    flip = 1 - flip;
-  } else return NULL;
-
-  // Convert to a bool (flipped)
-  // Build int->bool conversion
-#ifndef PRODUCT
-  if( PrintOpto ) tty->print_cr("CMOV to I2B");
-#endif
-  Node *n = new (phase->C) Conv2BNode( cmp->in(1) );
-  if( flip )
-    n = new (phase->C) XorINode( phase->transform(n), phase->intcon(1) );
-
-  return n;
-}
-
-//=============================================================================
-//------------------------------Ideal------------------------------------------
-// Return a node which is more "ideal" than the current node.
-// Check for absolute value
-Node *CMoveFNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  // Try generic ideal's first
-  Node *x = CMoveNode::Ideal(phase, can_reshape);
-  if( x ) return x;
-
-  int  cmp_zero_idx = 0;        // Index of compare input where to look for zero
-  int  phi_x_idx = 0;           // Index of phi input where to find naked x
-
-  // Find the Bool
-  if( !in(1)->is_Bool() ) return NULL;
-  BoolNode *bol = in(1)->as_Bool();
-  // Check bool sense
-  switch( bol->_test._test ) {
-  case BoolTest::lt: cmp_zero_idx = 1; phi_x_idx = IfTrue;  break;
-  case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = IfFalse; break;
-  case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = IfTrue;  break;
-  case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = IfFalse; break;
-  default:           return NULL;                           break;
-  }
-
-  // Find zero input of CmpF; the other input is being abs'd
-  Node *cmpf = bol->in(1);
-  if( cmpf->Opcode() != Op_CmpF ) return NULL;
-  Node *X = NULL;
-  bool flip = false;
-  if( phase->type(cmpf->in(cmp_zero_idx)) == TypeF::ZERO ) {
-    X = cmpf->in(3 - cmp_zero_idx);
-  } else if (phase->type(cmpf->in(3 - cmp_zero_idx)) == TypeF::ZERO) {
-    // The test is inverted, we should invert the result...
-    X = cmpf->in(cmp_zero_idx);
-    flip = true;
-  } else {
-    return NULL;
-  }
-
-  // If X is found on the appropriate phi input, find the subtract on the other
-  if( X != in(phi_x_idx) ) return NULL;
-  int phi_sub_idx = phi_x_idx == IfTrue ? IfFalse : IfTrue;
-  Node *sub = in(phi_sub_idx);
-
-  // Allow only SubF(0,X) and fail out for all others; NegF is not OK
-  if( sub->Opcode() != Op_SubF ||
-      sub->in(2) != X ||
-      phase->type(sub->in(1)) != TypeF::ZERO ) return NULL;
-
-  Node *abs = new (phase->C) AbsFNode( X );
-  if( flip )
-    abs = new (phase->C) SubFNode(sub->in(1), phase->transform(abs));
-
-  return abs;
-}
-
-//=============================================================================
-//------------------------------Ideal------------------------------------------
-// Return a node which is more "ideal" than the current node.
-// Check for absolute value
-Node *CMoveDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  // Try generic ideal's first
-  Node *x = CMoveNode::Ideal(phase, can_reshape);
-  if( x ) return x;
-
-  int  cmp_zero_idx = 0;        // Index of compare input where to look for zero
-  int  phi_x_idx = 0;           // Index of phi input where to find naked x
-
-  // Find the Bool
-  if( !in(1)->is_Bool() ) return NULL;
-  BoolNode *bol = in(1)->as_Bool();
-  // Check bool sense
-  switch( bol->_test._test ) {
-  case BoolTest::lt: cmp_zero_idx = 1; phi_x_idx = IfTrue;  break;
-  case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = IfFalse; break;
-  case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = IfTrue;  break;
-  case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = IfFalse; break;
-  default:           return NULL;                           break;
-  }
-
-  // Find zero input of CmpD; the other input is being abs'd
-  Node *cmpd = bol->in(1);
-  if( cmpd->Opcode() != Op_CmpD ) return NULL;
-  Node *X = NULL;
-  bool flip = false;
-  if( phase->type(cmpd->in(cmp_zero_idx)) == TypeD::ZERO ) {
-    X = cmpd->in(3 - cmp_zero_idx);
-  } else if (phase->type(cmpd->in(3 - cmp_zero_idx)) == TypeD::ZERO) {
-    // The test is inverted, we should invert the result...
-    X = cmpd->in(cmp_zero_idx);
-    flip = true;
-  } else {
-    return NULL;
-  }
-
-  // If X is found on the appropriate phi input, find the subtract on the other
-  if( X != in(phi_x_idx) ) return NULL;
-  int phi_sub_idx = phi_x_idx == IfTrue ? IfFalse : IfTrue;
-  Node *sub = in(phi_sub_idx);
-
-  // Allow only SubD(0,X) and fail out for all others; NegD is not OK
-  if( sub->Opcode() != Op_SubD ||
-      sub->in(2) != X ||
-      phase->type(sub->in(1)) != TypeD::ZERO ) return NULL;
-
-  Node *abs = new (phase->C) AbsDNode( X );
-  if( flip )
-    abs = new (phase->C) SubDNode(sub->in(1), phase->transform(abs));
-
-  return abs;
-}
-
-
-//=============================================================================
-// If input is already higher or equal to cast type, then this is an identity.
-Node *ConstraintCastNode::Identity( PhaseTransform *phase ) {
-  return phase->type(in(1))->higher_equal_speculative(_type) ? in(1) : this;
-}
-
-//------------------------------Value------------------------------------------
-// Take 'join' of input and cast-up type
-const Type *ConstraintCastNode::Value( PhaseTransform *phase ) const {
-  if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
-const Type* ft = phase->type(in(1))->filter_speculative(_type);
-
-#ifdef ASSERT
-  // Previous versions of this function had some special case logic,
-  // which is no longer necessary.  Make sure of the required effects.
-  switch (Opcode()) {
-  case Op_CastII:
-    {
-      const Type* t1 = phase->type(in(1));
-      if( t1 == Type::TOP )  assert(ft == Type::TOP, "special case #1");
-      const Type* rt = t1->join_speculative(_type);
-      if (rt->empty())       assert(ft == Type::TOP, "special case #2");
-      break;
-    }
-  case Op_CastPP:
-    if (phase->type(in(1)) == TypePtr::NULL_PTR &&
-        _type->isa_ptr() && _type->is_ptr()->_ptr == TypePtr::NotNull)
-      assert(ft == Type::TOP, "special case #3");
-    break;
-  }
-#endif //ASSERT
-
-  return ft;
-}
-
-//------------------------------Ideal------------------------------------------
-// Return a node which is more "ideal" than the current node.  Strip out
-// control copies
-Node *ConstraintCastNode::Ideal(PhaseGVN *phase, bool can_reshape){
-  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
-}
-
-//------------------------------Ideal_DU_postCCP-------------------------------
-// Throw away cast after constant propagation
-Node *ConstraintCastNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
-  const Type *t = ccp->type(in(1));
-  ccp->hash_delete(this);
-  set_type(t);                   // Turn into ID function
-  ccp->hash_insert(this);
-  return this;
-}
-
-
-//=============================================================================
-
-//------------------------------Ideal_DU_postCCP-------------------------------
-// If not converting int->oop, throw away cast after constant propagation
-Node *CastPPNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
-  const Type *t = ccp->type(in(1));
-  if (!t->isa_oop_ptr() || ((in(1)->is_DecodeN()) && Matcher::gen_narrow_oop_implicit_null_checks())) {
-    return NULL; // do not transform raw pointers or narrow oops
-  }
-  return ConstraintCastNode::Ideal_DU_postCCP(ccp);
-}
-
-
-
-//=============================================================================
-//------------------------------Identity---------------------------------------
-// If input is already higher or equal to cast type, then this is an identity.
-Node *CheckCastPPNode::Identity( PhaseTransform *phase ) {
-  // Toned down to rescue meeting at a Phi 3 different oops all implementing
-  // the same interface.  CompileTheWorld starting at 502, kd12rc1.zip.
-  return (phase->type(in(1)) == phase->type(this)) ? in(1) : this;
-}
-
-//------------------------------Value------------------------------------------
-// Take 'join' of input and cast-up type, unless working with an Interface
-const Type *CheckCastPPNode::Value( PhaseTransform *phase ) const {
-  if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
-
-  const Type *inn = phase->type(in(1));
-  if( inn == Type::TOP ) return Type::TOP;  // No information yet
-
-  const TypePtr *in_type   = inn->isa_ptr();
-  const TypePtr *my_type   = _type->isa_ptr();
-  const Type *result = _type;
-  if( in_type != NULL && my_type != NULL ) {
-    TypePtr::PTR   in_ptr    = in_type->ptr();
-    if( in_ptr == TypePtr::Null ) {
-      result = in_type;
-    } else if( in_ptr == TypePtr::Constant ) {
-      // Casting a constant oop to an interface?
-      // (i.e., a String to a Comparable?)
-      // Then return the interface.
-      const TypeOopPtr *jptr = my_type->isa_oopptr();
-      assert( jptr, "" );
-      result =  (jptr->klass()->is_interface() || !in_type->higher_equal(_type))
-        ? my_type->cast_to_ptr_type( TypePtr::NotNull )
-        : in_type;
-    } else {
-      result =  my_type->cast_to_ptr_type( my_type->join_ptr(in_ptr) );
-    }
-  }
-  return result;
-
-  // JOIN NOT DONE HERE BECAUSE OF INTERFACE ISSUES.
-  // FIX THIS (DO THE JOIN) WHEN UNION TYPES APPEAR!
-
-  //
-  // Remove this code after overnight run indicates no performance
-  // loss from not performing JOIN at CheckCastPPNode
-  //
-  // const TypeInstPtr *in_oop = in->isa_instptr();
-  // const TypeInstPtr *my_oop = _type->isa_instptr();
-  // // If either input is an 'interface', return destination type
-  // assert (in_oop == NULL || in_oop->klass() != NULL, "");
-  // assert (my_oop == NULL || my_oop->klass() != NULL, "");
-  // if( (in_oop && in_oop->klass()->is_interface())
-  //   ||(my_oop && my_oop->klass()->is_interface()) ) {
-  //   TypePtr::PTR  in_ptr = in->isa_ptr() ? in->is_ptr()->_ptr : TypePtr::BotPTR;
-  //   // Preserve cast away nullness for interfaces
-  //   if( in_ptr == TypePtr::NotNull && my_oop && my_oop->_ptr == TypePtr::BotPTR ) {
-  //     return my_oop->cast_to_ptr_type(TypePtr::NotNull);
-  //   }
-  //   return _type;
-  // }
-  //
-  // // Neither the input nor the destination type is an interface,
-  //
-  // // history: JOIN used to cause weird corner case bugs
-  // //          return (in == TypeOopPtr::NULL_PTR) ? in : _type;
-  // // JOIN picks up NotNull in common instance-of/check-cast idioms, both oops.
-  // // JOIN does not preserve NotNull in other cases, e.g. RawPtr vs InstPtr
-  // const Type *join = in->join(_type);
-  // // Check if join preserved NotNull'ness for pointers
-  // if( join->isa_ptr() && _type->isa_ptr() ) {
-  //   TypePtr::PTR join_ptr = join->is_ptr()->_ptr;
-  //   TypePtr::PTR type_ptr = _type->is_ptr()->_ptr;
-  //   // If there isn't any NotNull'ness to preserve
-  //   // OR if join preserved NotNull'ness then return it
-  //   if( type_ptr == TypePtr::BotPTR  || type_ptr == TypePtr::Null ||
-  //       join_ptr == TypePtr::NotNull || join_ptr == TypePtr::Constant ) {
-  //     return join;
-  //   }
-  //   // ELSE return same old type as before
-  //   return _type;
-  // }
-  // // Not joining two pointers
-  // return join;
-}
-
-//------------------------------Ideal------------------------------------------
-// Return a node which is more "ideal" than the current node.  Strip out
-// control copies
-Node *CheckCastPPNode::Ideal(PhaseGVN *phase, bool can_reshape){
-  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
-}
-
-
-Node* DecodeNNode::Identity(PhaseTransform* phase) {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return in(1);
-
-  if (in(1)->is_EncodeP()) {
-    // (DecodeN (EncodeP p)) -> p
-    return in(1)->in(1);
-  }
-  return this;
-}
-
-const Type *DecodeNNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if (t == Type::TOP) return Type::TOP;
-  if (t == TypeNarrowOop::NULL_PTR) return TypePtr::NULL_PTR;
-
-  assert(t->isa_narrowoop(), "only  narrowoop here");
-  return t->make_ptr();
-}
-
-Node* EncodePNode::Identity(PhaseTransform* phase) {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return in(1);
-
-  if (in(1)->is_DecodeN()) {
-    // (EncodeP (DecodeN p)) -> p
-    return in(1)->in(1);
-  }
-  return this;
-}
-
-const Type *EncodePNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if (t == Type::TOP) return Type::TOP;
-  if (t == TypePtr::NULL_PTR) return TypeNarrowOop::NULL_PTR;
-
-  assert(t->isa_oop_ptr(), "only oopptr here");
-  return t->make_narrowoop();
-}
-
-
-Node *EncodeNarrowPtrNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
-  return MemNode::Ideal_common_DU_postCCP(ccp, this, in(1));
-}
-
-Node* DecodeNKlassNode::Identity(PhaseTransform* phase) {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return in(1);
-
-  if (in(1)->is_EncodePKlass()) {
-    // (DecodeNKlass (EncodePKlass p)) -> p
-    return in(1)->in(1);
-  }
-  return this;
-}
-
-const Type *DecodeNKlassNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if (t == Type::TOP) return Type::TOP;
-  assert(t != TypeNarrowKlass::NULL_PTR, "null klass?");
-
-  assert(t->isa_narrowklass(), "only narrow klass ptr here");
-  return t->make_ptr();
-}
-
-Node* EncodePKlassNode::Identity(PhaseTransform* phase) {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return in(1);
-
-  if (in(1)->is_DecodeNKlass()) {
-    // (EncodePKlass (DecodeNKlass p)) -> p
-    return in(1)->in(1);
-  }
-  return this;
-}
-
-const Type *EncodePKlassNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if (t == Type::TOP) return Type::TOP;
-  assert (t != TypePtr::NULL_PTR, "null klass?");
-
-  assert(UseCompressedClassPointers && t->isa_klassptr(), "only klass ptr here");
-  return t->make_narrowklass();
-}
-
-
-//=============================================================================
-//------------------------------Identity---------------------------------------
-Node *Conv2BNode::Identity( PhaseTransform *phase ) {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return in(1);
-  if( t == TypeInt::ZERO ) return in(1);
-  if( t == TypeInt::ONE ) return in(1);
-  if( t == TypeInt::BOOL ) return in(1);
-  return this;
-}
-
-//------------------------------Value------------------------------------------
-const Type *Conv2BNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return Type::TOP;
-  if( t == TypeInt::ZERO ) return TypeInt::ZERO;
-  if( t == TypePtr::NULL_PTR ) return TypeInt::ZERO;
-  const TypePtr *tp = t->isa_ptr();
-  if( tp != NULL ) {
-    if( tp->ptr() == TypePtr::AnyNull ) return Type::TOP;
-    if( tp->ptr() == TypePtr::Constant) return TypeInt::ONE;
-    if (tp->ptr() == TypePtr::NotNull)  return TypeInt::ONE;
-    return TypeInt::BOOL;
-  }
-  if (t->base() != Type::Int) return TypeInt::BOOL;
-  const TypeInt *ti = t->is_int();
-  if( ti->_hi < 0 || ti->_lo > 0 ) return TypeInt::ONE;
-  return TypeInt::BOOL;
-}
-
-
-// The conversions operations are all Alpha sorted.  Please keep it that way!
-//=============================================================================
-//------------------------------Value------------------------------------------
-const Type *ConvD2FNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return Type::TOP;
-  if( t == Type::DOUBLE ) return Type::FLOAT;
-  const TypeD *td = t->is_double_constant();
-  return TypeF::make( (float)td->getd() );
-}
-
-//------------------------------Identity---------------------------------------
-// Float's can be converted to doubles with no loss of bits.  Hence
-// converting a float to a double and back to a float is a NOP.
-Node *ConvD2FNode::Identity(PhaseTransform *phase) {
-  return (in(1)->Opcode() == Op_ConvF2D) ? in(1)->in(1) : this;
-}
-
-//=============================================================================
-//------------------------------Value------------------------------------------
-const Type *ConvD2INode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return Type::TOP;
-  if( t == Type::DOUBLE ) return TypeInt::INT;
-  const TypeD *td = t->is_double_constant();
-  return TypeInt::make( SharedRuntime::d2i( td->getd() ) );
-}
-
-//------------------------------Ideal------------------------------------------
-// If converting to an int type, skip any rounding nodes
-Node *ConvD2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  if( in(1)->Opcode() == Op_RoundDouble )
-    set_req(1,in(1)->in(1));
-  return NULL;
-}
-
-//------------------------------Identity---------------------------------------
-// Int's can be converted to doubles with no loss of bits.  Hence
-// converting an integer to a double and back to an integer is a NOP.
-Node *ConvD2INode::Identity(PhaseTransform *phase) {
-  return (in(1)->Opcode() == Op_ConvI2D) ? in(1)->in(1) : this;
-}
-
-//=============================================================================
-//------------------------------Value------------------------------------------
-const Type *ConvD2LNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return Type::TOP;
-  if( t == Type::DOUBLE ) return TypeLong::LONG;
-  const TypeD *td = t->is_double_constant();
-  return TypeLong::make( SharedRuntime::d2l( td->getd() ) );
-}
-
-//------------------------------Identity---------------------------------------
-Node *ConvD2LNode::Identity(PhaseTransform *phase) {
-  // Remove ConvD2L->ConvL2D->ConvD2L sequences.
-  if( in(1)       ->Opcode() == Op_ConvL2D &&
-      in(1)->in(1)->Opcode() == Op_ConvD2L )
-    return in(1)->in(1);
-  return this;
-}
-
-//------------------------------Ideal------------------------------------------
-// If converting to an int type, skip any rounding nodes
-Node *ConvD2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  if( in(1)->Opcode() == Op_RoundDouble )
-    set_req(1,in(1)->in(1));
-  return NULL;
-}
-
-//=============================================================================
-//------------------------------Value------------------------------------------
-const Type *ConvF2DNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return Type::TOP;
-  if( t == Type::FLOAT ) return Type::DOUBLE;
-  const TypeF *tf = t->is_float_constant();
-  return TypeD::make( (double)tf->getf() );
-}
-
-//=============================================================================
-//------------------------------Value------------------------------------------
-const Type *ConvF2INode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP )       return Type::TOP;
-  if( t == Type::FLOAT ) return TypeInt::INT;
-  const TypeF *tf = t->is_float_constant();
-  return TypeInt::make( SharedRuntime::f2i( tf->getf() ) );
-}
-
-//------------------------------Identity---------------------------------------
-Node *ConvF2INode::Identity(PhaseTransform *phase) {
-  // Remove ConvF2I->ConvI2F->ConvF2I sequences.
-  if( in(1)       ->Opcode() == Op_ConvI2F &&
-      in(1)->in(1)->Opcode() == Op_ConvF2I )
-    return in(1)->in(1);
-  return this;
-}
-
-//------------------------------Ideal------------------------------------------
-// If converting to an int type, skip any rounding nodes
-Node *ConvF2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  if( in(1)->Opcode() == Op_RoundFloat )
-    set_req(1,in(1)->in(1));
-  return NULL;
-}
-
-//=============================================================================
-//------------------------------Value------------------------------------------
-const Type *ConvF2LNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP )       return Type::TOP;
-  if( t == Type::FLOAT ) return TypeLong::LONG;
-  const TypeF *tf = t->is_float_constant();
-  return TypeLong::make( SharedRuntime::f2l( tf->getf() ) );
-}
-
-//------------------------------Identity---------------------------------------
-Node *ConvF2LNode::Identity(PhaseTransform *phase) {
-  // Remove ConvF2L->ConvL2F->ConvF2L sequences.
-  if( in(1)       ->Opcode() == Op_ConvL2F &&
-      in(1)->in(1)->Opcode() == Op_ConvF2L )
-    return in(1)->in(1);
-  return this;
-}
-
-//------------------------------Ideal------------------------------------------
-// If converting to an int type, skip any rounding nodes
-Node *ConvF2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  if( in(1)->Opcode() == Op_RoundFloat )
-    set_req(1,in(1)->in(1));
-  return NULL;
-}
-
-//=============================================================================
-//------------------------------Value------------------------------------------
-const Type *ConvI2DNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return Type::TOP;
-  const TypeInt *ti = t->is_int();
-  if( ti->is_con() ) return TypeD::make( (double)ti->get_con() );
-  return bottom_type();
-}
-
-//=============================================================================
-//------------------------------Value------------------------------------------
-const Type *ConvI2FNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return Type::TOP;
-  const TypeInt *ti = t->is_int();
-  if( ti->is_con() ) return TypeF::make( (float)ti->get_con() );
-  return bottom_type();
-}
-
-//------------------------------Identity---------------------------------------
-Node *ConvI2FNode::Identity(PhaseTransform *phase) {
-  // Remove ConvI2F->ConvF2I->ConvI2F sequences.
-  if( in(1)       ->Opcode() == Op_ConvF2I &&
-      in(1)->in(1)->Opcode() == Op_ConvI2F )
-    return in(1)->in(1);
-  return this;
-}
-
-//=============================================================================
-//------------------------------Value------------------------------------------
-const Type *ConvI2LNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return Type::TOP;
-  const TypeInt *ti = t->is_int();
-  const Type* tl = TypeLong::make(ti->_lo, ti->_hi, ti->_widen);
-  // Join my declared type against my incoming type.
-  tl = tl->filter(_type);
-  return tl;
-}
-
-#ifdef _LP64
-static inline bool long_ranges_overlap(jlong lo1, jlong hi1,
-                                       jlong lo2, jlong hi2) {
-  // Two ranges overlap iff one range's low point falls in the other range.
-  return (lo2 <= lo1 && lo1 <= hi2) || (lo1 <= lo2 && lo2 <= hi1);
-}
-#endif
-
-//------------------------------Ideal------------------------------------------
-Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  const TypeLong* this_type = this->type()->is_long();
-  Node* this_changed = NULL;
-
-  // If _major_progress, then more loop optimizations follow.  Do NOT
-  // remove this node's type assertion until no more loop ops can happen.
-  // The progress bit is set in the major loop optimizations THEN comes the
-  // call to IterGVN and any chance of hitting this code.  Cf. Opaque1Node.
-  if (can_reshape && !phase->C->major_progress()) {
-    const TypeInt* in_type = phase->type(in(1))->isa_int();
-    if (in_type != NULL && this_type != NULL &&
-        (in_type->_lo != this_type->_lo ||
-         in_type->_hi != this_type->_hi)) {
-      // Although this WORSENS the type, it increases GVN opportunities,
-      // because I2L nodes with the same input will common up, regardless
-      // of slightly differing type assertions.  Such slight differences
-      // arise routinely as a result of loop unrolling, so this is a
-      // post-unrolling graph cleanup.  Choose a type which depends only
-      // on my input.  (Exception:  Keep a range assertion of >=0 or <0.)
-      jlong lo1 = this_type->_lo;
-      jlong hi1 = this_type->_hi;
-      int   w1  = this_type->_widen;
-      if (lo1 != (jint)lo1 ||
-          hi1 != (jint)hi1 ||
-          lo1 > hi1) {
-        // Overflow leads to wraparound, wraparound leads to range saturation.
-        lo1 = min_jint; hi1 = max_jint;
-      } else if (lo1 >= 0) {
-        // Keep a range assertion of >=0.
-        lo1 = 0;        hi1 = max_jint;
-      } else if (hi1 < 0) {
-        // Keep a range assertion of <0.
-        lo1 = min_jint; hi1 = -1;
-      } else {
-        lo1 = min_jint; hi1 = max_jint;
-      }
-      const TypeLong* wtype = TypeLong::make(MAX2((jlong)in_type->_lo, lo1),
-                                             MIN2((jlong)in_type->_hi, hi1),
-                                             MAX2((int)in_type->_widen, w1));
-      if (wtype != type()) {
-        set_type(wtype);
-        // Note: this_type still has old type value, for the logic below.
-        this_changed = this;
-      }
-    }
-  }
-
-#ifdef _LP64
-  // Convert ConvI2L(AddI(x, y)) to AddL(ConvI2L(x), ConvI2L(y)) ,
-  // but only if x and y have subranges that cannot cause 32-bit overflow,
-  // under the assumption that x+y is in my own subrange this->type().
-
-  // This assumption is based on a constraint (i.e., type assertion)
-  // established in Parse::array_addressing or perhaps elsewhere.
-  // This constraint has been adjoined to the "natural" type of
-  // the incoming argument in(0).  We know (because of runtime
-  // checks) - that the result value I2L(x+y) is in the joined range.
-  // Hence we can restrict the incoming terms (x, y) to values such
-  // that their sum also lands in that range.
-
-  // This optimization is useful only on 64-bit systems, where we hope
-  // the addition will end up subsumed in an addressing mode.
-  // It is necessary to do this when optimizing an unrolled array
-  // copy loop such as x[i++] = y[i++].
-
-  // On 32-bit systems, it's better to perform as much 32-bit math as
-  // possible before the I2L conversion, because 32-bit math is cheaper.
-  // There's no common reason to "leak" a constant offset through the I2L.
-  // Addressing arithmetic will not absorb it as part of a 64-bit AddL.
-
-  Node* z = in(1);
-  int op = z->Opcode();
-  if (op == Op_AddI || op == Op_SubI) {
-    Node* x = z->in(1);
-    Node* y = z->in(2);
-    assert (x != z && y != z, "dead loop in ConvI2LNode::Ideal");
-    if (phase->type(x) == Type::TOP)  return this_changed;
-    if (phase->type(y) == Type::TOP)  return this_changed;
-    const TypeInt*  tx = phase->type(x)->is_int();
-    const TypeInt*  ty = phase->type(y)->is_int();
-    const TypeLong* tz = this_type;
-    jlong xlo = tx->_lo;
-    jlong xhi = tx->_hi;
-    jlong ylo = ty->_lo;
-    jlong yhi = ty->_hi;
-    jlong zlo = tz->_lo;
-    jlong zhi = tz->_hi;
-    jlong vbit = CONST64(1) << BitsPerInt;
-    int widen =  MAX2(tx->_widen, ty->_widen);
-    if (op == Op_SubI) {
-      jlong ylo0 = ylo;
-      ylo = -yhi;
-      yhi = -ylo0;
-    }
-    // See if x+y can cause positive overflow into z+2**32
-    if (long_ranges_overlap(xlo+ylo, xhi+yhi, zlo+vbit, zhi+vbit)) {
-      return this_changed;
-    }
-    // See if x+y can cause negative overflow into z-2**32
-    if (long_ranges_overlap(xlo+ylo, xhi+yhi, zlo-vbit, zhi-vbit)) {
-      return this_changed;
-    }
-    // Now it's always safe to assume x+y does not overflow.
-    // This is true even if some pairs x,y might cause overflow, as long
-    // as that overflow value cannot fall into [zlo,zhi].
-
-    // Confident that the arithmetic is "as if infinite precision",
-    // we can now use z's range to put constraints on those of x and y.
-    // The "natural" range of x [xlo,xhi] can perhaps be narrowed to a
-    // more "restricted" range by intersecting [xlo,xhi] with the
-    // range obtained by subtracting y's range from the asserted range
-    // of the I2L conversion.  Here's the interval arithmetic algebra:
-    //    x == z-y == [zlo,zhi]-[ylo,yhi] == [zlo,zhi]+[-yhi,-ylo]
-    //    => x in [zlo-yhi, zhi-ylo]
-    //    => x in [zlo-yhi, zhi-ylo] INTERSECT [xlo,xhi]
-    //    => x in [xlo MAX zlo-yhi, xhi MIN zhi-ylo]
-    jlong rxlo = MAX2(xlo, zlo - yhi);
-    jlong rxhi = MIN2(xhi, zhi - ylo);
-    // And similarly, x changing place with y:
-    jlong rylo = MAX2(ylo, zlo - xhi);
-    jlong ryhi = MIN2(yhi, zhi - xlo);
-    if (rxlo > rxhi || rylo > ryhi) {
-      return this_changed;  // x or y is dying; don't mess w/ it
-    }
-    if (op == Op_SubI) {
-      jlong rylo0 = rylo;
-      rylo = -ryhi;
-      ryhi = -rylo0;
-    }
-
-    Node* cx = phase->transform( new (phase->C) ConvI2LNode(x, TypeLong::make(rxlo, rxhi, widen)) );
-    Node* cy = phase->transform( new (phase->C) ConvI2LNode(y, TypeLong::make(rylo, ryhi, widen)) );
-    switch (op) {
-    case Op_AddI:  return new (phase->C) AddLNode(cx, cy);
-    case Op_SubI:  return new (phase->C) SubLNode(cx, cy);
-    default:       ShouldNotReachHere();
-    }
-  }
-#endif //_LP64
-
-  return this_changed;
-}
-
-//=============================================================================
-//------------------------------Value------------------------------------------
-const Type *ConvL2DNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return Type::TOP;
-  const TypeLong *tl = t->is_long();
-  if( tl->is_con() ) return TypeD::make( (double)tl->get_con() );
-  return bottom_type();
-}
-
-//=============================================================================
-//------------------------------Value------------------------------------------
-const Type *ConvL2FNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return Type::TOP;
-  const TypeLong *tl = t->is_long();
-  if( tl->is_con() ) return TypeF::make( (float)tl->get_con() );
-  return bottom_type();
-}
-
-//=============================================================================
-//----------------------------Identity-----------------------------------------
-Node *ConvL2INode::Identity( PhaseTransform *phase ) {
-  // Convert L2I(I2L(x)) => x
-  if (in(1)->Opcode() == Op_ConvI2L)  return in(1)->in(1);
-  return this;
-}
-
-//------------------------------Value------------------------------------------
-const Type *ConvL2INode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return Type::TOP;
-  const TypeLong *tl = t->is_long();
-  if (tl->is_con())
-    // Easy case.
-    return TypeInt::make((jint)tl->get_con());
-  return bottom_type();
-}
-
-//------------------------------Ideal------------------------------------------
-// Return a node which is more "ideal" than the current node.
-// Blow off prior masking to int
-Node *ConvL2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  Node *andl = in(1);
-  uint andl_op = andl->Opcode();
-  if( andl_op == Op_AndL ) {
-    // Blow off prior masking to int
-    if( phase->type(andl->in(2)) == TypeLong::make( 0xFFFFFFFF ) ) {
-      set_req(1,andl->in(1));
-      return this;
-    }
-  }
-
-  // Swap with a prior add: convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
-  // This replaces an 'AddL' with an 'AddI'.
-  if( andl_op == Op_AddL ) {
-    // Don't do this for nodes which have more than one user since
-    // we'll end up computing the long add anyway.
-    if (andl->outcnt() > 1) return NULL;
-
-    Node* x = andl->in(1);
-    Node* y = andl->in(2);
-    assert( x != andl && y != andl, "dead loop in ConvL2INode::Ideal" );
-    if (phase->type(x) == Type::TOP)  return NULL;
-    if (phase->type(y) == Type::TOP)  return NULL;
-    Node *add1 = phase->transform(new (phase->C) ConvL2INode(x));
-    Node *add2 = phase->transform(new (phase->C) ConvL2INode(y));
-    return new (phase->C) AddINode(add1,add2);
-  }
-
-  // Disable optimization: LoadL->ConvL2I ==> LoadI.
-  // It causes problems (sizes of Load and Store nodes do not match)
-  // in objects initialization code and Escape Analysis.
-  return NULL;
-}
-
-//=============================================================================
-//------------------------------Value------------------------------------------
-const Type *CastX2PNode::Value( PhaseTransform *phase ) const {
-  const Type* t = phase->type(in(1));
-  if (t == Type::TOP) return Type::TOP;
-  if (t->base() == Type_X && t->singleton()) {
-    uintptr_t bits = (uintptr_t) t->is_intptr_t()->get_con();
-    if (bits == 0)   return TypePtr::NULL_PTR;
-    return TypeRawPtr::make((address) bits);
-  }
-  return CastX2PNode::bottom_type();
-}
-
-//------------------------------Idealize---------------------------------------
-static inline bool fits_in_int(const Type* t, bool but_not_min_int = false) {
-  if (t == Type::TOP)  return false;
-  const TypeX* tl = t->is_intptr_t();
-  jint lo = min_jint;
-  jint hi = max_jint;
-  if (but_not_min_int)  ++lo;  // caller wants to negate the value w/o overflow
-  return (tl->_lo >= lo) && (tl->_hi <= hi);
-}
-
-static inline Node* addP_of_X2P(PhaseGVN *phase,
-                                Node* base,
-                                Node* dispX,
-                                bool negate = false) {
-  if (negate) {
-    dispX = new (phase->C) SubXNode(phase->MakeConX(0), phase->transform(dispX));
-  }
-  return new (phase->C) AddPNode(phase->C->top(),
-                          phase->transform(new (phase->C) CastX2PNode(base)),
-                          phase->transform(dispX));
-}
-
-Node *CastX2PNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  // convert CastX2P(AddX(x, y)) to AddP(CastX2P(x), y) if y fits in an int
-  int op = in(1)->Opcode();
-  Node* x;
-  Node* y;
-  switch (op) {
-  case Op_SubX:
-    x = in(1)->in(1);
-    // Avoid ideal transformations ping-pong between this and AddP for raw pointers.
-    if (phase->find_intptr_t_con(x, -1) == 0)
-      break;
-    y = in(1)->in(2);
-    if (fits_in_int(phase->type(y), true)) {
-      return addP_of_X2P(phase, x, y, true);
-    }
-    break;
-  case Op_AddX:
-    x = in(1)->in(1);
-    y = in(1)->in(2);
-    if (fits_in_int(phase->type(y))) {
-      return addP_of_X2P(phase, x, y);
-    }
-    if (fits_in_int(phase->type(x))) {
-      return addP_of_X2P(phase, y, x);
-    }
-    break;
-  }
-  return NULL;
-}
-
-//------------------------------Identity---------------------------------------
-Node *CastX2PNode::Identity( PhaseTransform *phase ) {
-  if (in(1)->Opcode() == Op_CastP2X)  return in(1)->in(1);
-  return this;
-}
-
-//=============================================================================
-//------------------------------Value------------------------------------------
-const Type *CastP2XNode::Value( PhaseTransform *phase ) const {
-  const Type* t = phase->type(in(1));
-  if (t == Type::TOP) return Type::TOP;
-  if (t->base() == Type::RawPtr && t->singleton()) {
-    uintptr_t bits = (uintptr_t) t->is_rawptr()->get_con();
-    return TypeX::make(bits);
-  }
-  return CastP2XNode::bottom_type();
-}
-
-Node *CastP2XNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL;
-}
-
-//------------------------------Identity---------------------------------------
-Node *CastP2XNode::Identity( PhaseTransform *phase ) {
-  if (in(1)->Opcode() == Op_CastX2P)  return in(1)->in(1);
-  return this;
-}
-
-
-//=============================================================================
-//------------------------------Identity---------------------------------------
-// Remove redundant roundings
-Node *RoundFloatNode::Identity( PhaseTransform *phase ) {
-  assert(Matcher::strict_fp_requires_explicit_rounding, "should only generate for Intel");
-  // Do not round constants
-  if (phase->type(in(1))->base() == Type::FloatCon)  return in(1);
-  int op = in(1)->Opcode();
-  // Redundant rounding
-  if( op == Op_RoundFloat ) return in(1);
-  // Already rounded
-  if( op == Op_Parm ) return in(1);
-  if( op == Op_LoadF ) return in(1);
-  return this;
-}
-
-//------------------------------Value------------------------------------------
-const Type *RoundFloatNode::Value( PhaseTransform *phase ) const {
-  return phase->type( in(1) );
-}
-
-//=============================================================================
-//------------------------------Identity---------------------------------------
-// Remove redundant roundings.  Incoming arguments are already rounded.
-Node *RoundDoubleNode::Identity( PhaseTransform *phase ) {
-  assert(Matcher::strict_fp_requires_explicit_rounding, "should only generate for Intel");
-  // Do not round constants
-  if (phase->type(in(1))->base() == Type::DoubleCon)  return in(1);
-  int op = in(1)->Opcode();
-  // Redundant rounding
-  if( op == Op_RoundDouble ) return in(1);
-  // Already rounded
-  if( op == Op_Parm ) return in(1);
-  if( op == Op_LoadD ) return in(1);
-  if( op == Op_ConvF2D ) return in(1);
-  if( op == Op_ConvI2D ) return in(1);
-  return this;
-}
-
-//------------------------------Value------------------------------------------
-const Type *RoundDoubleNode::Value( PhaseTransform *phase ) const {
-  return phase->type( in(1) );
-}
-
-
-//=============================================================================
-// Do not allow value-numbering
-uint Opaque1Node::hash() const { return NO_HASH; }
-uint Opaque1Node::cmp( const Node &n ) const {
-  return (&n == this);          // Always fail except on self
-}
-
-//------------------------------Identity---------------------------------------
-// If _major_progress, then more loop optimizations follow.  Do NOT remove
-// the opaque Node until no more loop ops can happen.  Note the timing of
-// _major_progress; it's set in the major loop optimizations THEN comes the
-// call to IterGVN and any chance of hitting this code.  Hence there's no
-// phase-ordering problem with stripping Opaque1 in IGVN followed by some
-// more loop optimizations that require it.
-Node *Opaque1Node::Identity( PhaseTransform *phase ) {
-  return phase->C->major_progress() ? this : in(1);
-}
-
-//=============================================================================
-// A node to prevent unwanted optimizations.  Allows constant folding.  Stops
-// value-numbering, most Ideal calls or Identity functions.  This Node is
-// specifically designed to prevent the pre-increment value of a loop trip
-// counter from being live out of the bottom of the loop (hence causing the
-// pre- and post-increment values both being live and thus requiring an extra
-// temp register and an extra move).  If we "accidentally" optimize through
-// this kind of a Node, we'll get slightly pessimal, but correct, code.  Thus
-// it's OK to be slightly sloppy on optimizations here.
-
-// Do not allow value-numbering
-uint Opaque2Node::hash() const { return NO_HASH; }
-uint Opaque2Node::cmp( const Node &n ) const {
-  return (&n == this);          // Always fail except on self
-}
-
-
-//------------------------------Value------------------------------------------
-const Type *MoveL2DNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return Type::TOP;
-  const TypeLong *tl = t->is_long();
-  if( !tl->is_con() ) return bottom_type();
-  JavaValue v;
-  v.set_jlong(tl->get_con());
-  return TypeD::make( v.get_jdouble() );
-}
-
-//------------------------------Value------------------------------------------
-const Type *MoveI2FNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return Type::TOP;
-  const TypeInt *ti = t->is_int();
-  if( !ti->is_con() )   return bottom_type();
-  JavaValue v;
-  v.set_jint(ti->get_con());
-  return TypeF::make( v.get_jfloat() );
-}
-
-//------------------------------Value------------------------------------------
-const Type *MoveF2INode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP )       return Type::TOP;
-  if( t == Type::FLOAT ) return TypeInt::INT;
-  const TypeF *tf = t->is_float_constant();
-  JavaValue v;
-  v.set_jfloat(tf->getf());
-  return TypeInt::make( v.get_jint() );
-}
-
-//------------------------------Value------------------------------------------
-const Type *MoveD2LNode::Value( PhaseTransform *phase ) const {
-  const Type *t = phase->type( in(1) );
-  if( t == Type::TOP ) return Type::TOP;
-  if( t == Type::DOUBLE ) return TypeLong::LONG;
-  const TypeD *td = t->is_double_constant();
-  JavaValue v;
-  v.set_jdouble(td->getd());
-  return TypeLong::make( v.get_jlong() );
-}
-
-//------------------------------Value------------------------------------------
-const Type* CountLeadingZerosINode::Value(PhaseTransform* phase) const {
-  const Type* t = phase->type(in(1));
-  if (t == Type::TOP) return Type::TOP;
-  const TypeInt* ti = t->isa_int();
-  if (ti && ti->is_con()) {
-    jint i = ti->get_con();
-    // HD, Figure 5-6
-    if (i == 0)
-      return TypeInt::make(BitsPerInt);
-    int n = 1;
-    unsigned int x = i;
-    if (x >> 16 == 0) { n += 16; x <<= 16; }
-    if (x >> 24 == 0) { n +=  8; x <<=  8; }
-    if (x >> 28 == 0) { n +=  4; x <<=  4; }
-    if (x >> 30 == 0) { n +=  2; x <<=  2; }
-    n -= x >> 31;
-    return TypeInt::make(n);
-  }
-  return TypeInt::INT;
-}
-
-//------------------------------Value------------------------------------------
-const Type* CountLeadingZerosLNode::Value(PhaseTransform* phase) const {
-  const Type* t = phase->type(in(1));
-  if (t == Type::TOP) return Type::TOP;
-  const TypeLong* tl = t->isa_long();
-  if (tl && tl->is_con()) {
-    jlong l = tl->get_con();
-    // HD, Figure 5-6
-    if (l == 0)
-      return TypeInt::make(BitsPerLong);
-    int n = 1;
-    unsigned int x = (((julong) l) >> 32);
-    if (x == 0) { n += 32; x = (int) l; }
-    if (x >> 16 == 0) { n += 16; x <<= 16; }
-    if (x >> 24 == 0) { n +=  8; x <<=  8; }
-    if (x >> 28 == 0) { n +=  4; x <<=  4; }
-    if (x >> 30 == 0) { n +=  2; x <<=  2; }
-    n -= x >> 31;
-    return TypeInt::make(n);
-  }
-  return TypeInt::INT;
-}
-
-//------------------------------Value------------------------------------------
-const Type* CountTrailingZerosINode::Value(PhaseTransform* phase) const {
-  const Type* t = phase->type(in(1));
-  if (t == Type::TOP) return Type::TOP;
-  const TypeInt* ti = t->isa_int();
-  if (ti && ti->is_con()) {
-    jint i = ti->get_con();
-    // HD, Figure 5-14
-    int y;
-    if (i == 0)
-      return TypeInt::make(BitsPerInt);
-    int n = 31;
-    y = i << 16; if (y != 0) { n = n - 16; i = y; }
-    y = i <<  8; if (y != 0) { n = n -  8; i = y; }
-    y = i <<  4; if (y != 0) { n = n -  4; i = y; }
-    y = i <<  2; if (y != 0) { n = n -  2; i = y; }
-    y = i <<  1; if (y != 0) { n = n -  1; }
-    return TypeInt::make(n);
-  }
-  return TypeInt::INT;
-}
-
-//------------------------------Value------------------------------------------
-const Type* CountTrailingZerosLNode::Value(PhaseTransform* phase) const {
-  const Type* t = phase->type(in(1));
-  if (t == Type::TOP) return Type::TOP;
-  const TypeLong* tl = t->isa_long();
-  if (tl && tl->is_con()) {
-    jlong l = tl->get_con();
-    // HD, Figure 5-14
-    int x, y;
-    if (l == 0)
-      return TypeInt::make(BitsPerLong);
-    int n = 63;
-    y = (int) l; if (y != 0) { n = n - 32; x = y; } else x = (((julong) l) >> 32);
-    y = x << 16; if (y != 0) { n = n - 16; x = y; }
-    y = x <<  8; if (y != 0) { n = n -  8; x = y; }
-    y = x <<  4; if (y != 0) { n = n -  4; x = y; }
-    y = x <<  2; if (y != 0) { n = n -  2; x = y; }
-    y = x <<  1; if (y != 0) { n = n -  1; }
-    return TypeInt::make(n);
-  }
-  return TypeInt::INT;
-}
--- a/hotspot/src/share/vm/opto/connode.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/connode.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -139,630 +139,16 @@
 
 };
 
-//------------------------------BinaryNode-------------------------------------
-// Place holder for the 2 conditional inputs to a CMove.  CMove needs 4
-// inputs: the Bool (for the lt/gt/eq/ne bits), the flags (result of some
-// compare), and the 2 values to select between.  The Matcher requires a
-// binary tree so we break it down like this:
-//     (CMove (Binary bol cmp) (Binary src1 src2))
-class BinaryNode : public Node {
-public:
-  BinaryNode( Node *n1, Node *n2 ) : Node(0,n1,n2) { }
-  virtual int Opcode() const;
-  virtual uint ideal_reg() const { return 0; }
-};
-
-//------------------------------CMoveNode--------------------------------------
-// Conditional move
-class CMoveNode : public TypeNode {
-public:
-  enum { Control,               // When is it safe to do this cmove?
-         Condition,             // Condition controlling the cmove
-         IfFalse,               // Value if condition is false
-         IfTrue };              // Value if condition is true
-  CMoveNode( Node *bol, Node *left, Node *right, const Type *t ) : TypeNode(t,4)
-  {
-    init_class_id(Class_CMove);
-    // all inputs are nullified in Node::Node(int)
-    // init_req(Control,NULL);
-    init_req(Condition,bol);
-    init_req(IfFalse,left);
-    init_req(IfTrue,right);
-  }
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Identity( PhaseTransform *phase );
-  static CMoveNode *make( Compile *C, Node *c, Node *bol, Node *left, Node *right, const Type *t );
-  // Helper function to spot cmove graph shapes
-  static Node *is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f, BoolNode *b );
-};
-
-//------------------------------CMoveDNode-------------------------------------
-class CMoveDNode : public CMoveNode {
-public:
-  CMoveDNode( Node *bol, Node *left, Node *right, const Type* t) : CMoveNode(bol,left,right,t){}
-  virtual int Opcode() const;
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-};
-
-//------------------------------CMoveFNode-------------------------------------
-class CMoveFNode : public CMoveNode {
-public:
-  CMoveFNode( Node *bol, Node *left, Node *right, const Type* t ) : CMoveNode(bol,left,right,t) {}
-  virtual int Opcode() const;
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-};
-
-//------------------------------CMoveINode-------------------------------------
-class CMoveINode : public CMoveNode {
-public:
-  CMoveINode( Node *bol, Node *left, Node *right, const TypeInt *ti ) : CMoveNode(bol,left,right,ti){}
-  virtual int Opcode() const;
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-};
-
-//------------------------------CMoveLNode-------------------------------------
-class CMoveLNode : public CMoveNode {
-public:
-  CMoveLNode(Node *bol, Node *left, Node *right, const TypeLong *tl ) : CMoveNode(bol,left,right,tl){}
-  virtual int Opcode() const;
-};
-
-//------------------------------CMovePNode-------------------------------------
-class CMovePNode : public CMoveNode {
-public:
-  CMovePNode( Node *c, Node *bol, Node *left, Node *right, const TypePtr* t ) : CMoveNode(bol,left,right,t) { init_req(Control,c); }
-  virtual int Opcode() const;
-};
-
-//------------------------------CMoveNNode-------------------------------------
-class CMoveNNode : public CMoveNode {
-public:
-  CMoveNNode( Node *c, Node *bol, Node *left, Node *right, const Type* t ) : CMoveNode(bol,left,right,t) { init_req(Control,c); }
-  virtual int Opcode() const;
-};
-
-//------------------------------ConstraintCastNode-----------------------------
-// cast to a different range
-class ConstraintCastNode: public TypeNode {
-public:
-  ConstraintCastNode (Node *n, const Type *t ): TypeNode(t,2) {
-    init_class_id(Class_ConstraintCast);
-    init_req(1, n);
-  }
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual int Opcode() const;
-  virtual uint ideal_reg() const = 0;
-  virtual Node *Ideal_DU_postCCP( PhaseCCP * );
-};
-
-//------------------------------CastIINode-------------------------------------
-// cast integer to integer (different range)
-class CastIINode: public ConstraintCastNode {
-public:
-  CastIINode (Node *n, const Type *t ): ConstraintCastNode(n,t) {}
-  virtual int Opcode() const;
-  virtual uint ideal_reg() const { return Op_RegI; }
-};
-
-//------------------------------CastPPNode-------------------------------------
-// cast pointer to pointer (different type)
-class CastPPNode: public ConstraintCastNode {
-public:
-  CastPPNode (Node *n, const Type *t ): ConstraintCastNode(n, t) {}
-  virtual int Opcode() const;
-  virtual uint ideal_reg() const { return Op_RegP; }
-  virtual Node *Ideal_DU_postCCP( PhaseCCP * );
-};
-
-//------------------------------CheckCastPPNode--------------------------------
-// for _checkcast, cast pointer to pointer (different type), without JOIN,
-class CheckCastPPNode: public TypeNode {
-public:
-  CheckCastPPNode( Node *c, Node *n, const Type *t ) : TypeNode(t,2) {
-    init_class_id(Class_CheckCastPP);
-    init_req(0, c);
-    init_req(1, n);
-  }
-
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual int   Opcode() const;
-  virtual uint  ideal_reg() const { return Op_RegP; }
-  // No longer remove CheckCast after CCP as it gives me a place to hang
-  // the proper address type - which is required to compute anti-deps.
-  //virtual Node *Ideal_DU_postCCP( PhaseCCP * );
-};
-
-
-//------------------------------EncodeNarrowPtr--------------------------------
-class EncodeNarrowPtrNode : public TypeNode {
- protected:
-  EncodeNarrowPtrNode(Node* value, const Type* type):
-    TypeNode(type, 2) {
-    init_class_id(Class_EncodeNarrowPtr);
-    init_req(0, NULL);
-    init_req(1, value);
-  }
- public:
-  virtual uint  ideal_reg() const { return Op_RegN; }
-  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
-};
-
-//------------------------------EncodeP--------------------------------
-// Encodes an oop pointers into its compressed form
-// Takes an extra argument which is the real heap base as a long which
-// may be useful for code generation in the backend.
-class EncodePNode : public EncodeNarrowPtrNode {
- public:
-  EncodePNode(Node* value, const Type* type):
-    EncodeNarrowPtrNode(value, type) {
-    init_class_id(Class_EncodeP);
-  }
-  virtual int Opcode() const;
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual const Type *Value( PhaseTransform *phase ) const;
-};
-
-//------------------------------EncodePKlass--------------------------------
-// Encodes a klass pointer into its compressed form
-// Takes an extra argument which is the real heap base as a long which
-// may be useful for code generation in the backend.
-class EncodePKlassNode : public EncodeNarrowPtrNode {
- public:
-  EncodePKlassNode(Node* value, const Type* type):
-    EncodeNarrowPtrNode(value, type) {
-    init_class_id(Class_EncodePKlass);
-  }
-  virtual int Opcode() const;
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual const Type *Value( PhaseTransform *phase ) const;
-};
-
-//------------------------------DecodeNarrowPtr--------------------------------
-class DecodeNarrowPtrNode : public TypeNode {
- protected:
-  DecodeNarrowPtrNode(Node* value, const Type* type):
-    TypeNode(type, 2) {
-    init_class_id(Class_DecodeNarrowPtr);
-    init_req(0, NULL);
-    init_req(1, value);
-  }
- public:
-  virtual uint  ideal_reg() const { return Op_RegP; }
-};
-
-//------------------------------DecodeN--------------------------------
-// Converts a narrow oop into a real oop ptr.
-// Takes an extra argument which is the real heap base as a long which
-// may be useful for code generation in the backend.
-class DecodeNNode : public DecodeNarrowPtrNode {
- public:
-  DecodeNNode(Node* value, const Type* type):
-    DecodeNarrowPtrNode(value, type) {
-    init_class_id(Class_DecodeN);
-  }
-  virtual int Opcode() const;
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Identity( PhaseTransform *phase );
-};
-
-//------------------------------DecodeNKlass--------------------------------
-// Converts a narrow klass pointer into a real klass ptr.
-// Takes an extra argument which is the real heap base as a long which
-// may be useful for code generation in the backend.
-class DecodeNKlassNode : public DecodeNarrowPtrNode {
- public:
-  DecodeNKlassNode(Node* value, const Type* type):
-    DecodeNarrowPtrNode(value, type) {
-    init_class_id(Class_DecodeNKlass);
-  }
-  virtual int Opcode() const;
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Identity( PhaseTransform *phase );
-};
-
-//------------------------------Conv2BNode-------------------------------------
-// Convert int/pointer to a Boolean.  Map zero to zero, all else to 1.
-class Conv2BNode : public Node {
-public:
-  Conv2BNode( Node *i ) : Node(0,i) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return TypeInt::BOOL; }
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual uint  ideal_reg() const { return Op_RegI; }
-};
-
-// The conversions operations are all Alpha sorted.  Please keep it that way!
-//------------------------------ConvD2FNode------------------------------------
-// Convert double to float
-class ConvD2FNode : public Node {
-public:
-  ConvD2FNode( Node *in1 ) : Node(0,in1) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return Type::FLOAT; }
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual uint  ideal_reg() const { return Op_RegF; }
-};
-
-//------------------------------ConvD2INode------------------------------------
-// Convert Double to Integer
-class ConvD2INode : public Node {
-public:
-  ConvD2INode( Node *in1 ) : Node(0,in1) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return TypeInt::INT; }
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual uint  ideal_reg() const { return Op_RegI; }
-};
-
-//------------------------------ConvD2LNode------------------------------------
-// Convert Double to Long
-class ConvD2LNode : public Node {
-public:
-  ConvD2LNode( Node *dbl ) : Node(0,dbl) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return TypeLong::LONG; }
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual uint ideal_reg() const { return Op_RegL; }
-};
-
-//------------------------------ConvF2DNode------------------------------------
-// Convert Float to a Double.
-class ConvF2DNode : public Node {
-public:
-  ConvF2DNode( Node *in1 ) : Node(0,in1) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return Type::DOUBLE; }
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual uint  ideal_reg() const { return Op_RegD; }
-};
-
-//------------------------------ConvF2INode------------------------------------
-// Convert float to integer
-class ConvF2INode : public Node {
-public:
-  ConvF2INode( Node *in1 ) : Node(0,in1) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return TypeInt::INT; }
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual uint  ideal_reg() const { return Op_RegI; }
-};
-
-//------------------------------ConvF2LNode------------------------------------
-// Convert float to long
-class ConvF2LNode : public Node {
-public:
-  ConvF2LNode( Node *in1 ) : Node(0,in1) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return TypeLong::LONG; }
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual uint  ideal_reg() const { return Op_RegL; }
-};
-
-//------------------------------ConvI2DNode------------------------------------
-// Convert Integer to Double
-class ConvI2DNode : public Node {
-public:
-  ConvI2DNode( Node *in1 ) : Node(0,in1) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return Type::DOUBLE; }
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual uint  ideal_reg() const { return Op_RegD; }
-};
-
-//------------------------------ConvI2FNode------------------------------------
-// Convert Integer to Float
-class ConvI2FNode : public Node {
-public:
-  ConvI2FNode( Node *in1 ) : Node(0,in1) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return Type::FLOAT; }
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual uint  ideal_reg() const { return Op_RegF; }
-};
-
-//------------------------------ConvI2LNode------------------------------------
-// Convert integer to long
-class ConvI2LNode : public TypeNode {
-public:
-  ConvI2LNode(Node *in1, const TypeLong* t = TypeLong::INT)
-    : TypeNode(t, 2)
-  { init_req(1, in1); }
-  virtual int Opcode() const;
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual uint  ideal_reg() const { return Op_RegL; }
-};
-
-//------------------------------ConvL2DNode------------------------------------
-// Convert Long to Double
-class ConvL2DNode : public Node {
-public:
-  ConvL2DNode( Node *in1 ) : Node(0,in1) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return Type::DOUBLE; }
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual uint ideal_reg() const { return Op_RegD; }
-};
-
-//------------------------------ConvL2FNode------------------------------------
-// Convert Long to Float
-class ConvL2FNode : public Node {
-public:
-  ConvL2FNode( Node *in1 ) : Node(0,in1) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return Type::FLOAT; }
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual uint  ideal_reg() const { return Op_RegF; }
-};
-
-//------------------------------ConvL2INode------------------------------------
-// Convert long to integer
-class ConvL2INode : public Node {
-public:
-  ConvL2INode( Node *in1 ) : Node(0,in1) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return TypeInt::INT; }
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual uint  ideal_reg() const { return Op_RegI; }
-};
-
-//------------------------------CastX2PNode-------------------------------------
-// convert a machine-pointer-sized integer to a raw pointer
-class CastX2PNode : public Node {
-public:
-  CastX2PNode( Node *n ) : Node(NULL, n) {}
-  virtual int Opcode() const;
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual uint ideal_reg() const { return Op_RegP; }
-  virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
-};
-
-//------------------------------CastP2XNode-------------------------------------
-// Used in both 32-bit and 64-bit land.
-// Used for card-marks and unsafe pointer math.
-class CastP2XNode : public Node {
-public:
-  CastP2XNode( Node *ctrl, Node *n ) : Node(ctrl, n) {}
-  virtual int Opcode() const;
-  virtual const Type *Value( PhaseTransform *phase ) const;
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual uint ideal_reg() const { return Op_RegX; }
-  virtual const Type *bottom_type() const { return TypeX_X; }
-  // Return false to keep node from moving away from an associated card mark.
-  virtual bool depends_only_on_test() const { return false; }
-};
-
 //------------------------------ThreadLocalNode--------------------------------
 // Ideal Node which returns the base of ThreadLocalStorage.
 class ThreadLocalNode : public Node {
 public:
-  ThreadLocalNode( ) : Node((Node*)Compile::current()->root()) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM;}
-  virtual uint ideal_reg() const { return Op_RegP; }
-};
-
-//------------------------------LoadReturnPCNode-------------------------------
-class LoadReturnPCNode: public Node {
-public:
-  LoadReturnPCNode(Node *c) : Node(c) { }
-  virtual int Opcode() const;
-  virtual uint ideal_reg() const { return Op_RegP; }
-};
-
-
-//-----------------------------RoundFloatNode----------------------------------
-class RoundFloatNode: public Node {
-public:
-  RoundFloatNode(Node* c, Node *in1): Node(c, in1) {}
-  virtual int   Opcode() const;
-  virtual const Type *bottom_type() const { return Type::FLOAT; }
-  virtual uint  ideal_reg() const { return Op_RegF; }
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual const Type *Value( PhaseTransform *phase ) const;
-};
-
-
-//-----------------------------RoundDoubleNode---------------------------------
-class RoundDoubleNode: public Node {
-public:
-  RoundDoubleNode(Node* c, Node *in1): Node(c, in1) {}
-  virtual int   Opcode() const;
-  virtual const Type *bottom_type() const { return Type::DOUBLE; }
-  virtual uint  ideal_reg() const { return Op_RegD; }
-  virtual Node *Identity( PhaseTransform *phase );
-  virtual const Type *Value( PhaseTransform *phase ) const;
-};
-
-//------------------------------Opaque1Node------------------------------------
-// A node to prevent unwanted optimizations.  Allows constant folding.
-// Stops value-numbering, Ideal calls or Identity functions.
-class Opaque1Node : public Node {
-  virtual uint hash() const ;                  // { return NO_HASH; }
-  virtual uint cmp( const Node &n ) const;
-public:
-  Opaque1Node( Compile* C, Node *n ) : Node(0,n) {
-    // Put it on the Macro nodes list to removed during macro nodes expansion.
-    init_flags(Flag_is_macro);
-    C->add_macro_node(this);
-  }
-  // Special version for the pre-loop to hold the original loop limit
-  // which is consumed by range check elimination.
-  Opaque1Node( Compile* C, Node *n, Node* orig_limit ) : Node(0,n,orig_limit) {
-    // Put it on the Macro nodes list to removed during macro nodes expansion.
-    init_flags(Flag_is_macro);
-    C->add_macro_node(this);
-  }
-  Node* original_loop_limit() { return req()==3 ? in(2) : NULL; }
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return TypeInt::INT; }
-  virtual Node *Identity( PhaseTransform *phase );
-};
-
-//------------------------------Opaque2Node------------------------------------
-// A node to prevent unwanted optimizations.  Allows constant folding.  Stops
-// value-numbering, most Ideal calls or Identity functions.  This Node is
-// specifically designed to prevent the pre-increment value of a loop trip
-// counter from being live out of the bottom of the loop (hence causing the
-// pre- and post-increment values both being live and thus requiring an extra
-// temp register and an extra move).  If we "accidentally" optimize through
-// this kind of a Node, we'll get slightly pessimal, but correct, code.  Thus
-// it's OK to be slightly sloppy on optimizations here.
-class Opaque2Node : public Node {
-  virtual uint hash() const ;                  // { return NO_HASH; }
-  virtual uint cmp( const Node &n ) const;
-public:
-  Opaque2Node( Compile* C, Node *n ) : Node(0,n) {
-    // Put it on the Macro nodes list to removed during macro nodes expansion.
-    init_flags(Flag_is_macro);
-    C->add_macro_node(this);
-  }
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return TypeInt::INT; }
-};
-
-//------------------------------Opaque3Node------------------------------------
-// A node to prevent unwanted optimizations. Will be optimized only during
-// macro nodes expansion.
-class Opaque3Node : public Opaque2Node {
-  int _opt; // what optimization it was used for
-public:
-  enum { RTM_OPT };
-  Opaque3Node(Compile* C, Node *n, int opt) : Opaque2Node(C, n), _opt(opt) {}
-  virtual int Opcode() const;
-  bool rtm_opt() const { return (_opt == RTM_OPT); }
+    ThreadLocalNode( ) : Node((Node*)Compile::current()->root()) {}
+    virtual int Opcode() const;
+    virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM;}
+    virtual uint ideal_reg() const { return Op_RegP; }
 };
 
 
-//----------------------PartialSubtypeCheckNode--------------------------------
-// The 2nd slow-half of a subtype check.  Scan the subklass's 2ndary superklass
-// array for an instance of the superklass.  Set a hidden internal cache on a
-// hit (cache is checked with exposed code in gen_subtype_check()).  Return
-// not zero for a miss or zero for a hit.
-class PartialSubtypeCheckNode : public Node {
-public:
-  PartialSubtypeCheckNode(Node* c, Node* sub, Node* super) : Node(c,sub,super) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
-  virtual uint ideal_reg() const { return Op_RegP; }
-};
-
-//
-class MoveI2FNode : public Node {
- public:
-  MoveI2FNode( Node *value ) : Node(0,value) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return Type::FLOAT; }
-  virtual uint ideal_reg() const { return Op_RegF; }
-  virtual const Type* Value( PhaseTransform *phase ) const;
-};
-
-class MoveL2DNode : public Node {
- public:
-  MoveL2DNode( Node *value ) : Node(0,value) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return Type::DOUBLE; }
-  virtual uint ideal_reg() const { return Op_RegD; }
-  virtual const Type* Value( PhaseTransform *phase ) const;
-};
-
-class MoveF2INode : public Node {
- public:
-  MoveF2INode( Node *value ) : Node(0,value) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return TypeInt::INT; }
-  virtual uint ideal_reg() const { return Op_RegI; }
-  virtual const Type* Value( PhaseTransform *phase ) const;
-};
-
-class MoveD2LNode : public Node {
- public:
-  MoveD2LNode( Node *value ) : Node(0,value) {}
-  virtual int Opcode() const;
-  virtual const Type *bottom_type() const { return TypeLong::LONG; }
-  virtual uint ideal_reg() const { return Op_RegL; }
-  virtual const Type* Value( PhaseTransform *phase ) const;
-};
-
-//---------- CountBitsNode -----------------------------------------------------
-class CountBitsNode : public Node {
-public:
-  CountBitsNode(Node* in1) : Node(0, in1) {}
-  const Type* bottom_type() const { return TypeInt::INT; }
-  virtual uint ideal_reg() const { return Op_RegI; }
-};
-
-//---------- CountLeadingZerosINode --------------------------------------------
-// Count leading zeros (0-bit count starting from MSB) of an integer.
-class CountLeadingZerosINode : public CountBitsNode {
-public:
-  CountLeadingZerosINode(Node* in1) : CountBitsNode(in1) {}
-  virtual int Opcode() const;
-  virtual const Type* Value(PhaseTransform* phase) const;
-};
-
-//---------- CountLeadingZerosLNode --------------------------------------------
-// Count leading zeros (0-bit count starting from MSB) of a long.
-class CountLeadingZerosLNode : public CountBitsNode {
-public:
-  CountLeadingZerosLNode(Node* in1) : CountBitsNode(in1) {}
-  virtual int Opcode() const;
-  virtual const Type* Value(PhaseTransform* phase) const;
-};
-
-//---------- CountTrailingZerosINode -------------------------------------------
-// Count trailing zeros (0-bit count starting from LSB) of an integer.
-class CountTrailingZerosINode : public CountBitsNode {
-public:
-  CountTrailingZerosINode(Node* in1) : CountBitsNode(in1) {}
-  virtual int Opcode() const;
-  virtual const Type* Value(PhaseTransform* phase) const;
-};
-
-//---------- CountTrailingZerosLNode -------------------------------------------
-// Count trailing zeros (0-bit count starting from LSB) of a long.
-class CountTrailingZerosLNode : public CountBitsNode {
-public:
-  CountTrailingZerosLNode(Node* in1) : CountBitsNode(in1) {}
-  virtual int Opcode() const;
-  virtual const Type* Value(PhaseTransform* phase) const;
-};
-
-//---------- PopCountINode -----------------------------------------------------
-// Population count (bit count) of an integer.
-class PopCountINode : public CountBitsNode {
-public:
-  PopCountINode(Node* in1) : CountBitsNode(in1) {}
-  virtual int Opcode() const;
-};
-
-//---------- PopCountLNode -----------------------------------------------------
-// Population count (bit count) of a long.
-class PopCountLNode : public CountBitsNode {
-public:
-  PopCountLNode(Node* in1) : CountBitsNode(in1) {}
-  virtual int Opcode() const;
-};
 
 #endif // SHARE_VM_OPTO_CONNODE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/convertnode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,512 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "opto/addnode.hpp"
+#include "opto/convertnode.hpp"
+#include "opto/matcher.hpp"
+#include "opto/phaseX.hpp"
+#include "opto/subnode.hpp"
+
+//=============================================================================
+//------------------------------Identity---------------------------------------
+Node *Conv2BNode::Identity( PhaseTransform *phase ) {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return in(1);
+  if( t == TypeInt::ZERO ) return in(1);
+  if( t == TypeInt::ONE ) return in(1);
+  if( t == TypeInt::BOOL ) return in(1);
+  return this;
+}
+
+//------------------------------Value------------------------------------------
+const Type *Conv2BNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return Type::TOP;
+  if( t == TypeInt::ZERO ) return TypeInt::ZERO;
+  if( t == TypePtr::NULL_PTR ) return TypeInt::ZERO;
+  const TypePtr *tp = t->isa_ptr();
+  if( tp != NULL ) {
+    if( tp->ptr() == TypePtr::AnyNull ) return Type::TOP;
+    if( tp->ptr() == TypePtr::Constant) return TypeInt::ONE;
+    if (tp->ptr() == TypePtr::NotNull)  return TypeInt::ONE;
+    return TypeInt::BOOL;
+  }
+  if (t->base() != Type::Int) return TypeInt::BOOL;
+  const TypeInt *ti = t->is_int();
+  if( ti->_hi < 0 || ti->_lo > 0 ) return TypeInt::ONE;
+  return TypeInt::BOOL;
+}
+
+
+// The conversions operations are all Alpha sorted.  Please keep it that way!
+//=============================================================================
+//------------------------------Value------------------------------------------
+const Type *ConvD2FNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return Type::TOP;
+  if( t == Type::DOUBLE ) return Type::FLOAT;
+  const TypeD *td = t->is_double_constant();
+  return TypeF::make( (float)td->getd() );
+}
+
+//------------------------------Identity---------------------------------------
+// Float's can be converted to doubles with no loss of bits.  Hence
+// converting a float to a double and back to a float is a NOP.
+Node *ConvD2FNode::Identity(PhaseTransform *phase) {
+  return (in(1)->Opcode() == Op_ConvF2D) ? in(1)->in(1) : this;
+}
+
+//=============================================================================
+//------------------------------Value------------------------------------------
+const Type *ConvD2INode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return Type::TOP;
+  if( t == Type::DOUBLE ) return TypeInt::INT;
+  const TypeD *td = t->is_double_constant();
+  return TypeInt::make( SharedRuntime::d2i( td->getd() ) );
+}
+
+//------------------------------Ideal------------------------------------------
+// If converting to an int type, skip any rounding nodes
+Node *ConvD2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  if( in(1)->Opcode() == Op_RoundDouble )
+  set_req(1,in(1)->in(1));
+  return NULL;
+}
+
+//------------------------------Identity---------------------------------------
+// Int's can be converted to doubles with no loss of bits.  Hence
+// converting an integer to a double and back to an integer is a NOP.
+Node *ConvD2INode::Identity(PhaseTransform *phase) {
+  return (in(1)->Opcode() == Op_ConvI2D) ? in(1)->in(1) : this;
+}
+
+//=============================================================================
+//------------------------------Value------------------------------------------
+const Type *ConvD2LNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return Type::TOP;
+  if( t == Type::DOUBLE ) return TypeLong::LONG;
+  const TypeD *td = t->is_double_constant();
+  return TypeLong::make( SharedRuntime::d2l( td->getd() ) );
+}
+
+//------------------------------Identity---------------------------------------
+Node *ConvD2LNode::Identity(PhaseTransform *phase) {
+  // Remove ConvD2L->ConvL2D->ConvD2L sequences.
+  if( in(1)       ->Opcode() == Op_ConvL2D &&
+     in(1)->in(1)->Opcode() == Op_ConvD2L )
+  return in(1)->in(1);
+  return this;
+}
+
+//------------------------------Ideal------------------------------------------
+// If converting to an int type, skip any rounding nodes
+Node *ConvD2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  if( in(1)->Opcode() == Op_RoundDouble )
+  set_req(1,in(1)->in(1));
+  return NULL;
+}
+
+//=============================================================================
+//------------------------------Value------------------------------------------
+const Type *ConvF2DNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return Type::TOP;
+  if( t == Type::FLOAT ) return Type::DOUBLE;
+  const TypeF *tf = t->is_float_constant();
+  return TypeD::make( (double)tf->getf() );
+}
+
+//=============================================================================
+//------------------------------Value------------------------------------------
+const Type *ConvF2INode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP )       return Type::TOP;
+  if( t == Type::FLOAT ) return TypeInt::INT;
+  const TypeF *tf = t->is_float_constant();
+  return TypeInt::make( SharedRuntime::f2i( tf->getf() ) );
+}
+
+//------------------------------Identity---------------------------------------
+Node *ConvF2INode::Identity(PhaseTransform *phase) {
+  // Remove ConvF2I->ConvI2F->ConvF2I sequences.
+  if( in(1)       ->Opcode() == Op_ConvI2F &&
+     in(1)->in(1)->Opcode() == Op_ConvF2I )
+  return in(1)->in(1);
+  return this;
+}
+
+//------------------------------Ideal------------------------------------------
+// If converting to an int type, skip any rounding nodes
+Node *ConvF2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  if( in(1)->Opcode() == Op_RoundFloat )
+  set_req(1,in(1)->in(1));
+  return NULL;
+}
+
+//=============================================================================
+//------------------------------Value------------------------------------------
+const Type *ConvF2LNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP )       return Type::TOP;
+  if( t == Type::FLOAT ) return TypeLong::LONG;
+  const TypeF *tf = t->is_float_constant();
+  return TypeLong::make( SharedRuntime::f2l( tf->getf() ) );
+}
+
+//------------------------------Identity---------------------------------------
+Node *ConvF2LNode::Identity(PhaseTransform *phase) {
+  // Remove ConvF2L->ConvL2F->ConvF2L sequences.
+  if( in(1)       ->Opcode() == Op_ConvL2F &&
+     in(1)->in(1)->Opcode() == Op_ConvF2L )
+  return in(1)->in(1);
+  return this;
+}
+
+//------------------------------Ideal------------------------------------------
+// If converting to an int type, skip any rounding nodes
+Node *ConvF2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  if( in(1)->Opcode() == Op_RoundFloat )
+  set_req(1,in(1)->in(1));
+  return NULL;
+}
+
+//=============================================================================
+//------------------------------Value------------------------------------------
+const Type *ConvI2DNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return Type::TOP;
+  const TypeInt *ti = t->is_int();
+  if( ti->is_con() ) return TypeD::make( (double)ti->get_con() );
+  return bottom_type();
+}
+
+//=============================================================================
+//------------------------------Value------------------------------------------
+const Type *ConvI2FNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return Type::TOP;
+  const TypeInt *ti = t->is_int();
+  if( ti->is_con() ) return TypeF::make( (float)ti->get_con() );
+  return bottom_type();
+}
+
+//------------------------------Identity---------------------------------------
+Node *ConvI2FNode::Identity(PhaseTransform *phase) {
+  // Remove ConvI2F->ConvF2I->ConvI2F sequences.
+  if( in(1)       ->Opcode() == Op_ConvF2I &&
+     in(1)->in(1)->Opcode() == Op_ConvI2F )
+  return in(1)->in(1);
+  return this;
+}
+
+//=============================================================================
+//------------------------------Value------------------------------------------
+const Type *ConvI2LNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return Type::TOP;
+  const TypeInt *ti = t->is_int();
+  const Type* tl = TypeLong::make(ti->_lo, ti->_hi, ti->_widen);
+  // Join my declared type against my incoming type.
+  tl = tl->filter(_type);
+  return tl;
+}
+
+#ifdef _LP64
+static inline bool long_ranges_overlap(jlong lo1, jlong hi1,
+                                       jlong lo2, jlong hi2) {
+  // Two ranges overlap iff one range's low point falls in the other range.
+  return (lo2 <= lo1 && lo1 <= hi2) || (lo1 <= lo2 && lo2 <= hi1);
+}
+#endif
+
+//------------------------------Ideal------------------------------------------
+Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  const TypeLong* this_type = this->type()->is_long();
+  Node* this_changed = NULL;
+
+  // If _major_progress, then more loop optimizations follow.  Do NOT
+  // remove this node's type assertion until no more loop ops can happen.
+  // The progress bit is set in the major loop optimizations THEN comes the
+  // call to IterGVN and any chance of hitting this code.  Cf. Opaque1Node.
+  if (can_reshape && !phase->C->major_progress()) {
+    const TypeInt* in_type = phase->type(in(1))->isa_int();
+    if (in_type != NULL && this_type != NULL &&
+        (in_type->_lo != this_type->_lo ||
+         in_type->_hi != this_type->_hi)) {
+          // Although this WORSENS the type, it increases GVN opportunities,
+          // because I2L nodes with the same input will common up, regardless
+          // of slightly differing type assertions.  Such slight differences
+          // arise routinely as a result of loop unrolling, so this is a
+          // post-unrolling graph cleanup.  Choose a type which depends only
+          // on my input.  (Exception:  Keep a range assertion of >=0 or <0.)
+          jlong lo1 = this_type->_lo;
+          jlong hi1 = this_type->_hi;
+          int   w1  = this_type->_widen;
+          if (lo1 != (jint)lo1 ||
+              hi1 != (jint)hi1 ||
+              lo1 > hi1) {
+            // Overflow leads to wraparound, wraparound leads to range saturation.
+            lo1 = min_jint; hi1 = max_jint;
+          } else if (lo1 >= 0) {
+            // Keep a range assertion of >=0.
+            lo1 = 0;        hi1 = max_jint;
+          } else if (hi1 < 0) {
+            // Keep a range assertion of <0.
+            lo1 = min_jint; hi1 = -1;
+          } else {
+            lo1 = min_jint; hi1 = max_jint;
+          }
+          const TypeLong* wtype = TypeLong::make(MAX2((jlong)in_type->_lo, lo1),
+                                                 MIN2((jlong)in_type->_hi, hi1),
+                                                 MAX2((int)in_type->_widen, w1));
+          if (wtype != type()) {
+            set_type(wtype);
+            // Note: this_type still has old type value, for the logic below.
+            this_changed = this;
+          }
+        }
+  }
+
+#ifdef _LP64
+  // Convert ConvI2L(AddI(x, y)) to AddL(ConvI2L(x), ConvI2L(y)) ,
+  // but only if x and y have subranges that cannot cause 32-bit overflow,
+  // under the assumption that x+y is in my own subrange this->type().
+
+  // This assumption is based on a constraint (i.e., type assertion)
+  // established in Parse::array_addressing or perhaps elsewhere.
+  // This constraint has been adjoined to the "natural" type of
+  // the incoming argument in(0).  We know (because of runtime
+  // checks) - that the result value I2L(x+y) is in the joined range.
+  // Hence we can restrict the incoming terms (x, y) to values such
+  // that their sum also lands in that range.
+
+  // This optimization is useful only on 64-bit systems, where we hope
+  // the addition will end up subsumed in an addressing mode.
+  // It is necessary to do this when optimizing an unrolled array
+  // copy loop such as x[i++] = y[i++].
+
+  // On 32-bit systems, it's better to perform as much 32-bit math as
+  // possible before the I2L conversion, because 32-bit math is cheaper.
+  // There's no common reason to "leak" a constant offset through the I2L.
+  // Addressing arithmetic will not absorb it as part of a 64-bit AddL.
+
+  Node* z = in(1);
+  int op = z->Opcode();
+  if (op == Op_AddI || op == Op_SubI) {
+    Node* x = z->in(1);
+    Node* y = z->in(2);
+    assert (x != z && y != z, "dead loop in ConvI2LNode::Ideal");
+    if (phase->type(x) == Type::TOP)  return this_changed;
+    if (phase->type(y) == Type::TOP)  return this_changed;
+    const TypeInt*  tx = phase->type(x)->is_int();
+    const TypeInt*  ty = phase->type(y)->is_int();
+    const TypeLong* tz = this_type;
+    jlong xlo = tx->_lo;
+    jlong xhi = tx->_hi;
+    jlong ylo = ty->_lo;
+    jlong yhi = ty->_hi;
+    jlong zlo = tz->_lo;
+    jlong zhi = tz->_hi;
+    jlong vbit = CONST64(1) << BitsPerInt;
+    int widen =  MAX2(tx->_widen, ty->_widen);
+    if (op == Op_SubI) {
+      jlong ylo0 = ylo;
+      ylo = -yhi;
+      yhi = -ylo0;
+    }
+    // See if x+y can cause positive overflow into z+2**32
+    if (long_ranges_overlap(xlo+ylo, xhi+yhi, zlo+vbit, zhi+vbit)) {
+      return this_changed;
+    }
+    // See if x+y can cause negative overflow into z-2**32
+    if (long_ranges_overlap(xlo+ylo, xhi+yhi, zlo-vbit, zhi-vbit)) {
+      return this_changed;
+    }
+    // Now it's always safe to assume x+y does not overflow.
+    // This is true even if some pairs x,y might cause overflow, as long
+    // as that overflow value cannot fall into [zlo,zhi].
+
+    // Confident that the arithmetic is "as if infinite precision",
+    // we can now use z's range to put constraints on those of x and y.
+    // The "natural" range of x [xlo,xhi] can perhaps be narrowed to a
+    // more "restricted" range by intersecting [xlo,xhi] with the
+    // range obtained by subtracting y's range from the asserted range
+    // of the I2L conversion.  Here's the interval arithmetic algebra:
+    //    x == z-y == [zlo,zhi]-[ylo,yhi] == [zlo,zhi]+[-yhi,-ylo]
+    //    => x in [zlo-yhi, zhi-ylo]
+    //    => x in [zlo-yhi, zhi-ylo] INTERSECT [xlo,xhi]
+    //    => x in [xlo MAX zlo-yhi, xhi MIN zhi-ylo]
+    jlong rxlo = MAX2(xlo, zlo - yhi);
+    jlong rxhi = MIN2(xhi, zhi - ylo);
+    // And similarly, x changing place with y:
+    jlong rylo = MAX2(ylo, zlo - xhi);
+    jlong ryhi = MIN2(yhi, zhi - xlo);
+    if (rxlo > rxhi || rylo > ryhi) {
+      return this_changed;  // x or y is dying; don't mess w/ it
+    }
+    if (op == Op_SubI) {
+      jlong rylo0 = rylo;
+      rylo = -ryhi;
+      ryhi = -rylo0;
+    }
+
+    Node* cx = phase->transform( new (phase->C) ConvI2LNode(x, TypeLong::make(rxlo, rxhi, widen)) );
+    Node* cy = phase->transform( new (phase->C) ConvI2LNode(y, TypeLong::make(rylo, ryhi, widen)) );
+    switch (op) {
+      case Op_AddI:  return new (phase->C) AddLNode(cx, cy);
+      case Op_SubI:  return new (phase->C) SubLNode(cx, cy);
+      default:       ShouldNotReachHere();
+    }
+  }
+#endif //_LP64
+
+  return this_changed;
+}
+
+//=============================================================================
+//------------------------------Value------------------------------------------
+const Type *ConvL2DNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return Type::TOP;
+  const TypeLong *tl = t->is_long();
+  if( tl->is_con() ) return TypeD::make( (double)tl->get_con() );
+  return bottom_type();
+}
+
+//=============================================================================
+//------------------------------Value------------------------------------------
+const Type *ConvL2FNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return Type::TOP;
+  const TypeLong *tl = t->is_long();
+  if( tl->is_con() ) return TypeF::make( (float)tl->get_con() );
+  return bottom_type();
+}
+
+//=============================================================================
+//----------------------------Identity-----------------------------------------
+Node *ConvL2INode::Identity( PhaseTransform *phase ) {
+  // Convert L2I(I2L(x)) => x
+  if (in(1)->Opcode() == Op_ConvI2L)  return in(1)->in(1);
+  return this;
+}
+
+//------------------------------Value------------------------------------------
+const Type *ConvL2INode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return Type::TOP;
+  const TypeLong *tl = t->is_long();
+  if (tl->is_con())
+  // Easy case.
+  return TypeInt::make((jint)tl->get_con());
+  return bottom_type();
+}
+
+//------------------------------Ideal------------------------------------------
+// Return a node which is more "ideal" than the current node.
+// Blow off prior masking to int
+Node *ConvL2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  Node *andl = in(1);
+  uint andl_op = andl->Opcode();
+  if( andl_op == Op_AndL ) {
+    // Blow off prior masking to int
+    if( phase->type(andl->in(2)) == TypeLong::make( 0xFFFFFFFF ) ) {
+      set_req(1,andl->in(1));
+      return this;
+    }
+  }
+
+  // Swap with a prior add: convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
+  // This replaces an 'AddL' with an 'AddI'.
+  if( andl_op == Op_AddL ) {
+    // Don't do this for nodes which have more than one user since
+    // we'll end up computing the long add anyway.
+    if (andl->outcnt() > 1) return NULL;
+
+    Node* x = andl->in(1);
+    Node* y = andl->in(2);
+    assert( x != andl && y != andl, "dead loop in ConvL2INode::Ideal" );
+    if (phase->type(x) == Type::TOP)  return NULL;
+    if (phase->type(y) == Type::TOP)  return NULL;
+    Node *add1 = phase->transform(new (phase->C) ConvL2INode(x));
+    Node *add2 = phase->transform(new (phase->C) ConvL2INode(y));
+    return new (phase->C) AddINode(add1,add2);
+  }
+
+  // Disable optimization: LoadL->ConvL2I ==> LoadI.
+  // It causes problems (sizes of Load and Store nodes do not match)
+  // in objects initialization code and Escape Analysis.
+  return NULL;
+}
+
+
+
+//=============================================================================
+//------------------------------Identity---------------------------------------
+// Remove redundant roundings
+Node *RoundFloatNode::Identity( PhaseTransform *phase ) {
+  assert(Matcher::strict_fp_requires_explicit_rounding, "should only generate for Intel");
+  // Do not round constants
+  if (phase->type(in(1))->base() == Type::FloatCon)  return in(1);
+  int op = in(1)->Opcode();
+  // Redundant rounding
+  if( op == Op_RoundFloat ) return in(1);
+  // Already rounded
+  if( op == Op_Parm ) return in(1);
+  if( op == Op_LoadF ) return in(1);
+  return this;
+}
+
+//------------------------------Value------------------------------------------
+const Type *RoundFloatNode::Value( PhaseTransform *phase ) const {
+  return phase->type( in(1) );
+}
+
+//=============================================================================
+//------------------------------Identity---------------------------------------
+// Remove redundant roundings.  Incoming arguments are already rounded.
+Node *RoundDoubleNode::Identity( PhaseTransform *phase ) {
+  assert(Matcher::strict_fp_requires_explicit_rounding, "should only generate for Intel");
+  // Do not round constants
+  if (phase->type(in(1))->base() == Type::DoubleCon)  return in(1);
+  int op = in(1)->Opcode();
+  // Redundant rounding
+  if( op == Op_RoundDouble ) return in(1);
+  // Already rounded
+  if( op == Op_Parm ) return in(1);
+  if( op == Op_LoadD ) return in(1);
+  if( op == Op_ConvF2D ) return in(1);
+  if( op == Op_ConvI2D ) return in(1);
+  return this;
+}
+
+//------------------------------Value------------------------------------------
+const Type *RoundDoubleNode::Value( PhaseTransform *phase ) const {
+  return phase->type( in(1) );
+}
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/convertnode.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OPTO_CONVERTNODE_HPP
+#define SHARE_VM_OPTO_CONVERTNODE_HPP
+
+#include "opto/node.hpp"
+#include "opto/opcodes.hpp"
+
+
+//------------------------------Conv2BNode-------------------------------------
+// Convert int/pointer to a Boolean.  Map zero to zero, all else to 1.
+class Conv2BNode : public Node {
+  public:
+  Conv2BNode( Node *i ) : Node(0,i) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return TypeInt::BOOL; }
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual uint  ideal_reg() const { return Op_RegI; }
+};
+
+// The conversions operations are all Alpha sorted.  Please keep it that way!
+//------------------------------ConvD2FNode------------------------------------
+// Convert double to float
+class ConvD2FNode : public Node {
+  public:
+  ConvD2FNode( Node *in1 ) : Node(0,in1) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return Type::FLOAT; }
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual uint  ideal_reg() const { return Op_RegF; }
+};
+
+//------------------------------ConvD2INode------------------------------------
+// Convert Double to Integer
+class ConvD2INode : public Node {
+  public:
+  ConvD2INode( Node *in1 ) : Node(0,in1) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return TypeInt::INT; }
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual uint  ideal_reg() const { return Op_RegI; }
+};
+
+//------------------------------ConvD2LNode------------------------------------
+// Convert Double to Long
+class ConvD2LNode : public Node {
+  public:
+  ConvD2LNode( Node *dbl ) : Node(0,dbl) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return TypeLong::LONG; }
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual uint ideal_reg() const { return Op_RegL; }
+};
+
+//------------------------------ConvF2DNode------------------------------------
+// Convert Float to a Double.
+class ConvF2DNode : public Node {
+  public:
+  ConvF2DNode( Node *in1 ) : Node(0,in1) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return Type::DOUBLE; }
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual uint  ideal_reg() const { return Op_RegD; }
+};
+
+//------------------------------ConvF2INode------------------------------------
+// Convert float to integer
+class ConvF2INode : public Node {
+  public:
+  ConvF2INode( Node *in1 ) : Node(0,in1) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return TypeInt::INT; }
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual uint  ideal_reg() const { return Op_RegI; }
+};
+
+//------------------------------ConvF2LNode------------------------------------
+// Convert float to long
+class ConvF2LNode : public Node {
+  public:
+  ConvF2LNode( Node *in1 ) : Node(0,in1) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return TypeLong::LONG; }
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual uint  ideal_reg() const { return Op_RegL; }
+};
+
+//------------------------------ConvI2DNode------------------------------------
+// Convert Integer to Double
+class ConvI2DNode : public Node {
+  public:
+  ConvI2DNode( Node *in1 ) : Node(0,in1) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return Type::DOUBLE; }
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual uint  ideal_reg() const { return Op_RegD; }
+};
+
+//------------------------------ConvI2FNode------------------------------------
+// Convert Integer to Float
+class ConvI2FNode : public Node {
+  public:
+  ConvI2FNode( Node *in1 ) : Node(0,in1) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return Type::FLOAT; }
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual uint  ideal_reg() const { return Op_RegF; }
+};
+
+//------------------------------ConvI2LNode------------------------------------
+// Convert integer to long
+class ConvI2LNode : public TypeNode {
+  public:
+  ConvI2LNode(Node *in1, const TypeLong* t = TypeLong::INT)
+  : TypeNode(t, 2)
+  { init_req(1, in1); }
+  virtual int Opcode() const;
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual uint  ideal_reg() const { return Op_RegL; }
+};
+
+//------------------------------ConvL2DNode------------------------------------
+// Convert Long to Double
+class ConvL2DNode : public Node {
+  public:
+  ConvL2DNode( Node *in1 ) : Node(0,in1) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return Type::DOUBLE; }
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual uint ideal_reg() const { return Op_RegD; }
+};
+
+//------------------------------ConvL2FNode------------------------------------
+// Convert Long to Float
+class ConvL2FNode : public Node {
+  public:
+  ConvL2FNode( Node *in1 ) : Node(0,in1) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return Type::FLOAT; }
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual uint  ideal_reg() const { return Op_RegF; }
+};
+
+//------------------------------ConvL2INode------------------------------------
+// Convert long to integer
+class ConvL2INode : public Node {
+  public:
+  ConvL2INode( Node *in1 ) : Node(0,in1) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return TypeInt::INT; }
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual uint  ideal_reg() const { return Op_RegI; }
+};
+
+//-----------------------------RoundFloatNode----------------------------------
+class RoundFloatNode: public Node {
+  public:
+  RoundFloatNode(Node* c, Node *in1): Node(c, in1) {}
+  virtual int   Opcode() const;
+  virtual const Type *bottom_type() const { return Type::FLOAT; }
+  virtual uint  ideal_reg() const { return Op_RegF; }
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual const Type *Value( PhaseTransform *phase ) const;
+};
+
+
+//-----------------------------RoundDoubleNode---------------------------------
+class RoundDoubleNode: public Node {
+  public:
+  RoundDoubleNode(Node* c, Node *in1): Node(c, in1) {}
+  virtual int   Opcode() const;
+  virtual const Type *bottom_type() const { return Type::DOUBLE; }
+  virtual uint  ideal_reg() const { return Op_RegD; }
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual const Type *Value( PhaseTransform *phase ) const;
+};
+
+
+#endif // SHARE_VM_OPTO_CONVERTNODE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/countbitsnode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "opto/countbitsnode.hpp"
+#include "opto/opcodes.hpp"
+#include "opto/phaseX.hpp"
+#include "opto/type.hpp"
+
+//------------------------------Value------------------------------------------
+const Type* CountLeadingZerosINode::Value(PhaseTransform* phase) const {
+  const Type* t = phase->type(in(1));
+  if (t == Type::TOP) return Type::TOP;
+  const TypeInt* ti = t->isa_int();
+  if (ti && ti->is_con()) {
+    jint i = ti->get_con();
+    // HD, Figure 5-6
+    if (i == 0)
+    return TypeInt::make(BitsPerInt);
+    int n = 1;
+    unsigned int x = i;
+    if (x >> 16 == 0) { n += 16; x <<= 16; }
+    if (x >> 24 == 0) { n +=  8; x <<=  8; }
+    if (x >> 28 == 0) { n +=  4; x <<=  4; }
+    if (x >> 30 == 0) { n +=  2; x <<=  2; }
+    n -= x >> 31;
+    return TypeInt::make(n);
+  }
+  return TypeInt::INT;
+}
+
+//------------------------------Value------------------------------------------
+const Type* CountLeadingZerosLNode::Value(PhaseTransform* phase) const {
+  const Type* t = phase->type(in(1));
+  if (t == Type::TOP) return Type::TOP;
+  const TypeLong* tl = t->isa_long();
+  if (tl && tl->is_con()) {
+    jlong l = tl->get_con();
+    // HD, Figure 5-6
+    if (l == 0)
+    return TypeInt::make(BitsPerLong);
+    int n = 1;
+    unsigned int x = (((julong) l) >> 32);
+    if (x == 0) { n += 32; x = (int) l; }
+    if (x >> 16 == 0) { n += 16; x <<= 16; }
+    if (x >> 24 == 0) { n +=  8; x <<=  8; }
+    if (x >> 28 == 0) { n +=  4; x <<=  4; }
+    if (x >> 30 == 0) { n +=  2; x <<=  2; }
+    n -= x >> 31;
+    return TypeInt::make(n);
+  }
+  return TypeInt::INT;
+}
+
+//------------------------------Value------------------------------------------
+const Type* CountTrailingZerosINode::Value(PhaseTransform* phase) const {
+  const Type* t = phase->type(in(1));
+  if (t == Type::TOP) return Type::TOP;
+  const TypeInt* ti = t->isa_int();
+  if (ti && ti->is_con()) {
+    jint i = ti->get_con();
+    // HD, Figure 5-14
+    int y;
+    if (i == 0)
+    return TypeInt::make(BitsPerInt);
+    int n = 31;
+    y = i << 16; if (y != 0) { n = n - 16; i = y; }
+    y = i <<  8; if (y != 0) { n = n -  8; i = y; }
+    y = i <<  4; if (y != 0) { n = n -  4; i = y; }
+    y = i <<  2; if (y != 0) { n = n -  2; i = y; }
+    y = i <<  1; if (y != 0) { n = n -  1; }
+    return TypeInt::make(n);
+  }
+  return TypeInt::INT;
+}
+
+//------------------------------Value------------------------------------------
+const Type* CountTrailingZerosLNode::Value(PhaseTransform* phase) const {
+  const Type* t = phase->type(in(1));
+  if (t == Type::TOP) return Type::TOP;
+  const TypeLong* tl = t->isa_long();
+  if (tl && tl->is_con()) {
+    jlong l = tl->get_con();
+    // HD, Figure 5-14
+    int x, y;
+    if (l == 0)
+    return TypeInt::make(BitsPerLong);
+    int n = 63;
+    y = (int) l; if (y != 0) { n = n - 32; x = y; } else x = (((julong) l) >> 32);
+    y = x << 16; if (y != 0) { n = n - 16; x = y; }
+    y = x <<  8; if (y != 0) { n = n -  8; x = y; }
+    y = x <<  4; if (y != 0) { n = n -  4; x = y; }
+    y = x <<  2; if (y != 0) { n = n -  2; x = y; }
+    y = x <<  1; if (y != 0) { n = n -  1; }
+    return TypeInt::make(n);
+  }
+  return TypeInt::INT;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/countbitsnode.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OPTO_COUNTBITSNODE_HPP
+#define SHARE_VM_OPTO_COUNTBITSNODE_HPP
+
+#include "opto/node.hpp"
+#include "opto/opcodes.hpp"
+
+class PhaseTransform;
+
+//---------- CountBitsNode -----------------------------------------------------
+class CountBitsNode : public Node {
+  public:
+  CountBitsNode(Node* in1) : Node(0, in1) {}
+  const Type* bottom_type() const { return TypeInt::INT; }
+  virtual uint ideal_reg() const { return Op_RegI; }
+};
+
+//---------- CountLeadingZerosINode --------------------------------------------
+// Count leading zeros (0-bit count starting from MSB) of an integer.
+class CountLeadingZerosINode : public CountBitsNode {
+  public:
+  CountLeadingZerosINode(Node* in1) : CountBitsNode(in1) {}
+  virtual int Opcode() const;
+  virtual const Type* Value(PhaseTransform* phase) const;
+};
+
+//---------- CountLeadingZerosLNode --------------------------------------------
+// Count leading zeros (0-bit count starting from MSB) of a long.
+class CountLeadingZerosLNode : public CountBitsNode {
+  public:
+  CountLeadingZerosLNode(Node* in1) : CountBitsNode(in1) {}
+  virtual int Opcode() const;
+  virtual const Type* Value(PhaseTransform* phase) const;
+};
+
+//---------- CountTrailingZerosINode -------------------------------------------
+// Count trailing zeros (0-bit count starting from LSB) of an integer.
+class CountTrailingZerosINode : public CountBitsNode {
+  public:
+  CountTrailingZerosINode(Node* in1) : CountBitsNode(in1) {}
+  virtual int Opcode() const;
+  virtual const Type* Value(PhaseTransform* phase) const;
+};
+
+//---------- CountTrailingZerosLNode -------------------------------------------
+// Count trailing zeros (0-bit count starting from LSB) of a long.
+class CountTrailingZerosLNode : public CountBitsNode {
+  public:
+  CountTrailingZerosLNode(Node* in1) : CountBitsNode(in1) {}
+  virtual int Opcode() const;
+  virtual const Type* Value(PhaseTransform* phase) const;
+};
+
+//---------- PopCountINode -----------------------------------------------------
+// Population count (bit count) of an integer.
+class PopCountINode : public CountBitsNode {
+  public:
+  PopCountINode(Node* in1) : CountBitsNode(in1) {}
+  virtual int Opcode() const;
+};
+
+//---------- PopCountLNode -----------------------------------------------------
+// Population count (bit count) of a long.
+class PopCountLNode : public CountBitsNode {
+  public:
+  PopCountLNode(Node* in1) : CountBitsNode(in1) {}
+  virtual int Opcode() const;
+};
+
+
+#endif // SHARE_VM_OPTO_COUNTBITSNODE_HPP
--- a/hotspot/src/share/vm/opto/divnode.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/divnode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -26,8 +26,10 @@
 #include "memory/allocation.inline.hpp"
 #include "opto/addnode.hpp"
 #include "opto/connode.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/divnode.hpp"
 #include "opto/machnode.hpp"
 #include "opto/matcher.hpp"
+#include "opto/movenode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/phaseX.hpp"
--- a/hotspot/src/share/vm/opto/doCall.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/doCall.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -31,6 +31,7 @@
 #include "interpreter/linkResolver.hpp"
 #include "opto/addnode.hpp"
 #include "opto/callGenerator.hpp"
+#include "opto/castnode.hpp"
 #include "opto/cfgnode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/parse.hpp"
@@ -249,8 +250,7 @@
           }
           CallGenerator* miss_cg;
           Deoptimization::DeoptReason reason = morphism == 2 ?
-                                    Deoptimization::Reason_bimorphic :
-                                    (speculative_receiver_type == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check);
+            Deoptimization::Reason_bimorphic : Deoptimization::reason_class_check(speculative_receiver_type != NULL);
           if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
               !too_many_traps(jvms->method(), jvms->bci(), reason)
              ) {
@@ -631,13 +631,7 @@
     }
     BasicType ct = ctype->basic_type();
     if (ct == T_OBJECT || ct == T_ARRAY) {
-      ciKlass* better_type = method()->return_profiled_type(bci());
-      if (UseTypeSpeculation && better_type != NULL) {
-        // If profiling reports a single type for the return value,
-        // feed it to the type system so it can propagate it as a
-        // speculative type
-        record_profile_for_speculation(stack(sp()-1), better_type);
-      }
+      record_profiled_return_for_speculation();
     }
   }
 
--- a/hotspot/src/share/vm/opto/escape.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/escape.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -33,6 +33,7 @@
 #include "opto/compile.hpp"
 #include "opto/escape.hpp"
+#include "opto/movenode.hpp"
 #include "opto/phaseX.hpp"
 #include "opto/rootnode.hpp"
 
 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
--- a/hotspot/src/share/vm/opto/generateOptoStub.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/generateOptoStub.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -27,7 +27,7 @@
 #include "opto/callnode.hpp"
 #include "opto/cfgnode.hpp"
 #include "opto/compile.hpp"
-#include "opto/connode.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/locknode.hpp"
 #include "opto/memnode.hpp"
 #include "opto/mulnode.hpp"
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -30,10 +30,14 @@
 #include "memory/barrierSet.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "opto/addnode.hpp"
+#include "opto/castnode.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/graphKit.hpp"
 #include "opto/idealKit.hpp"
+#include "opto/intrinsicnode.hpp"
 #include "opto/locknode.hpp"
 #include "opto/machnode.hpp"
+#include "opto/opaquenode.hpp"
 #include "opto/parse.hpp"
 #include "opto/rootnode.hpp"
 #include "opto/runtime.hpp"
@@ -612,10 +616,10 @@
   // Usual case:  Bail to interpreter.
   // Reserve the right to recompile if we haven't seen anything yet.
 
-  assert(!Deoptimization::reason_is_speculate(reason), "unsupported");
+  ciMethod* m = Deoptimization::reason_is_speculate(reason) ? C->method() : NULL;
   Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
   if (treat_throw_as_hot
-      && (method()->method_data()->trap_recompiled_at(bci(), NULL)
+      && (method()->method_data()->trap_recompiled_at(bci(), m)
           || C->too_many_traps(reason))) {
     // We cannot afford to take more traps here.  Suffer in the interpreter.
     if (C->log() != NULL)
@@ -1181,7 +1185,8 @@
 Node* GraphKit::null_check_common(Node* value, BasicType type,
                                   // optional arguments for variations:
                                   bool assert_null,
-                                  Node* *null_control) {
+                                  Node* *null_control,
+                                  bool speculative) {
   assert(!assert_null || null_control == NULL, "not both at once");
   if (stopped())  return top();
   if (!GenerateCompilerNullChecks && !assert_null && null_control == NULL) {
@@ -1291,13 +1296,13 @@
   // Branch to failure if null
   float ok_prob = PROB_MAX;  // a priori estimate:  nulls never happen
   Deoptimization::DeoptReason reason;
-  if (assert_null)
+  if (assert_null) {
     reason = Deoptimization::Reason_null_assert;
-  else if (type == T_OBJECT)
-    reason = Deoptimization::Reason_null_check;
-  else
+  } else if (type == T_OBJECT) {
+    reason = Deoptimization::reason_null_check(speculative);
+  } else {
     reason = Deoptimization::Reason_div0_check;
-
+  }
   // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
   // ciMethodData::has_trap_at will return a conservative -1 if any
   // must-be-null assertion has failed.  This could cause performance
@@ -2120,21 +2125,36 @@
  *
  * @param n          node that the type applies to
  * @param exact_kls  type from profiling
+ * @param maybe_null did profiling see null?
  *
  * @return           node with improved type
  */
-Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) {
+Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, bool maybe_null) {
   const Type* current_type = _gvn.type(n);
   assert(UseTypeSpeculation, "type speculation must be on");
 
-  const TypeOopPtr* speculative = current_type->speculative();
-
+  const TypePtr* speculative = current_type->speculative();
+
+  // Should the klass from the profile be recorded in the speculative type?
   if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
     const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
     const TypeOopPtr* xtype = tklass->as_instance_type();
     assert(xtype->klass_is_exact(), "Should be exact");
+    // Any reason to believe n is not null (from this profiling or a previous one)?
+    const TypePtr* ptr = (maybe_null && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
     // record the new speculative type's depth
-    speculative = xtype->with_inline_depth(jvms()->depth());
+    speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
+    speculative = speculative->with_inline_depth(jvms()->depth());
+  } else if (current_type->would_improve_ptr(maybe_null)) {
+    // Profiling report that null was never seen so we can change the
+    // speculative type to non null ptr.
+    assert(!maybe_null, "nothing to improve");
+    if (speculative == NULL) {
+      speculative = TypePtr::NOTNULL;
+    } else {
+      const TypePtr* ptr = TypePtr::NOTNULL;
+      speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
+    }
   }
 
   if (speculative != current_type->speculative()) {
@@ -2167,7 +2187,14 @@
     return n;
   }
   ciKlass* exact_kls = profile_has_unique_klass();
-  return record_profile_for_speculation(n, exact_kls);
+  bool maybe_null = true;
+  if (java_bc() == Bytecodes::_checkcast ||
+      java_bc() == Bytecodes::_instanceof ||
+      java_bc() == Bytecodes::_aastore) {
+    ciProfileData* data = method()->method_data()->bci_to_data(bci());
+    maybe_null = data == NULL ? true : data->as_BitData()->null_seen();
+  }
+  return record_profile_for_speculation(n, exact_kls, maybe_null);
 }
 
 /**
@@ -2187,9 +2215,10 @@
   for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
     const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
     if (targ->basic_type() == T_OBJECT || targ->basic_type() == T_ARRAY) {
-      ciKlass* better_type = method()->argument_profiled_type(bci(), i);
-      if (better_type != NULL) {
-        record_profile_for_speculation(argument(j), better_type);
+      bool maybe_null = true;
+      ciKlass* better_type = NULL;
+      if (method()->argument_profiled_type(bci(), i, better_type, maybe_null)) {
+        record_profile_for_speculation(argument(j), better_type, maybe_null);
       }
       i++;
     }
@@ -2206,15 +2235,34 @@
   }
   for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
     if (_gvn.type(local(i))->isa_oopptr()) {
-      ciKlass* better_type = method()->parameter_profiled_type(j);
-      if (better_type != NULL) {
-        record_profile_for_speculation(local(i), better_type);
+      bool maybe_null = true;
+      ciKlass* better_type = NULL;
+      if (method()->parameter_profiled_type(j, better_type, maybe_null)) {
+        record_profile_for_speculation(local(i), better_type, maybe_null);
       }
       j++;
     }
   }
 }
 
+/**
+ * Record profiling data from return value profiling at an invoke with
+ * the type system so that it can propagate it (speculation)
+ */
+void GraphKit::record_profiled_return_for_speculation() {
+  if (!UseTypeSpeculation) {
+    return;
+  }
+  bool maybe_null = true;
+  ciKlass* better_type = NULL;
+  if (method()->return_profiled_type(bci(), better_type, maybe_null)) {
+    // If profiling reports a single type for the return value,
+    // feed it to the type system so it can propagate it as a
+    // speculative type
+    record_profile_for_speculation(stack(sp()-1), better_type, maybe_null);
+  }
+}
+
 void GraphKit::round_double_result(ciMethod* dest_method) {
   // A non-strict method may return a double value which has an extended
   // exponent, but this must not be visible in a caller which is 'strict'
@@ -2294,10 +2342,12 @@
 // Null check oop.  Set null-path control into Region in slot 3.
 // Make a cast-not-nullness use the other not-null control.  Return cast.
 Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
-                               bool never_see_null, bool safe_for_replace) {
+                               bool never_see_null,
+                               bool safe_for_replace,
+                               bool speculative) {
   // Initial NULL check taken path
   (*null_control) = top();
-  Node* cast = null_check_common(value, T_OBJECT, false, null_control);
+  Node* cast = null_check_common(value, T_OBJECT, false, null_control, speculative);
 
   // Generate uncommon_trap:
   if (never_see_null && (*null_control) != top()) {
@@ -2308,7 +2358,8 @@
     PreserveJVMState pjvms(this);
     set_control(*null_control);
     replace_in_map(value, null());
-    uncommon_trap(Deoptimization::Reason_null_check,
+    Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculative);
+    uncommon_trap(reason,
                   Deoptimization::Action_make_not_entrant);
     (*null_control) = top();    // NULL path is dead
   }
@@ -2732,11 +2783,16 @@
 // recompile; the offending check will be recompiled to handle NULLs.
 // If we see several offending BCIs, then all checks in the
 // method will be recompiled.
-bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) {
+bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
+  speculating = !_gvn.type(obj)->speculative_maybe_null();
+  Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
   if (UncommonNullCast               // Cutout for this technique
       && obj != null()               // And not the -Xcomp stupid case?
-      && !too_many_traps(Deoptimization::Reason_null_check)
+      && !too_many_traps(reason)
       ) {
+    if (speculating) {
+      return true;
+    }
     if (data == NULL)
       // Edge case:  no mature data.  Be optimistic here.
       return true;
@@ -2746,6 +2802,7 @@
            java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here");
     return !data->as_BitData()->null_seen();
   }
+  speculating = false;
   return false;
 }
 
@@ -2758,7 +2815,7 @@
                                              bool safe_for_replace) {
   if (!UseTypeProfile || !TypeProfileCasts) return NULL;
 
-  Deoptimization::DeoptReason reason = spec_klass == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check;
+  Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != NULL);
 
   // Make sure we haven't already deoptimized from this tactic.
   if (too_many_traps(reason))
@@ -2811,7 +2868,7 @@
   // type == NULL if profiling tells us this object is always null
   if (type != NULL) {
     Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
-    Deoptimization::DeoptReason null_reason = Deoptimization::Reason_null_check;
+    Deoptimization::DeoptReason null_reason = Deoptimization::Reason_speculate_null_check;
     if (!too_many_traps(null_reason) &&
         !too_many_traps(class_reason)) {
       Node* not_null_obj = NULL;
@@ -2819,7 +2876,7 @@
       // there's no need for a null check
       if (!not_null) {
         Node* null_ctl = top();
-        not_null_obj = null_check_oop(obj, &null_ctl, true, true);
+        not_null_obj = null_check_oop(obj, &null_ctl, true, true, true);
         assert(null_ctl->is_top(), "no null control here");
       } else {
         not_null_obj = obj;
@@ -2867,12 +2924,13 @@
   if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
     data = method()->method_data()->bci_to_data(bci());
   }
+  bool speculative_not_null = false;
   bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
-                         && seems_never_null(obj, data));
+                         && seems_never_null(obj, data, speculative_not_null));
 
   // Null check; get casted pointer; set region slot 3
   Node* null_ctl = top();
-  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
+  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
 
   // If not_null_obj is dead, only null-path is taken
   if (stopped()) {              // Doing instance-of on a NULL?
@@ -2995,12 +3053,13 @@
   C->set_has_split_ifs(true); // Has chance for split-if optimization
 
   // Use null-cast information if it is available
+  bool speculative_not_null = false;
   bool never_see_null = ((failure_control == NULL)  // regular case only
-                         && seems_never_null(obj, data));
+                         && seems_never_null(obj, data, speculative_not_null));
 
   // Null check; get casted pointer; set region slot 3
   Node* null_ctl = top();
-  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
+  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
 
   // If not_null_obj is dead, only null-path is taken
   if (stopped()) {              // Doing instance-of on a NULL?
--- a/hotspot/src/share/vm/opto/graphKit.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/graphKit.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -351,9 +351,11 @@
   // Return the value cast to not-null.
   // Be clever about equivalent dominating null checks.
   Node* null_check_common(Node* value, BasicType type,
-                          bool assert_null = false, Node* *null_control = NULL);
+                          bool assert_null = false,
+                          Node* *null_control = NULL,
+                          bool speculative = false);
   Node* null_check(Node* value, BasicType type = T_OBJECT) {
-    return null_check_common(value, type);
+    return null_check_common(value, type, false, NULL, !_gvn.type(value)->speculative_maybe_null());
   }
   Node* null_check_receiver() {
     assert(argument(0)->bottom_type()->isa_ptr(), "must be");
@@ -382,10 +384,12 @@
   // If safe_for_replace, then we can replace the value with the cast
   // in the parsing map (the cast is guaranteed to dominate the map)
   Node* null_check_oop(Node* value, Node* *null_control,
-                       bool never_see_null = false, bool safe_for_replace = false);
+                       bool never_see_null = false,
+                       bool safe_for_replace = false,
+                       bool speculative = false);
 
   // Check the null_seen bit.
-  bool seems_never_null(Node* obj, ciProfileData* data);
+  bool seems_never_null(Node* obj, ciProfileData* data, bool& speculating);
 
   // Check for unique class for receiver at call
   ciKlass* profile_has_unique_klass() {
@@ -399,10 +403,11 @@
   }
 
   // record type from profiling with the type system
-  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls);
-  Node* record_profiled_receiver_for_speculation(Node* n);
+  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls, bool maybe_null);
   void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
   void record_profiled_parameters_for_speculation();
+  void record_profiled_return_for_speculation();
+  Node* record_profiled_receiver_for_speculation(Node* n);
 
   // Use the type profile to narrow an object type.
   Node* maybe_cast_profiled_receiver(Node* not_null_obj,
--- a/hotspot/src/share/vm/opto/idealKit.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/idealKit.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -27,6 +27,7 @@
 
 #include "opto/addnode.hpp"
 #include "opto/cfgnode.hpp"
+#include "opto/castnode.hpp"
 #include "opto/connode.hpp"
 #include "opto/divnode.hpp"
 #include "opto/graphKit.hpp"
--- a/hotspot/src/share/vm/opto/ifg.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/ifg.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -31,7 +31,6 @@
 #include "opto/cfgnode.hpp"
 #include "opto/chaitin.hpp"
 #include "opto/coalesce.hpp"
-#include "opto/connode.hpp"
 #include "opto/indexSet.hpp"
 #include "opto/machnode.hpp"
 #include "opto/memnode.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/intrinsicnode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "opto/intrinsicnode.hpp"
+#include "opto/memnode.hpp"
+#include "opto/phaseX.hpp"
+
+//=============================================================================
+// Do not match memory edge.
+uint StrIntrinsicNode::match_edge(uint idx) const {
+  return idx == 2 || idx == 3;
+}
+
+//------------------------------Ideal------------------------------------------
+// Return a node which is more "ideal" than the current node.  Strip out
+// control copies
+Node *StrIntrinsicNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  if (remove_dead_region(phase, can_reshape)) return this;
+  // Don't bother trying to transform a dead node
+  if (in(0) && in(0)->is_top())  return NULL;
+
+  if (can_reshape) {
+    Node* mem = phase->transform(in(MemNode::Memory));
+    // If transformed to a MergeMem, get the desired slice
+    uint alias_idx = phase->C->get_alias_index(adr_type());
+    mem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(alias_idx) : mem;
+    if (mem != in(MemNode::Memory)) {
+      set_req(MemNode::Memory, mem);
+      return this;
+    }
+  }
+  return NULL;
+}
+
+//------------------------------Value------------------------------------------
+const Type *StrIntrinsicNode::Value( PhaseTransform *phase ) const {
+  if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
+  return bottom_type();
+}
+
+//=============================================================================
+//------------------------------match_edge-------------------------------------
+// Do not match memory edge
+uint EncodeISOArrayNode::match_edge(uint idx) const {
+  return idx == 2 || idx == 3; // EncodeISOArray src (Binary dst len)
+}
+
+//------------------------------Ideal------------------------------------------
+// Return a node which is more "ideal" than the current node.  Strip out
+// control copies
+Node *EncodeISOArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  return remove_dead_region(phase, can_reshape) ? this : NULL;
+}
+
+//------------------------------Value------------------------------------------
+const Type *EncodeISOArrayNode::Value(PhaseTransform *phase) const {
+  if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
+  return bottom_type();
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/intrinsicnode.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OPTO_INTRINSICNODE_HPP
+#define SHARE_VM_OPTO_INTRINSICNODE_HPP
+
+#include "opto/node.hpp"
+#include "opto/opcodes.hpp"
+
+
+//----------------------PartialSubtypeCheckNode--------------------------------
+// The 2nd slow-half of a subtype check.  Scan the subklass's 2ndary superklass
+// array for an instance of the superklass.  Set a hidden internal cache on a
+// hit (cache is checked with exposed code in gen_subtype_check()).  Return
+// not zero for a miss or zero for a hit.
+class PartialSubtypeCheckNode : public Node {
+  public:
+  PartialSubtypeCheckNode(Node* c, Node* sub, Node* super) : Node(c,sub,super) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
+  virtual uint ideal_reg() const { return Op_RegP; }
+};
+
+//------------------------------StrIntrinsic-------------------------------
+// Base class for Ideal nodes used in String intrinsic code.
+class StrIntrinsicNode: public Node {
+  public:
+  StrIntrinsicNode(Node* control, Node* char_array_mem,
+                   Node* s1, Node* c1, Node* s2, Node* c2):
+  Node(control, char_array_mem, s1, c1, s2, c2) {
+  }
+
+  StrIntrinsicNode(Node* control, Node* char_array_mem,
+                   Node* s1, Node* s2, Node* c):
+  Node(control, char_array_mem, s1, s2, c) {
+  }
+
+  StrIntrinsicNode(Node* control, Node* char_array_mem,
+                   Node* s1, Node* s2):
+  Node(control, char_array_mem, s1, s2) {
+  }
+
+  virtual bool depends_only_on_test() const { return false; }
+  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
+  virtual uint match_edge(uint idx) const;
+  virtual uint ideal_reg() const { return Op_RegI; }
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual const Type *Value(PhaseTransform *phase) const;
+};
+
+//------------------------------StrComp-------------------------------------
+class StrCompNode: public StrIntrinsicNode {
+  public:
+  StrCompNode(Node* control, Node* char_array_mem,
+              Node* s1, Node* c1, Node* s2, Node* c2):
+  StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
+  virtual int Opcode() const;
+  virtual const Type* bottom_type() const { return TypeInt::INT; }
+};
+
+//------------------------------StrEquals-------------------------------------
+class StrEqualsNode: public StrIntrinsicNode {
+  public:
+  StrEqualsNode(Node* control, Node* char_array_mem,
+                Node* s1, Node* s2, Node* c):
+  StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
+  virtual int Opcode() const;
+  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
+};
+
+//------------------------------StrIndexOf-------------------------------------
+class StrIndexOfNode: public StrIntrinsicNode {
+  public:
+  StrIndexOfNode(Node* control, Node* char_array_mem,
+                 Node* s1, Node* c1, Node* s2, Node* c2):
+  StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
+  virtual int Opcode() const;
+  virtual const Type* bottom_type() const { return TypeInt::INT; }
+};
+
+//------------------------------AryEq---------------------------------------
+class AryEqNode: public StrIntrinsicNode {
+  public:
+  AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
+  StrIntrinsicNode(control, char_array_mem, s1, s2) {};
+  virtual int Opcode() const;
+  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
+};
+
+
+//------------------------------EncodeISOArray--------------------------------
+// encode char[] to byte[] in ISO_8859_1
+class EncodeISOArrayNode: public Node {
+  public:
+  EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {};
+  virtual int Opcode() const;
+  virtual bool depends_only_on_test() const { return false; }
+  virtual const Type* bottom_type() const { return TypeInt::INT; }
+  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
+  virtual uint match_edge(uint idx) const;
+  virtual uint ideal_reg() const { return Op_RegI; }
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual const Type *Value(PhaseTransform *phase) const;
+};
+
+#endif // SHARE_VM_OPTO_INTRINSICNODE_HPP
--- a/hotspot/src/share/vm/opto/library_call.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -30,10 +30,16 @@
 #include "oops/objArrayKlass.hpp"
 #include "opto/addnode.hpp"
 #include "opto/callGenerator.hpp"
+#include "opto/castnode.hpp"
 #include "opto/cfgnode.hpp"
+#include "opto/convertnode.hpp"
+#include "opto/countbitsnode.hpp"
+#include "opto/intrinsicnode.hpp"
 #include "opto/idealKit.hpp"
 #include "opto/mathexactnode.hpp"
+#include "opto/movenode.hpp"
 #include "opto/mulnode.hpp"
+#include "opto/narrowptrnode.hpp"
 #include "opto/parse.hpp"
 #include "opto/runtime.hpp"
 #include "opto/subnode.hpp"
@@ -4658,7 +4664,7 @@
 
     ciKlass* src_k = NULL;
     if (!has_src) {
-      src_k = src_type->speculative_type();
+      src_k = src_type->speculative_type_not_null();
       if (src_k != NULL && src_k->is_array_klass()) {
         could_have_src = true;
       }
@@ -4666,7 +4672,7 @@
 
     ciKlass* dest_k = NULL;
     if (!has_dest) {
-      dest_k = dest_type->speculative_type();
+      dest_k = dest_type->speculative_type_not_null();
       if (dest_k != NULL && dest_k->is_array_klass()) {
         could_have_dest = true;
       }
@@ -4738,13 +4744,13 @@
     ciKlass* src_k = top_src->klass();
     ciKlass* dest_k = top_dest->klass();
     if (!src_spec) {
-      src_k = src_type->speculative_type();
+      src_k = src_type->speculative_type_not_null();
       if (src_k != NULL && src_k->is_array_klass()) {
           could_have_src = true;
       }
     }
     if (!dest_spec) {
-      dest_k = dest_type->speculative_type();
+      dest_k = dest_type->speculative_type_not_null();
       if (dest_k != NULL && dest_k->is_array_klass()) {
         could_have_dest = true;
       }
--- a/hotspot/src/share/vm/opto/loopPredicate.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/loopPredicate.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -27,8 +27,10 @@
 #include "opto/addnode.hpp"
 #include "opto/callnode.hpp"
 #include "opto/connode.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/loopnode.hpp"
 #include "opto/mulnode.hpp"
+#include "opto/opaquenode.hpp"
 #include "opto/rootnode.hpp"
 #include "opto/subnode.hpp"
 
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -28,9 +28,12 @@
 #include "opto/addnode.hpp"
 #include "opto/callnode.hpp"
 #include "opto/connode.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/divnode.hpp"
 #include "opto/loopnode.hpp"
 #include "opto/mulnode.hpp"
+#include "opto/movenode.hpp"
+#include "opto/opaquenode.hpp"
 #include "opto/rootnode.hpp"
 #include "opto/runtime.hpp"
 #include "opto/subnode.hpp"
--- a/hotspot/src/share/vm/opto/loopUnswitch.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/loopUnswitch.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -25,7 +25,9 @@
 #include "precompiled.hpp"
 #include "memory/allocation.inline.hpp"
 #include "opto/connode.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/loopnode.hpp"
+#include "opto/opaquenode.hpp"
 #include "opto/rootnode.hpp"
 
 //================= Loop Unswitching =====================
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -30,6 +30,7 @@
 #include "opto/addnode.hpp"
 #include "opto/callnode.hpp"
 #include "opto/connode.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/divnode.hpp"
 #include "opto/idealGraphPrinter.hpp"
 #include "opto/loopnode.hpp"
--- a/hotspot/src/share/vm/opto/loopopts.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/loopopts.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -30,6 +30,8 @@
 #include "opto/loopnode.hpp"
 #include "opto/matcher.hpp"
 #include "opto/mulnode.hpp"
+#include "opto/movenode.hpp"
+#include "opto/opaquenode.hpp"
 #include "opto/rootnode.hpp"
 #include "opto/subnode.hpp"
 
--- a/hotspot/src/share/vm/opto/macro.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/macro.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -27,14 +27,17 @@
 #include "libadt/vectset.hpp"
 #include "opto/addnode.hpp"
 #include "opto/callnode.hpp"
+#include "opto/castnode.hpp"
 #include "opto/cfgnode.hpp"
 #include "opto/compile.hpp"
-#include "opto/connode.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/locknode.hpp"
 #include "opto/loopnode.hpp"
 #include "opto/macro.hpp"
 #include "opto/memnode.hpp"
+#include "opto/narrowptrnode.hpp"
 #include "opto/node.hpp"
+#include "opto/opaquenode.hpp"
 #include "opto/phaseX.hpp"
 #include "opto/rootnode.hpp"
 #include "opto/runtime.hpp"
--- a/hotspot/src/share/vm/opto/matcher.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -26,10 +26,10 @@
 #include "memory/allocation.inline.hpp"
 #include "opto/addnode.hpp"
 #include "opto/callnode.hpp"
-#include "opto/connode.hpp"
 #include "opto/idealGraphPrinter.hpp"
 #include "opto/matcher.hpp"
 #include "opto/memnode.hpp"
+#include "opto/movenode.hpp"
 #include "opto/opcodes.hpp"
 #include "opto/regmask.hpp"
 #include "opto/rootnode.hpp"
--- a/hotspot/src/share/vm/opto/memnode.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -31,11 +31,13 @@
 #include "opto/cfgnode.hpp"
 #include "opto/compile.hpp"
 #include "opto/connode.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/loopnode.hpp"
 #include "opto/machnode.hpp"
 #include "opto/matcher.hpp"
 #include "opto/memnode.hpp"
 #include "opto/mulnode.hpp"
+#include "opto/narrowptrnode.hpp"
 #include "opto/phaseX.hpp"
 #include "opto/regmask.hpp"
 
@@ -2904,59 +2906,6 @@
 }
 
 //=============================================================================
-// Do not match memory edge.
-uint StrIntrinsicNode::match_edge(uint idx) const {
-  return idx == 2 || idx == 3;
-}
-
-//------------------------------Ideal------------------------------------------
-// Return a node which is more "ideal" than the current node.  Strip out
-// control copies
-Node *StrIntrinsicNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  if (remove_dead_region(phase, can_reshape)) return this;
-  // Don't bother trying to transform a dead node
-  if (in(0) && in(0)->is_top())  return NULL;
-
-  if (can_reshape) {
-    Node* mem = phase->transform(in(MemNode::Memory));
-    // If transformed to a MergeMem, get the desired slice
-    uint alias_idx = phase->C->get_alias_index(adr_type());
-    mem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(alias_idx) : mem;
-    if (mem != in(MemNode::Memory)) {
-      set_req(MemNode::Memory, mem);
-      return this;
-    }
-  }
-  return NULL;
-}
-
-//------------------------------Value------------------------------------------
-const Type *StrIntrinsicNode::Value( PhaseTransform *phase ) const {
-  if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
-  return bottom_type();
-}
-
-//=============================================================================
-//------------------------------match_edge-------------------------------------
-// Do not match memory edge
-uint EncodeISOArrayNode::match_edge(uint idx) const {
-  return idx == 2 || idx == 3; // EncodeISOArray src (Binary dst len)
-}
-
-//------------------------------Ideal------------------------------------------
-// Return a node which is more "ideal" than the current node.  Strip out
-// control copies
-Node *EncodeISOArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  return remove_dead_region(phase, can_reshape) ? this : NULL;
-}
-
-//------------------------------Value------------------------------------------
-const Type *EncodeISOArrayNode::Value(PhaseTransform *phase) const {
-  if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
-  return bottom_type();
-}
-
-//=============================================================================
 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
   : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
     _adr_type(C->get_adr_type(alias_idx))
--- a/hotspot/src/share/vm/opto/memnode.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/memnode.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -866,88 +866,6 @@
   static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
 };
 
-//------------------------------StrIntrinsic-------------------------------
-// Base class for Ideal nodes used in String instrinsic code.
-class StrIntrinsicNode: public Node {
-public:
-  StrIntrinsicNode(Node* control, Node* char_array_mem,
-                   Node* s1, Node* c1, Node* s2, Node* c2):
-    Node(control, char_array_mem, s1, c1, s2, c2) {
-  }
-
-  StrIntrinsicNode(Node* control, Node* char_array_mem,
-                   Node* s1, Node* s2, Node* c):
-    Node(control, char_array_mem, s1, s2, c) {
-  }
-
-  StrIntrinsicNode(Node* control, Node* char_array_mem,
-                   Node* s1, Node* s2):
-    Node(control, char_array_mem, s1, s2) {
-  }
-
-  virtual bool depends_only_on_test() const { return false; }
-  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
-  virtual uint match_edge(uint idx) const;
-  virtual uint ideal_reg() const { return Op_RegI; }
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual const Type *Value(PhaseTransform *phase) const;
-};
-
-//------------------------------StrComp-------------------------------------
-class StrCompNode: public StrIntrinsicNode {
-public:
-  StrCompNode(Node* control, Node* char_array_mem,
-              Node* s1, Node* c1, Node* s2, Node* c2):
-    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
-  virtual int Opcode() const;
-  virtual const Type* bottom_type() const { return TypeInt::INT; }
-};
-
-//------------------------------StrEquals-------------------------------------
-class StrEqualsNode: public StrIntrinsicNode {
-public:
-  StrEqualsNode(Node* control, Node* char_array_mem,
-                Node* s1, Node* s2, Node* c):
-    StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
-  virtual int Opcode() const;
-  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
-};
-
-//------------------------------StrIndexOf-------------------------------------
-class StrIndexOfNode: public StrIntrinsicNode {
-public:
-  StrIndexOfNode(Node* control, Node* char_array_mem,
-              Node* s1, Node* c1, Node* s2, Node* c2):
-    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
-  virtual int Opcode() const;
-  virtual const Type* bottom_type() const { return TypeInt::INT; }
-};
-
-//------------------------------AryEq---------------------------------------
-class AryEqNode: public StrIntrinsicNode {
-public:
-  AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
-    StrIntrinsicNode(control, char_array_mem, s1, s2) {};
-  virtual int Opcode() const;
-  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
-};
-
-
-//------------------------------EncodeISOArray--------------------------------
-// encode char[] to byte[] in ISO_8859_1
-class EncodeISOArrayNode: public Node {
-public:
-  EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {};
-  virtual int Opcode() const;
-  virtual bool depends_only_on_test() const { return false; }
-  virtual const Type* bottom_type() const { return TypeInt::INT; }
-  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
-  virtual uint match_edge(uint idx) const;
-  virtual uint ideal_reg() const { return Op_RegI; }
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
-  virtual const Type *Value(PhaseTransform *phase) const;
-};
-
 //------------------------------MemBar-----------------------------------------
 // There are different flavors of Memory Barriers to match the Java Memory
 // Model.  Monitor-enter and volatile-load act as Aquires: no following ref
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/movenode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,398 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "opto/addnode.hpp"
+#include "opto/connode.hpp"
+#include "opto/convertnode.hpp"
+#include "opto/movenode.hpp"
+#include "opto/phaseX.hpp"
+#include "opto/subnode.hpp"
+
+//=============================================================================
+/*
+ The major change is for CMoveP and StrComp.  They have related but slightly
+ different problems.  They both take in TWO oops which are both null-checked
+ independently before the using Node.  After CCP removes the CastPP's they need
+ to pick up the guarding test edge - in this case TWO control edges.  I tried
+ various solutions, all have problems:
+
+ (1) Do nothing.  This leads to a bug where we hoist a Load from a CMoveP or a
+ StrComp above a guarding null check.  I've seen both cases in normal -Xcomp
+ testing.
+
+ (2) Plug the control edge from 1 of the 2 oops in.  Apparent problem here is
+ to figure out which test post-dominates.  The real problem is that it doesn't
+ matter which one you pick.  After you pick up, the dominating-test elider in
+ IGVN can remove the test and allow you to hoist up to the dominating test on
+ the chosen oop bypassing the test on the not-chosen oop.  Seen in testing.
+ Oops.
+
+ (3) Leave the CastPP's in.  This makes the graph more accurate in some sense;
+ we get to keep around the knowledge that an oop is not-null after some test.
+ Alas, the CastPP's interfere with GVN (some values are the regular oop, some
+ are the CastPP of the oop, all merge at Phi's which cannot collapse, etc).
+ This cost us 10% on SpecJVM, even when I removed some of the more trivial
+ cases in the optimizer.  Removing more useless Phi's started allowing Loads to
+ illegally float above null checks.  I gave up on this approach.
+
+ (4) Add BOTH control edges to both tests.  Alas, too much code knows that
+ control edges are in slot-zero ONLY.  Many quick asserts fail; no way to do
+ this one.  Note that I really want to allow the CMoveP to float and add both
+ control edges to the dependent Load op - meaning I can select early but I
+ cannot Load until I pass both tests.
+
+ (5) Do not hoist CMoveP and StrComp.  To this end I added the v-call
+ depends_only_on_test().  No obvious performance loss on Spec, but we are
+ clearly conservative on CMoveP (also so on StrComp but that's unlikely to
+ matter ever).
+
+ */
+
+
+//------------------------------Ideal------------------------------------------
+// Return a node which is more "ideal" than the current node.
+// Move constants to the right.
+Node *CMoveNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  if( in(0) && remove_dead_region(phase, can_reshape) ) return this;
+  // Don't bother trying to transform a dead node
+  if( in(0) && in(0)->is_top() )  return NULL;
+  assert( !phase->eqv(in(Condition), this) &&
+         !phase->eqv(in(IfFalse), this) &&
+         !phase->eqv(in(IfTrue), this), "dead loop in CMoveNode::Ideal" );
+  if( phase->type(in(Condition)) == Type::TOP )
+  return NULL; // return NULL when Condition is dead
+
+  if( in(IfFalse)->is_Con() && !in(IfTrue)->is_Con() ) {
+    if( in(Condition)->is_Bool() ) {
+      BoolNode* b  = in(Condition)->as_Bool();
+      BoolNode* b2 = b->negate(phase);
+      return make( phase->C, in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type );
+    }
+  }
+  return NULL;
+}
+
+//------------------------------is_cmove_id------------------------------------
+// Helper function to check for CMOVE identity.  Shared with PhiNode::Identity
+Node *CMoveNode::is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f, BoolNode *b ) {
+  // Check for Cmp'ing and CMove'ing same values
+  if( (phase->eqv(cmp->in(1),f) &&
+       phase->eqv(cmp->in(2),t)) ||
+     // Swapped Cmp is OK
+     (phase->eqv(cmp->in(2),f) &&
+      phase->eqv(cmp->in(1),t)) ) {
+       // Give up this identity check for floating points because it may choose incorrect
+       // value around 0.0 and -0.0
+       if ( cmp->Opcode()==Op_CmpF || cmp->Opcode()==Op_CmpD )
+       return NULL;
+       // Check for "(t==f)?t:f;" and replace with "f"
+       if( b->_test._test == BoolTest::eq )
+       return f;
+       // Allow the inverted case as well
+       // Check for "(t!=f)?t:f;" and replace with "t"
+       if( b->_test._test == BoolTest::ne )
+       return t;
+     }
+  return NULL;
+}
+
+//------------------------------Identity---------------------------------------
+// Conditional-move is an identity if both inputs are the same, or the test
+// true or false.
+Node *CMoveNode::Identity( PhaseTransform *phase ) {
+  if( phase->eqv(in(IfFalse),in(IfTrue)) ) // C-moving identical inputs?
+  return in(IfFalse);         // Then it doesn't matter
+  if( phase->type(in(Condition)) == TypeInt::ZERO )
+  return in(IfFalse);         // Always pick left(false) input
+  if( phase->type(in(Condition)) == TypeInt::ONE )
+  return in(IfTrue);          // Always pick right(true) input
+
+  // Check for CMove'ing a constant after comparing against the constant.
+  // Happens all the time now, since if we compare equality vs a constant in
+  // the parser, we "know" the variable is constant on one path and we force
+  // it.  Thus code like "if( x==0 ) {/*EMPTY*/}" ends up inserting a
+  // conditional move: "x = (x==0)?0:x;".  Yucko.  This fix is slightly more
+  // general in that we don't need constants.
+  if( in(Condition)->is_Bool() ) {
+    BoolNode *b = in(Condition)->as_Bool();
+    Node *cmp = b->in(1);
+    if( cmp->is_Cmp() ) {
+      Node *id = is_cmove_id( phase, cmp, in(IfTrue), in(IfFalse), b );
+      if( id ) return id;
+    }
+  }
+
+  return this;
+}
+
+//------------------------------Value------------------------------------------
+// Result is the meet of inputs
+const Type *CMoveNode::Value( PhaseTransform *phase ) const {
+  if( phase->type(in(Condition)) == Type::TOP )
+  return Type::TOP;
+  return phase->type(in(IfFalse))->meet_speculative(phase->type(in(IfTrue)));
+}
+
+//------------------------------make-------------------------------------------
+// Make a correctly-flavored CMove.  Since _type is directly determined
+// from the inputs we do not need to specify it here.
+CMoveNode *CMoveNode::make( Compile *C, Node *c, Node *bol, Node *left, Node *right, const Type *t ) {
+  switch( t->basic_type() ) {
+    case T_INT:     return new (C) CMoveINode( bol, left, right, t->is_int() );
+    case T_FLOAT:   return new (C) CMoveFNode( bol, left, right, t );
+    case T_DOUBLE:  return new (C) CMoveDNode( bol, left, right, t );
+    case T_LONG:    return new (C) CMoveLNode( bol, left, right, t->is_long() );
+    case T_OBJECT:  return new (C) CMovePNode( c, bol, left, right, t->is_oopptr() );
+    case T_ADDRESS: return new (C) CMovePNode( c, bol, left, right, t->is_ptr() );
+    case T_NARROWOOP: return new (C) CMoveNNode( c, bol, left, right, t );
+    default:
+    ShouldNotReachHere();
+    return NULL;
+  }
+}
+
+//=============================================================================
+//------------------------------Ideal------------------------------------------
+// Return a node which is more "ideal" than the current node.
+// Check for conversions to boolean
+Node *CMoveINode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  // Try generic ideal's first
+  Node *x = CMoveNode::Ideal(phase, can_reshape);
+  if( x ) return x;
+
+  // If zero is on the left (false-case, no-move-case) it must mean another
+  // constant is on the right (otherwise the shared CMove::Ideal code would
+  // have moved the constant to the right).  This situation is bad for Intel
+  // and a don't-care for Sparc.  It's bad for Intel because the zero has to
+  // be manifested in a register with a XOR which kills flags, which are live
+  // on input to the CMoveI, leading to a situation which causes excessive
+  // spilling on Intel.  For Sparc, if the zero is on the left the Sparc will
+  // zero a register via G0 and conditionally-move the other constant.  If the
+  // zero is on the right, the Sparc will load the first constant with a
+  // 13-bit set-lo and conditionally move G0.  See bug 4677505.
+  if( phase->type(in(IfFalse)) == TypeInt::ZERO && !(phase->type(in(IfTrue)) == TypeInt::ZERO) ) {
+    if( in(Condition)->is_Bool() ) {
+      BoolNode* b  = in(Condition)->as_Bool();
+      BoolNode* b2 = b->negate(phase);
+      return make( phase->C, in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type );
+    }
+  }
+
+  // Now check for booleans
+  int flip = 0;
+
+  // Check for picking from zero/one
+  if( phase->type(in(IfFalse)) == TypeInt::ZERO && phase->type(in(IfTrue)) == TypeInt::ONE ) {
+    flip = 1 - flip;
+  } else if( phase->type(in(IfFalse)) == TypeInt::ONE && phase->type(in(IfTrue)) == TypeInt::ZERO ) {
+  } else return NULL;
+
+  // Check for eq/ne test
+  if( !in(1)->is_Bool() ) return NULL;
+  BoolNode *bol = in(1)->as_Bool();
+  if( bol->_test._test == BoolTest::eq ) {
+  } else if( bol->_test._test == BoolTest::ne ) {
+    flip = 1-flip;
+  } else return NULL;
+
+  // Check for vs 0 or 1
+  if( !bol->in(1)->is_Cmp() ) return NULL;
+  const CmpNode *cmp = bol->in(1)->as_Cmp();
+  if( phase->type(cmp->in(2)) == TypeInt::ZERO ) {
+  } else if( phase->type(cmp->in(2)) == TypeInt::ONE ) {
+    // Allow cmp-vs-1 if the other input is bounded by 0-1
+    if( phase->type(cmp->in(1)) != TypeInt::BOOL )
+    return NULL;
+    flip = 1 - flip;
+  } else return NULL;
+
+  // Convert to a bool (flipped)
+  // Build int->bool conversion
+#ifndef PRODUCT
+  if( PrintOpto ) tty->print_cr("CMOV to I2B");
+#endif
+  Node *n = new (phase->C) Conv2BNode( cmp->in(1) );
+  if( flip )
+  n = new (phase->C) XorINode( phase->transform(n), phase->intcon(1) );
+
+  return n;
+}
+
+//=============================================================================
+//------------------------------Ideal------------------------------------------
+// Return a node which is more "ideal" than the current node.
+// Check for absolute value
+Node *CMoveFNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  // Try generic ideal's first
+  Node *x = CMoveNode::Ideal(phase, can_reshape);
+  if( x ) return x;
+
+  int  cmp_zero_idx = 0;        // Index of compare input where to look for zero
+  int  phi_x_idx = 0;           // Index of phi input where to find naked x
+
+  // Find the Bool
+  if( !in(1)->is_Bool() ) return NULL;
+  BoolNode *bol = in(1)->as_Bool();
+  // Check bool sense
+  switch( bol->_test._test ) {
+    case BoolTest::lt: cmp_zero_idx = 1; phi_x_idx = IfTrue;  break;
+    case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = IfFalse; break;
+    case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = IfTrue;  break;
+    case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = IfFalse; break;
+    default:           return NULL;                           break;
+  }
+
+  // Find zero input of CmpF; the other input is being abs'd
+  Node *cmpf = bol->in(1);
+  if( cmpf->Opcode() != Op_CmpF ) return NULL;
+  Node *X = NULL;
+  bool flip = false;
+  if( phase->type(cmpf->in(cmp_zero_idx)) == TypeF::ZERO ) {
+    X = cmpf->in(3 - cmp_zero_idx);
+  } else if (phase->type(cmpf->in(3 - cmp_zero_idx)) == TypeF::ZERO) {
+    // The test is inverted, we should invert the result...
+    X = cmpf->in(cmp_zero_idx);
+    flip = true;
+  } else {
+    return NULL;
+  }
+
+  // If X is found on the appropriate phi input, find the subtract on the other
+  if( X != in(phi_x_idx) ) return NULL;
+  int phi_sub_idx = phi_x_idx == IfTrue ? IfFalse : IfTrue;
+  Node *sub = in(phi_sub_idx);
+
+  // Allow only SubF(0,X) and fail out for all others; NegF is not OK
+  if( sub->Opcode() != Op_SubF ||
+     sub->in(2) != X ||
+     phase->type(sub->in(1)) != TypeF::ZERO ) return NULL;
+
+  Node *abs = new (phase->C) AbsFNode( X );
+  if( flip )
+  abs = new (phase->C) SubFNode(sub->in(1), phase->transform(abs));
+
+  return abs;
+}
+
+//=============================================================================
+//------------------------------Ideal------------------------------------------
+// Return a node which is more "ideal" than the current node.
+// Check for absolute value
+Node *CMoveDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  // Try generic ideal's first
+  Node *x = CMoveNode::Ideal(phase, can_reshape);
+  if( x ) return x;
+
+  int  cmp_zero_idx = 0;        // Index of compare input where to look for zero
+  int  phi_x_idx = 0;           // Index of phi input where to find naked x
+
+  // Find the Bool
+  if( !in(1)->is_Bool() ) return NULL;
+  BoolNode *bol = in(1)->as_Bool();
+  // Check bool sense
+  switch( bol->_test._test ) {
+    case BoolTest::lt: cmp_zero_idx = 1; phi_x_idx = IfTrue;  break;
+    case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = IfFalse; break;
+    case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = IfTrue;  break;
+    case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = IfFalse; break;
+    default:           return NULL;                           break;
+  }
+
+  // Find zero input of CmpD; the other input is being abs'd
+  Node *cmpd = bol->in(1);
+  if( cmpd->Opcode() != Op_CmpD ) return NULL;
+  Node *X = NULL;
+  bool flip = false;
+  if( phase->type(cmpd->in(cmp_zero_idx)) == TypeD::ZERO ) {
+    X = cmpd->in(3 - cmp_zero_idx);
+  } else if (phase->type(cmpd->in(3 - cmp_zero_idx)) == TypeD::ZERO) {
+    // The test is inverted, we should invert the result...
+    X = cmpd->in(cmp_zero_idx);
+    flip = true;
+  } else {
+    return NULL;
+  }
+
+  // If X is found on the appropriate phi input, find the subtract on the other
+  if( X != in(phi_x_idx) ) return NULL;
+  int phi_sub_idx = phi_x_idx == IfTrue ? IfFalse : IfTrue;
+  Node *sub = in(phi_sub_idx);
+
+  // Allow only SubD(0,X) and fail out for all others; NegD is not OK
+  if( sub->Opcode() != Op_SubD ||
+     sub->in(2) != X ||
+     phase->type(sub->in(1)) != TypeD::ZERO ) return NULL;
+
+  Node *abs = new (phase->C) AbsDNode( X );
+  if( flip )
+  abs = new (phase->C) SubDNode(sub->in(1), phase->transform(abs));
+
+  return abs;
+}
+
+//------------------------------Value------------------------------------------
+const Type *MoveL2DNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return Type::TOP;
+  const TypeLong *tl = t->is_long();
+  if( !tl->is_con() ) return bottom_type();
+  JavaValue v;
+  v.set_jlong(tl->get_con());
+  return TypeD::make( v.get_jdouble() );
+}
+
+//------------------------------Value------------------------------------------
+const Type *MoveI2FNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return Type::TOP;
+  const TypeInt *ti = t->is_int();
+  if( !ti->is_con() )   return bottom_type();
+  JavaValue v;
+  v.set_jint(ti->get_con());
+  return TypeF::make( v.get_jfloat() );
+}
+
+//------------------------------Value------------------------------------------
+const Type *MoveF2INode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP )       return Type::TOP;
+  if( t == Type::FLOAT ) return TypeInt::INT;
+  const TypeF *tf = t->is_float_constant();
+  JavaValue v;
+  v.set_jfloat(tf->getf());
+  return TypeInt::make( v.get_jint() );
+}
+
+//------------------------------Value------------------------------------------
+const Type *MoveD2LNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return Type::TOP;
+  if( t == Type::DOUBLE ) return TypeLong::LONG;
+  const TypeD *td = t->is_double_constant();
+  JavaValue v;
+  v.set_jdouble(td->getd());
+  return TypeLong::make( v.get_jlong() );
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/movenode.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OPTO_MOVENODE_HPP
+#define SHARE_VM_OPTO_MOVENODE_HPP
+
+#include "opto/node.hpp"
+
+//------------------------------CMoveNode--------------------------------------
+// Conditional move
+class CMoveNode : public TypeNode {
+  public:
+  enum { Control,               // When is it safe to do this cmove?
+    Condition,             // Condition controlling the cmove
+    IfFalse,               // Value if condition is false
+    IfTrue };              // Value if condition is true
+  CMoveNode( Node *bol, Node *left, Node *right, const Type *t ) : TypeNode(t,4)
+  {
+    init_class_id(Class_CMove);
+    // all inputs are nullified in Node::Node(int)
+    // init_req(Control,NULL);
+    init_req(Condition,bol);
+    init_req(IfFalse,left);
+    init_req(IfTrue,right);
+  }
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Identity( PhaseTransform *phase );
+  static CMoveNode *make( Compile *C, Node *c, Node *bol, Node *left, Node *right, const Type *t );
+  // Helper function to spot cmove graph shapes
+  static Node *is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f, BoolNode *b );
+};
+
+//------------------------------CMoveDNode-------------------------------------
+class CMoveDNode : public CMoveNode {
+  public:
+  CMoveDNode( Node *bol, Node *left, Node *right, const Type* t) : CMoveNode(bol,left,right,t){}
+  virtual int Opcode() const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+};
+
+//------------------------------CMoveFNode-------------------------------------
+class CMoveFNode : public CMoveNode {
+  public:
+  CMoveFNode( Node *bol, Node *left, Node *right, const Type* t ) : CMoveNode(bol,left,right,t) {}
+  virtual int Opcode() const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+};
+
+//------------------------------CMoveINode-------------------------------------
+class CMoveINode : public CMoveNode {
+  public:
+  CMoveINode( Node *bol, Node *left, Node *right, const TypeInt *ti ) : CMoveNode(bol,left,right,ti){}
+  virtual int Opcode() const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+};
+
+//------------------------------CMoveLNode-------------------------------------
+class CMoveLNode : public CMoveNode {
+  public:
+  CMoveLNode(Node *bol, Node *left, Node *right, const TypeLong *tl ) : CMoveNode(bol,left,right,tl){}
+  virtual int Opcode() const;
+};
+
+//------------------------------CMovePNode-------------------------------------
+class CMovePNode : public CMoveNode {
+  public:
+  CMovePNode( Node *c, Node *bol, Node *left, Node *right, const TypePtr* t ) : CMoveNode(bol,left,right,t) { init_req(Control,c); }
+  virtual int Opcode() const;
+};
+
+//------------------------------CMoveNNode-------------------------------------
+class CMoveNNode : public CMoveNode {
+  public:
+  CMoveNNode( Node *c, Node *bol, Node *left, Node *right, const Type* t ) : CMoveNode(bol,left,right,t) { init_req(Control,c); }
+  virtual int Opcode() const;
+};
+
+//
+class MoveI2FNode : public Node {
+  public:
+  MoveI2FNode( Node *value ) : Node(0,value) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return Type::FLOAT; }
+  virtual uint ideal_reg() const { return Op_RegF; }
+  virtual const Type* Value( PhaseTransform *phase ) const;
+};
+
+class MoveL2DNode : public Node {
+  public:
+  MoveL2DNode( Node *value ) : Node(0,value) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return Type::DOUBLE; }
+  virtual uint ideal_reg() const { return Op_RegD; }
+  virtual const Type* Value( PhaseTransform *phase ) const;
+};
+
+class MoveF2INode : public Node {
+  public:
+  MoveF2INode( Node *value ) : Node(0,value) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return TypeInt::INT; }
+  virtual uint ideal_reg() const { return Op_RegI; }
+  virtual const Type* Value( PhaseTransform *phase ) const;
+};
+
+class MoveD2LNode : public Node {
+  public:
+  MoveD2LNode( Node *value ) : Node(0,value) {}
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return TypeLong::LONG; }
+  virtual uint ideal_reg() const { return Op_RegL; }
+  virtual const Type* Value( PhaseTransform *phase ) const;
+};
+
+//------------------------------BinaryNode-------------------------------------
+// Place holder for the 2 conditional inputs to a CMove.  CMove needs 4
+// inputs: the Bool (for the lt/gt/eq/ne bits), the flags (result of some
+// compare), and the 2 values to select between.  The Matcher requires a
+// binary tree so we break it down like this:
+//     (CMove (Binary bol cmp) (Binary src1 src2))
+class BinaryNode : public Node {
+  public:
+  BinaryNode( Node *n1, Node *n2 ) : Node(0,n1,n2) { }
+  virtual int Opcode() const;
+  virtual uint ideal_reg() const { return 0; }
+};
+
+
+#endif // SHARE_VM_OPTO_MOVENODE_HPP
+
--- a/hotspot/src/share/vm/opto/mulnode.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/mulnode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -26,6 +26,7 @@
 #include "memory/allocation.inline.hpp"
 #include "opto/addnode.hpp"
 #include "opto/connode.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/memnode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/phaseX.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/narrowptrnode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "opto/narrowptrnode.hpp"
+#include "opto/phaseX.hpp"
+
+Node* DecodeNNode::Identity(PhaseTransform* phase) {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return in(1);
+
+  if (in(1)->is_EncodeP()) {
+    // (DecodeN (EncodeP p)) -> p
+    return in(1)->in(1);
+  }
+  return this;
+}
+
+const Type *DecodeNNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if (t == Type::TOP) return Type::TOP;
+  if (t == TypeNarrowOop::NULL_PTR) return TypePtr::NULL_PTR;
+
+  assert(t->isa_narrowoop(), "only  narrowoop here");
+  return t->make_ptr();
+}
+
+Node* EncodePNode::Identity(PhaseTransform* phase) {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return in(1);
+
+  if (in(1)->is_DecodeN()) {
+    // (EncodeP (DecodeN p)) -> p
+    return in(1)->in(1);
+  }
+  return this;
+}
+
+const Type *EncodePNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if (t == Type::TOP) return Type::TOP;
+  if (t == TypePtr::NULL_PTR) return TypeNarrowOop::NULL_PTR;
+
+  assert(t->isa_oop_ptr(), "only oopptr here");
+  return t->make_narrowoop();
+}
+
+
+Node *EncodeNarrowPtrNode::Ideal_DU_postCCP( PhaseCCP *ccp ) {
+  return MemNode::Ideal_common_DU_postCCP(ccp, this, in(1));
+}
+
+Node* DecodeNKlassNode::Identity(PhaseTransform* phase) {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return in(1);
+
+  if (in(1)->is_EncodePKlass()) {
+    // (DecodeNKlass (EncodePKlass p)) -> p
+    return in(1)->in(1);
+  }
+  return this;
+}
+
+const Type *DecodeNKlassNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if (t == Type::TOP) return Type::TOP;
+  assert(t != TypeNarrowKlass::NULL_PTR, "null klass?");
+
+  assert(t->isa_narrowklass(), "only narrow klass ptr here");
+  return t->make_ptr();
+}
+
+Node* EncodePKlassNode::Identity(PhaseTransform* phase) {
+  const Type *t = phase->type( in(1) );
+  if( t == Type::TOP ) return in(1);
+
+  if (in(1)->is_DecodeNKlass()) {
+    // (EncodePKlass (DecodeNKlass p)) -> p
+    return in(1)->in(1);
+  }
+  return this;
+}
+
+const Type *EncodePKlassNode::Value( PhaseTransform *phase ) const {
+  const Type *t = phase->type( in(1) );
+  if (t == Type::TOP) return Type::TOP;
+  assert (t != TypePtr::NULL_PTR, "null klass?");
+
+  assert(UseCompressedClassPointers && t->isa_klassptr(), "only klass ptr here");
+  return t->make_narrowklass();
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/narrowptrnode.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OPTO_NARROWPTRNODE_HPP
+#define SHARE_VM_OPTO_NARROWPTRNODE_HPP
+
+#include "opto/node.hpp"
+#include "opto/opcodes.hpp"
+
+//------------------------------EncodeNarrowPtr--------------------------------
+class EncodeNarrowPtrNode : public TypeNode {
+  protected:
+  EncodeNarrowPtrNode(Node* value, const Type* type):
+  TypeNode(type, 2) {
+    init_class_id(Class_EncodeNarrowPtr);
+    init_req(0, NULL);
+    init_req(1, value);
+  }
+  public:
+  virtual uint  ideal_reg() const { return Op_RegN; }
+  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
+};
+
+//------------------------------EncodeP--------------------------------
+// Encodes an oop pointers into its compressed form
+// Takes an extra argument which is the real heap base as a long which
+// may be useful for code generation in the backend.
+class EncodePNode : public EncodeNarrowPtrNode {
+  public:
+  EncodePNode(Node* value, const Type* type):
+  EncodeNarrowPtrNode(value, type) {
+    init_class_id(Class_EncodeP);
+  }
+  virtual int Opcode() const;
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual const Type *Value( PhaseTransform *phase ) const;
+};
+
+//------------------------------EncodePKlass--------------------------------
+// Encodes a klass pointer into its compressed form
+// Takes an extra argument which is the real heap base as a long which
+// may be useful for code generation in the backend.
+class EncodePKlassNode : public EncodeNarrowPtrNode {
+  public:
+  EncodePKlassNode(Node* value, const Type* type):
+  EncodeNarrowPtrNode(value, type) {
+    init_class_id(Class_EncodePKlass);
+  }
+  virtual int Opcode() const;
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual const Type *Value( PhaseTransform *phase ) const;
+};
+
+//------------------------------DecodeNarrowPtr--------------------------------
+class DecodeNarrowPtrNode : public TypeNode {
+  protected:
+  DecodeNarrowPtrNode(Node* value, const Type* type):
+  TypeNode(type, 2) {
+    init_class_id(Class_DecodeNarrowPtr);
+    init_req(0, NULL);
+    init_req(1, value);
+  }
+  public:
+  virtual uint  ideal_reg() const { return Op_RegP; }
+};
+
+//------------------------------DecodeN--------------------------------
+// Converts a narrow oop into a real oop ptr.
+// Takes an extra argument which is the real heap base as a long which
+// may be useful for code generation in the backend.
+class DecodeNNode : public DecodeNarrowPtrNode {
+  public:
+  DecodeNNode(Node* value, const Type* type):
+  DecodeNarrowPtrNode(value, type) {
+    init_class_id(Class_DecodeN);
+  }
+  virtual int Opcode() const;
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Identity( PhaseTransform *phase );
+};
+
+//------------------------------DecodeNKlass--------------------------------
+// Converts a narrow klass pointer into a real klass ptr.
+// Takes an extra argument which is the real heap base as a long which
+// may be useful for code generation in the backend.
+class DecodeNKlassNode : public DecodeNarrowPtrNode {
+  public:
+  DecodeNKlassNode(Node* value, const Type* type):
+  DecodeNarrowPtrNode(value, type) {
+    init_class_id(Class_DecodeNKlass);
+  }
+  virtual int Opcode() const;
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Identity( PhaseTransform *phase );
+};
+
+#endif // SHARE_VM_OPTO_NARROWPTRNODE_HPP
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/opaquenode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "opto/opaquenode.hpp"
+#include "opto/phaseX.hpp"
+
+//=============================================================================
+// Do not allow value-numbering
+uint Opaque1Node::hash() const { return NO_HASH; }
+uint Opaque1Node::cmp( const Node &n ) const {
+  return (&n == this);          // Always fail except on self
+}
+
+//------------------------------Identity---------------------------------------
+// If _major_progress, then more loop optimizations follow.  Do NOT remove
+// the opaque Node until no more loop ops can happen.  Note the timing of
+// _major_progress; it's set in the major loop optimizations THEN comes the
+// call to IterGVN and any chance of hitting this code.  Hence there's no
+// phase-ordering problem with stripping Opaque1 in IGVN followed by some
+// more loop optimizations that require it.
+Node *Opaque1Node::Identity( PhaseTransform *phase ) {
+  return phase->C->major_progress() ? this : in(1);
+}
+
+//=============================================================================
+// A node to prevent unwanted optimizations.  Allows constant folding.  Stops
+// value-numbering, most Ideal calls or Identity functions.  This Node is
+// specifically designed to prevent the pre-increment value of a loop trip
+// counter from being live out of the bottom of the loop (hence causing the
+// pre- and post-increment values both being live and thus requiring an extra
+// temp register and an extra move).  If we "accidentally" optimize through
+// this kind of a Node, we'll get slightly pessimal, but correct, code.  Thus
+// it's OK to be slightly sloppy on optimizations here.
+
+// Do not allow value-numbering
+uint Opaque2Node::hash() const { return NO_HASH; }
+uint Opaque2Node::cmp( const Node &n ) const {
+  return (&n == this);          // Always fail except on self
+}
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/opaquenode.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OPTO_OPAQUENODE_HPP
+#define SHARE_VM_OPTO_OPAQUENODE_HPP
+
+#include "opto/node.hpp"
+#include "opto/opcodes.hpp"
+
+//------------------------------Opaque1Node------------------------------------
+// A node to prevent unwanted optimizations.  Allows constant folding.
+// Stops value-numbering, Ideal calls or Identity functions.
+class Opaque1Node : public Node {
+  virtual uint hash() const ;                  // { return NO_HASH; }
+  virtual uint cmp( const Node &n ) const;
+  public:
+  Opaque1Node( Compile* C, Node *n ) : Node(0,n) {
+    // Put it on the Macro nodes list to be removed during macro nodes expansion.
+    init_flags(Flag_is_macro);
+    C->add_macro_node(this);
+  }
+  // Special version for the pre-loop to hold the original loop limit
+  // which is consumed by range check elimination.
+  Opaque1Node( Compile* C, Node *n, Node* orig_limit ) : Node(0,n,orig_limit) {
+    // Put it on the Macro nodes list to be removed during macro nodes expansion.
+    init_flags(Flag_is_macro);
+    C->add_macro_node(this);
+  }
+  Node* original_loop_limit() { return req()==3 ? in(2) : NULL; }
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return TypeInt::INT; }
+  virtual Node *Identity( PhaseTransform *phase );
+};
+
+//------------------------------Opaque2Node------------------------------------
+// A node to prevent unwanted optimizations.  Allows constant folding.  Stops
+// value-numbering, most Ideal calls or Identity functions.  This Node is
+// specifically designed to prevent the pre-increment value of a loop trip
+// counter from being live out of the bottom of the loop (hence causing the
+// pre- and post-increment values both being live and thus requiring an extra
+// temp register and an extra move).  If we "accidentally" optimize through
+// this kind of a Node, we'll get slightly pessimal, but correct, code.  Thus
+// it's OK to be slightly sloppy on optimizations here.
+class Opaque2Node : public Node {
+  virtual uint hash() const ;                  // { return NO_HASH; }
+  virtual uint cmp( const Node &n ) const;
+  public:
+  Opaque2Node( Compile* C, Node *n ) : Node(0,n) {
+    // Put it on the Macro nodes list to be removed during macro nodes expansion.
+    init_flags(Flag_is_macro);
+    C->add_macro_node(this);
+  }
+  virtual int Opcode() const;
+  virtual const Type *bottom_type() const { return TypeInt::INT; }
+};
+
+//------------------------------Opaque3Node------------------------------------
+// A node to prevent unwanted optimizations. Will be optimized only during
+// macro nodes expansion.
+class Opaque3Node : public Opaque2Node {
+  int _opt; // what optimization it was used for
+  public:
+  enum { RTM_OPT };
+  Opaque3Node(Compile* C, Node *n, int opt) : Opaque2Node(C, n), _opt(opt) {}
+  virtual int Opcode() const;
+  bool rtm_opt() const { return (_opt == RTM_OPT); }
+};
+
+#endif // SHARE_VM_OPTO_OPAQUENODE_HPP
+
--- a/hotspot/src/share/vm/opto/parse1.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/parse1.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -27,9 +27,11 @@
 #include "interpreter/linkResolver.hpp"
 #include "oops/method.hpp"
 #include "opto/addnode.hpp"
+#include "opto/castnode.hpp"
 #include "opto/idealGraphPrinter.hpp"
 #include "opto/locknode.hpp"
 #include "opto/memnode.hpp"
+#include "opto/opaquenode.hpp"
 #include "opto/parse.hpp"
 #include "opto/rootnode.hpp"
 #include "opto/runtime.hpp"
--- a/hotspot/src/share/vm/opto/parse2.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/parse2.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -30,6 +30,8 @@
 #include "interpreter/linkResolver.hpp"
 #include "memory/universe.inline.hpp"
 #include "opto/addnode.hpp"
+#include "opto/castnode.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/divnode.hpp"
 #include "opto/idealGraphPrinter.hpp"
 #include "opto/matcher.hpp"
@@ -1288,7 +1290,7 @@
               (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
             TypeNode* ccast = new (C) CheckCastPPNode(control(), obj, tboth);
             const Type* tcc = ccast->as_Type()->type();
-            assert(tcc != obj_type && tcc->higher_equal_speculative(obj_type), "must improve");
+            assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
             // Delay transform() call to allow recovery of pre-cast value
             // at the control merge.
             _gvn.set_type_bottom(ccast);
@@ -1352,7 +1354,7 @@
 
   if (ccast != NULL) {
     const Type* tcc = ccast->as_Type()->type();
-    assert(tcc != tval && tcc->higher_equal_speculative(tval), "must improve");
+    assert(tcc != tval && tcc->higher_equal(tval), "must improve");
     // Delay transform() call to allow recovery of pre-cast value
     // at the control merge.
     ccast->set_req(0, control());
@@ -1393,7 +1395,7 @@
       Node* addp = load_klass->in(2);
       Node* obj = addp->in(AddPNode::Address);
       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
-      if (obj_type->speculative_type() != NULL) {
+      if (obj_type->speculative_type_not_null() != NULL) {
         ciKlass* k = obj_type->speculative_type();
         inc_sp(2);
         obj = maybe_cast_profiled_obj(obj, k);
@@ -2277,6 +2279,14 @@
     maybe_add_safepoint(iter().get_dest());
     a = null();
     b = pop();
+    if (!_gvn.type(b)->speculative_maybe_null() &&
+        !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
+      inc_sp(1);
+      Node* null_ctl = top();
+      b = null_check_oop(b, &null_ctl, true, true, true);
+      assert(null_ctl->is_top(), "no null control here");
+      dec_sp(1);
+    }
     c = _gvn.transform( new (C) CmpPNode(b, a) );
     do_ifnull(btest, c);
     break;
--- a/hotspot/src/share/vm/opto/parse3.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/parse3.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -28,6 +28,7 @@
 #include "memory/universe.inline.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "opto/addnode.hpp"
+#include "opto/castnode.hpp"
 #include "opto/memnode.hpp"
 #include "opto/parse.hpp"
 #include "opto/rootnode.hpp"
--- a/hotspot/src/share/vm/opto/phaseX.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/phaseX.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -27,7 +27,6 @@
 #include "opto/block.hpp"
 #include "opto/callnode.hpp"
 #include "opto/cfgnode.hpp"
-#include "opto/connode.hpp"
 #include "opto/idealGraphPrinter.hpp"
 #include "opto/loopnode.hpp"
 #include "opto/machnode.hpp"
@@ -330,7 +329,7 @@
   Node *sentinel_node = sentinel();
   for (uint i = 0; i < max; ++i) {
     Node *n = at(i);
-    if(n != NULL && n != sentinel_node && n->is_Type()) {
+    if(n != NULL && n != sentinel_node && n->is_Type() && n->outcnt() > 0) {
       TypeNode* tn = n->as_Type();
       const Type* t = tn->type();
       const Type* t_no_spec = t->remove_speculative();
--- a/hotspot/src/share/vm/opto/runtime.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -48,7 +48,6 @@
 #include "opto/addnode.hpp"
 #include "opto/callnode.hpp"
 #include "opto/cfgnode.hpp"
-#include "opto/connode.hpp"
 #include "opto/graphKit.hpp"
 #include "opto/machnode.hpp"
 #include "opto/matcher.hpp"
--- a/hotspot/src/share/vm/opto/split_if.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/split_if.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -25,8 +25,8 @@
 #include "precompiled.hpp"
 #include "memory/allocation.inline.hpp"
 #include "opto/callnode.hpp"
-#include "opto/connode.hpp"
 #include "opto/loopnode.hpp"
+#include "opto/movenode.hpp"
 
 
 //------------------------------split_thru_region------------------------------
--- a/hotspot/src/share/vm/opto/subnode.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/subnode.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -28,9 +28,9 @@
 #include "opto/addnode.hpp"
 #include "opto/callnode.hpp"
 #include "opto/cfgnode.hpp"
-#include "opto/connode.hpp"
 #include "opto/loopnode.hpp"
 #include "opto/matcher.hpp"
+#include "opto/movenode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/opcodes.hpp"
 #include "opto/phaseX.hpp"
--- a/hotspot/src/share/vm/opto/superword.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/superword.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -27,11 +27,14 @@
 #include "memory/allocation.inline.hpp"
 #include "opto/addnode.hpp"
 #include "opto/callnode.hpp"
+#include "opto/castnode.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/divnode.hpp"
 #include "opto/matcher.hpp"
 #include "opto/memnode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/opcodes.hpp"
+#include "opto/opaquenode.hpp"
 #include "opto/superword.hpp"
 #include "opto/vectornode.hpp"
 
--- a/hotspot/src/share/vm/opto/superword.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/superword.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -24,7 +24,6 @@
 #ifndef SHARE_VM_OPTO_SUPERWORD_HPP
 #define SHARE_VM_OPTO_SUPERWORD_HPP
 
-#include "opto/connode.hpp"
 #include "opto/loopnode.hpp"
 #include "opto/node.hpp"
 #include "opto/phaseX.hpp"
--- a/hotspot/src/share/vm/opto/type.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/type.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -350,9 +350,9 @@
   floop[1] = TypeInt::INT;
   TypeTuple::LOOPBODY = TypeTuple::make( 2, floop );
 
-  TypePtr::NULL_PTR= TypePtr::make( AnyPtr, TypePtr::Null, 0 );
-  TypePtr::NOTNULL = TypePtr::make( AnyPtr, TypePtr::NotNull, OffsetBot );
-  TypePtr::BOTTOM  = TypePtr::make( AnyPtr, TypePtr::BotPTR, OffsetBot );
+  TypePtr::NULL_PTR= TypePtr::make(AnyPtr, TypePtr::Null, 0);
+  TypePtr::NOTNULL = TypePtr::make(AnyPtr, TypePtr::NotNull, OffsetBot);
+  TypePtr::BOTTOM  = TypePtr::make(AnyPtr, TypePtr::BotPTR, OffsetBot);
 
   TypeRawPtr::BOTTOM = TypeRawPtr::make( TypePtr::BotPTR );
   TypeRawPtr::NOTNULL= TypeRawPtr::make( TypePtr::NotNull );
@@ -372,7 +372,7 @@
                                            false, 0, oopDesc::mark_offset_in_bytes());
   TypeInstPtr::KLASS   = TypeInstPtr::make(TypePtr::BotPTR,  current->env()->Object_klass(),
                                            false, 0, oopDesc::klass_offset_in_bytes());
-  TypeOopPtr::BOTTOM  = TypeOopPtr::make(TypePtr::BotPTR, OffsetBot, TypeOopPtr::InstanceBot, NULL);
+  TypeOopPtr::BOTTOM  = TypeOopPtr::make(TypePtr::BotPTR, OffsetBot, TypeOopPtr::InstanceBot);
 
   TypeMetadataPtr::BOTTOM = TypeMetadataPtr::make(TypePtr::BotPTR, NULL, OffsetBot);
 
@@ -620,8 +620,8 @@
     return true;
   }
   // Now check the speculative parts as well
-  const TypeOopPtr* this_spec = isa_oopptr() != NULL ? isa_oopptr()->speculative() : NULL;
-  const TypeOopPtr* t_spec = t->isa_oopptr() != NULL ? t->isa_oopptr()->speculative() : NULL;
+  const TypePtr* this_spec = isa_ptr() != NULL ? is_ptr()->speculative() : NULL;
+  const TypePtr* t_spec = t->isa_ptr() != NULL ? t->is_ptr()->speculative() : NULL;
   if (this_spec != NULL && t_spec != NULL) {
     if (this_spec->interface_vs_oop_helper(t_spec)) {
       return true;
@@ -1975,6 +1975,25 @@
   return make(_elem->remove_speculative(), _size, _stable);
 }
 
+/**
+ * Return same type with cleaned up speculative part of element
+ */
+const Type* TypeAry::cleanup_speculative() const {
+  return make(_elem->cleanup_speculative(), _size, _stable);
+}
+
+/**
+ * Return same type but with a different inline depth (used for speculation)
+ *
+ * @param depth  depth to meet with
+ */
+const TypePtr* TypePtr::with_inline_depth(int depth) const {
+  if (!UseInlineDepthForSpeculativeTypes) {
+    return this;
+  }
+  return make(AnyPtr, _ptr, _offset, _speculative, depth);
+}
+
 //----------------------interface_vs_oop---------------------------------------
 #ifdef ASSERT
 bool TypeAry::interface_vs_oop(const Type *t) const {
@@ -2179,15 +2198,15 @@
 };
 
 //------------------------------make-------------------------------------------
-const TypePtr *TypePtr::make( TYPES t, enum PTR ptr, int offset ) {
-  return (TypePtr*)(new TypePtr(t,ptr,offset))->hashcons();
+const TypePtr *TypePtr::make(TYPES t, enum PTR ptr, int offset, const TypePtr* speculative, int inline_depth) {
+  return (TypePtr*)(new TypePtr(t,ptr,offset, speculative, inline_depth))->hashcons();
 }
 
 //------------------------------cast_to_ptr_type-------------------------------
 const Type *TypePtr::cast_to_ptr_type(PTR ptr) const {
   assert(_base == AnyPtr, "subclass must override cast_to_ptr_type");
   if( ptr == _ptr ) return this;
-  return make(_base, ptr, _offset);
+  return make(_base, ptr, _offset, _speculative, _inline_depth);
 }
 
 //------------------------------get_con----------------------------------------
@@ -2198,7 +2217,29 @@
 
 //------------------------------meet-------------------------------------------
 // Compute the MEET of two types.  It returns a new Type object.
-const Type *TypePtr::xmeet( const Type *t ) const {
+const Type *TypePtr::xmeet(const Type *t) const {
+  const Type* res = xmeet_helper(t);
+  if (res->isa_ptr() == NULL) {
+    return res;
+  }
+
+  const TypePtr* res_ptr = res->is_ptr();
+  if (res_ptr->speculative() != NULL) {
+    // type->speculative() == NULL means that speculation is no better
+    // than type, i.e. type->speculative() == type. So there are 2
+    // ways to represent the fact that we have no useful speculative
+    // data and we should use a single one to be able to test for
+    // equality between types. Check whether type->speculative() ==
+    // type and set speculative to NULL if it is the case.
+    if (res_ptr->remove_speculative() == res_ptr->speculative()) {
+      return res_ptr->remove_speculative();
+    }
+  }
+
+  return res;
+}
+
+const Type *TypePtr::xmeet_helper(const Type *t) const {
   // Perform a fast test for common case; meeting the same types together.
   if( this == t ) return this;  // Meeting same type-rep?
 
@@ -2221,7 +2262,9 @@
 
   case AnyPtr: {                // Meeting to AnyPtrs
     const TypePtr *tp = t->is_ptr();
-    return make( AnyPtr, meet_ptr(tp->ptr()), meet_offset(tp->offset()) );
+    const TypePtr* speculative = xmeet_speculative(tp);
+    int depth = meet_inline_depth(tp->inline_depth());
+    return make(AnyPtr, meet_ptr(tp->ptr()), meet_offset(tp->offset()), speculative, depth);
   }
   case RawPtr:                  // For these, flip the call around to cut down
   case OopPtr:
@@ -2260,7 +2303,7 @@
   BotPTR, NotNull, Constant, Null, AnyNull, TopPTR
 };
 const Type *TypePtr::xdual() const {
-  return new TypePtr( AnyPtr, dual_ptr(), dual_offset() );
+  return new TypePtr(AnyPtr, dual_ptr(), dual_offset(), dual_speculative(), dual_inline_depth());
 }
 
 //------------------------------xadd_offset------------------------------------
@@ -2281,20 +2324,245 @@
 
 //------------------------------add_offset-------------------------------------
 const TypePtr *TypePtr::add_offset( intptr_t offset ) const {
-  return make( AnyPtr, _ptr, xadd_offset(offset) );
+  return make(AnyPtr, _ptr, xadd_offset(offset), _speculative, _inline_depth);
 }
 
 //------------------------------eq---------------------------------------------
 // Structural equality check for Type representations
 bool TypePtr::eq( const Type *t ) const {
   const TypePtr *a = (const TypePtr*)t;
-  return _ptr == a->ptr() && _offset == a->offset();
+  return _ptr == a->ptr() && _offset == a->offset() && eq_speculative(a) && _inline_depth == a->_inline_depth;
 }
 
 //------------------------------hash-------------------------------------------
 // Type-specific hashing function.
 int TypePtr::hash(void) const {
-  return _ptr + _offset;
+  return _ptr + _offset + hash_speculative() + _inline_depth;
+;
+}
+
+/**
+ * Return same type without a speculative part
+ */
+const Type* TypePtr::remove_speculative() const {
+  if (_speculative == NULL) {
+    return this;
+  }
+  assert(_inline_depth == InlineDepthTop || _inline_depth == InlineDepthBottom, "non speculative type shouldn't have inline depth");
+  return make(AnyPtr, _ptr, _offset, NULL, _inline_depth);
+}
+
+/**
+ * Return same type but drop speculative part if we know we won't use
+ * it
+ */
+const Type* TypePtr::cleanup_speculative() const {
+  if (speculative() == NULL) {
+    return this;
+  }
+  const Type* no_spec = remove_speculative();
+  // If this is NULL_PTR then we don't need the speculative type
+  // (with_inline_depth in case the current type inline depth is
+  // InlineDepthTop)
+  if (no_spec == NULL_PTR->with_inline_depth(inline_depth())) {
+    return no_spec;
+  }
+  if (above_centerline(speculative()->ptr())) {
+    return no_spec;
+  }
+  const TypeOopPtr* spec_oopptr = speculative()->isa_oopptr();
+  // If the speculative may be null and is an inexact klass then it
+  // doesn't help
+  if (speculative()->maybe_null() && (spec_oopptr == NULL || !spec_oopptr->klass_is_exact())) {
+    return no_spec;
+  }
+  return this;
+}
+
+/**
+ * dual of the speculative part of the type
+ */
+const TypePtr* TypePtr::dual_speculative() const {
+  if (_speculative == NULL) {
+    return NULL;
+  }
+  return _speculative->dual()->is_ptr();
+}
+
+/**
+ * meet of the speculative parts of 2 types
+ *
+ * @param other  type to meet with
+ */
+const TypePtr* TypePtr::xmeet_speculative(const TypePtr* other) const {
+  bool this_has_spec = (_speculative != NULL);
+  bool other_has_spec = (other->speculative() != NULL);
+
+  if (!this_has_spec && !other_has_spec) {
+    return NULL;
+  }
+
+  // If we are at a point where control flow meets and one branch has
+  // a speculative type and the other has not, we meet the speculative
+  // type of one branch with the actual type of the other. If the
+  // actual type is exact and the speculative is as well, then the
+  // result is a speculative type which is exact and we can continue
+  // speculation further.
+  const TypePtr* this_spec = _speculative;
+  const TypePtr* other_spec = other->speculative();
+
+  if (!this_has_spec) {
+    this_spec = this;
+  }
+
+  if (!other_has_spec) {
+    other_spec = other;
+  }
+
+  return this_spec->meet(other_spec)->is_ptr();
+}
+
+/**
+ * dual of the inline depth for this type (used for speculation)
+ */
+int TypePtr::dual_inline_depth() const {
+  return -inline_depth();
+}
+
+/**
+ * meet of 2 inline depths (used for speculation)
+ *
+ * @param depth  depth to meet with
+ */
+int TypePtr::meet_inline_depth(int depth) const {
+  return MAX2(inline_depth(), depth);
+}
+
+/**
+ * Are the speculative parts of 2 types equal?
+ *
+ * @param other  type to compare this one to
+ */
+bool TypePtr::eq_speculative(const TypePtr* other) const {
+  if (_speculative == NULL || other->speculative() == NULL) {
+    return _speculative == other->speculative();
+  }
+
+  if (_speculative->base() != other->speculative()->base()) {
+    return false;
+  }
+
+  return _speculative->eq(other->speculative());
+}
+
+/**
+ * Hash of the speculative part of the type
+ */
+int TypePtr::hash_speculative() const {
+  if (_speculative == NULL) {
+    return 0;
+  }
+
+  return _speculative->hash();
+}
+
+/**
+ * add offset to the speculative part of the type
+ *
+ * @param offset  offset to add
+ */
+const TypePtr* TypePtr::add_offset_speculative(intptr_t offset) const {
+  if (_speculative == NULL) {
+    return NULL;
+  }
+  return _speculative->add_offset(offset)->is_ptr();
+}
+
+/**
+ * return exact klass from the speculative type if there's one
+ */
+ciKlass* TypePtr::speculative_type() const {
+  if (_speculative != NULL && _speculative->isa_oopptr()) {
+    const TypeOopPtr* speculative = _speculative->join(this)->is_oopptr();
+    if (speculative->klass_is_exact()) {
+      return speculative->klass();
+    }
+  }
+  return NULL;
+}
+
+/**
+ * return true if speculative type may be null
+ */
+bool TypePtr::speculative_maybe_null() const {
+  if (_speculative != NULL) {
+    const TypePtr* speculative = _speculative->join(this)->is_ptr();
+    return speculative->maybe_null();
+  }
+  return true;
+}
+
+/**
+ * Same as TypePtr::speculative_type() but return the klass only if
+ * the speculative type tells us it is not null
+ */
+ciKlass* TypePtr::speculative_type_not_null() const {
+  if (speculative_maybe_null()) {
+    return NULL;
+  }
+  return speculative_type();
+}
+
+/**
+ * Check whether new profiling would improve speculative type
+ *
+ * @param   exact_kls    class from profiling
+ * @param   inline_depth inlining depth of profile point
+ *
+ * @return  true if type profile is valuable
+ */
+bool TypePtr::would_improve_type(ciKlass* exact_kls, int inline_depth) const {
+  // no profiling?
+  if (exact_kls == NULL) {
+    return false;
+  }
+  // no speculative type or non exact speculative type?
+  if (speculative_type() == NULL) {
+    return true;
+  }
+  // If the node already has an exact speculative type keep it,
+  // unless it was provided by profiling that is at a deeper
+  // inlining level. Profiling at a higher inlining depth is
+  // expected to be less accurate.
+  if (_speculative->inline_depth() == InlineDepthBottom) {
+    return false;
+  }
+  assert(_speculative->inline_depth() != InlineDepthTop, "can't do the comparison");
+  return inline_depth < _speculative->inline_depth();
+}
+
+/**
+ * Check whether new profiling would improve ptr (= tells us it is non
+ * null)
+ *
+ * @param   maybe_null true if profiling tells the ptr may be null
+ *
+ * @return  true if ptr profile is valuable
+ */
+bool TypePtr::would_improve_ptr(bool maybe_null) const {
+  // profiling doesn't tell us anything useful
+  if (maybe_null) {
+    return false;
+  }
+  // We already know this cannot be null
+  if (!this->maybe_null()) {
+    return false;
+  }
+  // We already know the speculative type cannot be null
+  if (!speculative_maybe_null()) {
+    return false;
+  }
+  return true;
 }
 
 //------------------------------dump2------------------------------------------
@@ -2309,6 +2577,32 @@
   if( _offset == OffsetTop ) st->print("+top");
   else if( _offset == OffsetBot ) st->print("+bot");
   else if( _offset ) st->print("+%d", _offset);
+  dump_inline_depth(st);
+  dump_speculative(st);
+}
+
+/**
+ * Dump the speculative part of the type
+ */
+void TypePtr::dump_speculative(outputStream *st) const {
+  if (_speculative != NULL) {
+    st->print(" (speculative=");
+    _speculative->dump_on(st);
+    st->print(")");
+  }
+}
+
+/**
+ * Dump the inline depth of the type
+ */
+void TypePtr::dump_inline_depth(outputStream *st) const {
+  if (_inline_depth != InlineDepthBottom) {
+    if (_inline_depth == InlineDepthTop) {
+      st->print(" (inline_depth=InlineDepthTop)");
+    } else {
+      st->print(" (inline_depth=%d)", _inline_depth);
+    }
+  }
 }
 #endif
 
@@ -2399,7 +2693,7 @@
   case TypePtr::Null:
     if( _ptr == TypePtr::TopPTR ) return t;
     return TypeRawPtr::BOTTOM;
-  case TypePtr::NotNull: return TypePtr::make( AnyPtr, meet_ptr(TypePtr::NotNull), tp->meet_offset(0) );
+  case TypePtr::NotNull: return TypePtr::make(AnyPtr, meet_ptr(TypePtr::NotNull), tp->meet_offset(0), tp->speculative(), tp->inline_depth());
   case TypePtr::AnyNull:
     if( _ptr == TypePtr::Constant) return this;
     return make( meet_ptr(TypePtr::AnyNull) );
@@ -2463,16 +2757,15 @@
 const TypeOopPtr *TypeOopPtr::BOTTOM;
 
 //------------------------------TypeOopPtr-------------------------------------
-TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative, int inline_depth)
-  : TypePtr(t, ptr, offset),
+TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset,
+                       int instance_id, const TypePtr* speculative, int inline_depth)
+  : TypePtr(t, ptr, offset, speculative, inline_depth),
     _const_oop(o), _klass(k),
     _klass_is_exact(xk),
     _is_ptr_to_narrowoop(false),
     _is_ptr_to_narrowklass(false),
     _is_ptr_to_boxed_value(false),
-    _instance_id(instance_id),
-    _speculative(speculative),
-    _inline_depth(inline_depth){
+    _instance_id(instance_id) {
   if (Compile::current()->eliminate_boxing() && (t == InstPtr) &&
       (offset > 0) && xk && (k != 0) && k->is_instance_klass()) {
     _is_ptr_to_boxed_value = k->as_instance_klass()->is_boxed_value_offset(offset);
@@ -2538,8 +2831,8 @@
 }
 
 //------------------------------make-------------------------------------------
-const TypeOopPtr *TypeOopPtr::make(PTR ptr,
-                                   int offset, int instance_id, const TypeOopPtr* speculative, int inline_depth) {
+const TypeOopPtr *TypeOopPtr::make(PTR ptr, int offset, int instance_id,
+                                     const TypePtr* speculative, int inline_depth) {
   assert(ptr != Constant, "no constant generic pointers");
   ciKlass*  k = Compile::current()->env()->Object_klass();
   bool      xk = false;
@@ -2582,28 +2875,6 @@
     return TypeKlassPtr::make(xk? Constant: NotNull, k, 0);
 }
 
-const Type *TypeOopPtr::xmeet(const Type *t) const {
-  const Type* res = xmeet_helper(t);
-  if (res->isa_oopptr() == NULL) {
-    return res;
-  }
-
-  const TypeOopPtr* res_oopptr = res->is_oopptr();
-  if (res_oopptr->speculative() != NULL) {
-    // type->speculative() == NULL means that speculation is no better
-    // than type, i.e. type->speculative() == type. So there are 2
-    // ways to represent the fact that we have no useful speculative
-    // data and we should use a single one to be able to test for
-    // equality between types. Check whether type->speculative() ==
-    // type and set speculative to NULL if it is the case.
-    if (res_oopptr->remove_speculative() == res_oopptr->speculative()) {
-      return res_oopptr->remove_speculative();
-    }
-  }
-
-  return res;
-}
-
 //------------------------------meet-------------------------------------------
 // Compute the MEET of two types.  It returns a new Type object.
 const Type *TypeOopPtr::xmeet_helper(const Type *t) const {
@@ -2641,19 +2912,20 @@
     const TypePtr *tp = t->is_ptr();
     int offset = meet_offset(tp->offset());
     PTR ptr = meet_ptr(tp->ptr());
+    const TypePtr* speculative = xmeet_speculative(tp);
+    int depth = meet_inline_depth(tp->inline_depth());
     switch (tp->ptr()) {
     case Null:
-      if (ptr == Null)  return TypePtr::make(AnyPtr, ptr, offset);
+      if (ptr == Null)  return TypePtr::make(AnyPtr, ptr, offset, speculative, depth);
       // else fall through:
     case TopPTR:
     case AnyNull: {
       int instance_id = meet_instance_id(InstanceTop);
-      const TypeOopPtr* speculative = _speculative;
-      return make(ptr, offset, instance_id, speculative, _inline_depth);
+      return make(ptr, offset, instance_id, speculative, depth);
     }
     case BotPTR:
     case NotNull:
-      return TypePtr::make(AnyPtr, ptr, offset);
+      return TypePtr::make(AnyPtr, ptr, offset, speculative, depth);
     default: typerr(t);
     }
   }
@@ -2661,7 +2933,7 @@
   case OopPtr: {                 // Meeting to other OopPtrs
     const TypeOopPtr *tp = t->is_oopptr();
     int instance_id = meet_instance_id(tp->instance_id());
-    const TypeOopPtr* speculative = xmeet_speculative(tp);
+    const TypePtr* speculative = xmeet_speculative(tp);
     int depth = meet_inline_depth(tp->inline_depth());
     return make(meet_ptr(tp->ptr()), meet_offset(tp->offset()), instance_id, speculative, depth);
   }
@@ -2859,9 +3131,7 @@
 bool TypeOopPtr::eq( const Type *t ) const {
   const TypeOopPtr *a = (const TypeOopPtr*)t;
   if (_klass_is_exact != a->_klass_is_exact ||
-      _instance_id != a->_instance_id ||
-      !eq_speculative(a) ||
-      _inline_depth != a->_inline_depth)  return false;
+      _instance_id != a->_instance_id)  return false;
   ciObject* one = const_oop();
   ciObject* two = a->const_oop();
   if (one == NULL || two == NULL) {
@@ -2878,8 +3148,6 @@
     (const_oop() ? const_oop()->hash() : 0) +
     _klass_is_exact +
     _instance_id +
-    hash_speculative() +
-    _inline_depth +
     TypePtr::hash();
 }
 
@@ -2903,27 +3171,6 @@
   dump_inline_depth(st);
   dump_speculative(st);
 }
-
-/**
- *dump the speculative part of the type
- */
-void TypeOopPtr::dump_speculative(outputStream *st) const {
-  if (_speculative != NULL) {
-    st->print(" (speculative=");
-    _speculative->dump_on(st);
-    st->print(")");
-  }
-}
-
-void TypeOopPtr::dump_inline_depth(outputStream *st) const {
-  if (_inline_depth != InlineDepthBottom) {
-    if (_inline_depth == InlineDepthTop) {
-      st->print(" (inline_depth=InlineDepthTop)");
-    } else {
-      st->print(" (inline_depth=%d)", _inline_depth);
-    }
-  }
-}
 #endif
 
 //------------------------------singleton--------------------------------------
@@ -2952,49 +3199,30 @@
 }
 
 /**
+ * Return same type but drop speculative part if we know we won't use
+ * it
+ */
+const Type* TypeOopPtr::cleanup_speculative() const {
+  // If the klass is exact and the ptr is not null then there's
+  // nothing that the speculative type can help us with
+  if (klass_is_exact() && !maybe_null()) {
+    return remove_speculative();
+  }
+  return TypePtr::cleanup_speculative();
+}
+
+/**
  * Return same type but with a different inline depth (used for speculation)
  *
  * @param depth  depth to meet with
  */
-const TypeOopPtr* TypeOopPtr::with_inline_depth(int depth) const {
+const TypePtr* TypeOopPtr::with_inline_depth(int depth) const {
   if (!UseInlineDepthForSpeculativeTypes) {
     return this;
   }
   return make(_ptr, _offset, _instance_id, _speculative, depth);
 }
 
-/**
- * Check whether new profiling would improve speculative type
- *
- * @param   exact_kls    class from profiling
- * @param   inline_depth inlining depth of profile point
- *
- * @return  true if type profile is valuable
- */
-bool TypeOopPtr::would_improve_type(ciKlass* exact_kls, int inline_depth) const {
-  // no way to improve an already exact type
-  if (klass_is_exact()) {
-    return false;
-  }
-  // no profiling?
-  if (exact_kls == NULL) {
-    return false;
-  }
-  // no speculative type or non exact speculative type?
-  if (speculative_type() == NULL) {
-    return true;
-  }
-  // If the node already has an exact speculative type keep it,
-  // unless it was provided by profiling that is at a deeper
-  // inlining level. Profiling at a higher inlining depth is
-  // expected to be less accurate.
-  if (_speculative->inline_depth() == InlineDepthBottom) {
-    return false;
-  }
-  assert(_speculative->inline_depth() != InlineDepthTop, "can't do the comparison");
-  return inline_depth < _speculative->inline_depth();
-}
-
 //------------------------------meet_instance_id--------------------------------
 int TypeOopPtr::meet_instance_id( int instance_id ) const {
   // Either is 'TOP' instance?  Return the other instance!
@@ -3013,102 +3241,19 @@
 }
 
 /**
- * meet of the speculative parts of 2 types
+ * Check whether new profiling would improve speculative type
  *
- * @param other  type to meet with
+ * @param   exact_kls    class from profiling
+ * @param   inline_depth inlining depth of profile point
+ *
+ * @return  true if type profile is valuable
  */
-const TypeOopPtr* TypeOopPtr::xmeet_speculative(const TypeOopPtr* other) const {
-  bool this_has_spec = (_speculative != NULL);
-  bool other_has_spec = (other->speculative() != NULL);
-
-  if (!this_has_spec && !other_has_spec) {
-    return NULL;
-  }
-
-  // If we are at a point where control flow meets and one branch has
-  // a speculative type and the other has not, we meet the speculative
-  // type of one branch with the actual type of the other. If the
-  // actual type is exact and the speculative is as well, then the
-  // result is a speculative type which is exact and we can continue
-  // speculation further.
-  const TypeOopPtr* this_spec = _speculative;
-  const TypeOopPtr* other_spec = other->speculative();
-
-  if (!this_has_spec) {
-    this_spec = this;
-  }
-
-  if (!other_has_spec) {
-    other_spec = other;
-  }
-
-  return this_spec->meet_speculative(other_spec)->is_oopptr();
-}
-
-/**
- * dual of the speculative part of the type
- */
-const TypeOopPtr* TypeOopPtr::dual_speculative() const {
-  if (_speculative == NULL) {
-    return NULL;
-  }
-  return _speculative->dual()->is_oopptr();
-}
-
-/**
- * add offset to the speculative part of the type
- *
- * @param offset  offset to add
- */
-const TypeOopPtr* TypeOopPtr::add_offset_speculative(intptr_t offset) const {
-  if (_speculative == NULL) {
-    return NULL;
-  }
-  return _speculative->add_offset(offset)->is_oopptr();
-}
-
-/**
- * Are the speculative parts of 2 types equal?
- *
- * @param other  type to compare this one to
- */
-bool TypeOopPtr::eq_speculative(const TypeOopPtr* other) const {
-  if (_speculative == NULL || other->speculative() == NULL) {
-    return _speculative == other->speculative();
-  }
-
-  if (_speculative->base() != other->speculative()->base()) {
+bool TypeOopPtr::would_improve_type(ciKlass* exact_kls, int inline_depth) const {
+  // no way to improve an already exact type
+  if (klass_is_exact()) {
     return false;
   }
-
-  return _speculative->eq(other->speculative());
-}
-
-/**
- * Hash of the speculative part of the type
- */
-int TypeOopPtr::hash_speculative() const {
-  if (_speculative == NULL) {
-    return 0;
-  }
-
-  return _speculative->hash();
-}
-
-/**
- * dual of the inline depth for this type (used for speculation)
- */
-int TypeOopPtr::dual_inline_depth() const {
-  return -inline_depth();
-}
-
-/**
- * meet of 2 inline depth (used for speculation)
- *
- * @param depth  depth to meet with
- */
-int TypeOopPtr::meet_inline_depth(int depth) const {
-  return MAX2(inline_depth(), depth);
+  return TypePtr::would_improve_type(exact_kls, inline_depth);
 }
 
 //=============================================================================
@@ -3120,8 +3265,10 @@
 const TypeInstPtr *TypeInstPtr::KLASS;
 
 //------------------------------TypeInstPtr-------------------------------------
-TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int off, int instance_id, const TypeOopPtr* speculative, int inline_depth)
-  : TypeOopPtr(InstPtr, ptr, k, xk, o, off, instance_id, speculative, inline_depth), _name(k->name()) {
+TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int off,
+                         int instance_id, const TypePtr* speculative, int inline_depth)
+  : TypeOopPtr(InstPtr, ptr, k, xk, o, off, instance_id, speculative, inline_depth),
+    _name(k->name()) {
    assert(k != NULL &&
           (k->is_loaded() || o == NULL),
           "cannot have constants with non-loaded klass");
@@ -3134,7 +3281,7 @@
                                      ciObject* o,
                                      int offset,
                                      int instance_id,
-                                     const TypeOopPtr* speculative,
+                                     const TypePtr* speculative,
                                      int inline_depth) {
   assert( !k->is_loaded() || k->is_instance_klass(), "Must be for instance");
   // Either const_oop() is NULL or else ptr is Constant
@@ -3217,7 +3364,7 @@
     int off = meet_offset(tinst->offset());
     PTR ptr = meet_ptr(tinst->ptr());
     int instance_id = meet_instance_id(tinst->instance_id());
-    const TypeOopPtr* speculative = xmeet_speculative(tinst);
+    const TypePtr* speculative = xmeet_speculative(tinst);
     int depth = meet_inline_depth(tinst->inline_depth());
 
     const TypeInstPtr *loaded    = is_loaded() ? this  : tinst;
@@ -3295,7 +3442,7 @@
     int offset = meet_offset(tp->offset());
     PTR ptr = meet_ptr(tp->ptr());
     int instance_id = meet_instance_id(tp->instance_id());
-    const TypeOopPtr* speculative = xmeet_speculative(tp);
+    const TypePtr* speculative = xmeet_speculative(tp);
     int depth = meet_inline_depth(tp->inline_depth());
     switch (ptr) {
     case TopPTR:
@@ -3346,7 +3493,7 @@
     case TopPTR:
     case AnyNull: {
       int instance_id = meet_instance_id(InstanceTop);
-      const TypeOopPtr* speculative = xmeet_speculative(tp);
+      const TypePtr* speculative = xmeet_speculative(tp);
       int depth = meet_inline_depth(tp->inline_depth());
       return make(ptr, klass(), klass_is_exact(),
                   (ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative, depth);
@@ -3354,7 +3501,7 @@
     case NotNull:
     case BotPTR: {
       int instance_id = meet_instance_id(tp->instance_id());
-      const TypeOopPtr* speculative = xmeet_speculative(tp);
+      const TypePtr* speculative = xmeet_speculative(tp);
       int depth = meet_inline_depth(tp->inline_depth());
       return TypeOopPtr::make(ptr, offset, instance_id, speculative, depth);
     }
@@ -3367,20 +3514,21 @@
     const TypePtr *tp = t->is_ptr();
     int offset = meet_offset(tp->offset());
     PTR ptr = meet_ptr(tp->ptr());
+    int instance_id = meet_instance_id(InstanceTop);
+    const TypePtr* speculative = xmeet_speculative(tp);
+    int depth = meet_inline_depth(tp->inline_depth());
     switch (tp->ptr()) {
     case Null:
-      if( ptr == Null ) return TypePtr::make(AnyPtr, ptr, offset);
+      if( ptr == Null ) return TypePtr::make(AnyPtr, ptr, offset, speculative, depth);
       // else fall through to AnyNull
     case TopPTR:
     case AnyNull: {
-      int instance_id = meet_instance_id(InstanceTop);
-      const TypeOopPtr* speculative = _speculative;
       return make(ptr, klass(), klass_is_exact(),
-                  (ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative, _inline_depth);
+                  (ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative, depth);
     }
     case NotNull:
     case BotPTR:
-      return TypePtr::make(AnyPtr, ptr, offset);
+      return TypePtr::make(AnyPtr, ptr, offset, speculative,depth);
     default: typerr(t);
     }
   }
@@ -3407,7 +3555,7 @@
     int off = meet_offset( tinst->offset() );
     PTR ptr = meet_ptr( tinst->ptr() );
     int instance_id = meet_instance_id(tinst->instance_id());
-    const TypeOopPtr* speculative = xmeet_speculative(tinst);
+    const TypePtr* speculative = xmeet_speculative(tinst);
     int depth = meet_inline_depth(tinst->inline_depth());
 
     // Check for easy case; klasses are equal (and perhaps not loaded!)
@@ -3563,6 +3711,7 @@
     // class hierarchy - which means we have to fall to at least NotNull.
     if( ptr == TopPTR || ptr == AnyNull || ptr == Constant )
       ptr = NotNull;
+
     instance_id = InstanceBot;
 
     // Now we find the LCA of Java classes
@@ -3655,7 +3804,8 @@
 
 //------------------------------add_offset-------------------------------------
 const TypePtr *TypeInstPtr::add_offset(intptr_t offset) const {
-  return make(_ptr, klass(), klass_is_exact(), const_oop(), xadd_offset(offset), _instance_id, add_offset_speculative(offset));
+  return make(_ptr, klass(), klass_is_exact(), const_oop(), xadd_offset(offset),
+              _instance_id, add_offset_speculative(offset), _inline_depth);
 }
 
 const Type *TypeInstPtr::remove_speculative() const {
@@ -3663,10 +3813,11 @@
     return this;
   }
   assert(_inline_depth == InlineDepthTop || _inline_depth == InlineDepthBottom, "non speculative type shouldn't have inline depth");
-  return make(_ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id, NULL, _inline_depth);
-}
-
-const TypeOopPtr *TypeInstPtr::with_inline_depth(int depth) const {
+  return make(_ptr, klass(), klass_is_exact(), const_oop(), _offset,
+              _instance_id, NULL, _inline_depth);
+}
+
+const TypePtr *TypeInstPtr::with_inline_depth(int depth) const {
   if (!UseInlineDepthForSpeculativeTypes) {
     return this;
   }
@@ -3687,7 +3838,8 @@
 const TypeAryPtr *TypeAryPtr::DOUBLES;
 
 //------------------------------make-------------------------------------------
-const TypeAryPtr *TypeAryPtr::make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, const TypeOopPtr* speculative, int inline_depth) {
+const TypeAryPtr *TypeAryPtr::make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset,
+                                   int instance_id, const TypePtr* speculative, int inline_depth) {
   assert(!(k == NULL && ary->_elem->isa_int()),
          "integral arrays must be pre-equipped with a class");
   if (!xk)  xk = ary->ary_must_be_exact();
@@ -3697,7 +3849,9 @@
 }
 
 //------------------------------make-------------------------------------------
-const TypeAryPtr *TypeAryPtr::make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, const TypeOopPtr* speculative, int inline_depth, bool is_autobox_cache) {
+const TypeAryPtr *TypeAryPtr::make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset,
+                                   int instance_id, const TypePtr* speculative, int inline_depth,
+                                   bool is_autobox_cache) {
   assert(!(k == NULL && ary->_elem->isa_int()),
          "integral arrays must be pre-equipped with a class");
   assert( (ptr==Constant && o) || (ptr!=Constant && !o), "" );
@@ -3807,7 +3961,7 @@
 
   const TypeAry* new_ary = TypeAry::make(elem, size(), stable);
 
-  return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id);
+  return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative, _inline_depth);
 }
 
 //-----------------------------stable_dimension--------------------------------
@@ -3868,18 +4022,17 @@
     int offset = meet_offset(tp->offset());
     PTR ptr = meet_ptr(tp->ptr());
     int depth = meet_inline_depth(tp->inline_depth());
+    const TypePtr* speculative = xmeet_speculative(tp);
     switch (tp->ptr()) {
     case TopPTR:
     case AnyNull: {
       int instance_id = meet_instance_id(InstanceTop);
-      const TypeOopPtr* speculative = xmeet_speculative(tp);
       return make(ptr, (ptr == Constant ? const_oop() : NULL),
                   _ary, _klass, _klass_is_exact, offset, instance_id, speculative, depth);
     }
     case BotPTR:
     case NotNull: {
       int instance_id = meet_instance_id(tp->instance_id());
-      const TypeOopPtr* speculative = xmeet_speculative(tp);
       return TypeOopPtr::make(ptr, offset, instance_id, speculative, depth);
     }
     default: ShouldNotReachHere();
@@ -3891,20 +4044,21 @@
     const TypePtr *tp = t->is_ptr();
     int offset = meet_offset(tp->offset());
     PTR ptr = meet_ptr(tp->ptr());
+    const TypePtr* speculative = xmeet_speculative(tp);
+    int depth = meet_inline_depth(tp->inline_depth());
     switch (tp->ptr()) {
     case TopPTR:
       return this;
     case BotPTR:
     case NotNull:
-      return TypePtr::make(AnyPtr, ptr, offset);
+      return TypePtr::make(AnyPtr, ptr, offset, speculative, depth);
     case Null:
-      if( ptr == Null ) return TypePtr::make(AnyPtr, ptr, offset);
+      if( ptr == Null ) return TypePtr::make(AnyPtr, ptr, offset, speculative, depth);
       // else fall through to AnyNull
     case AnyNull: {
       int instance_id = meet_instance_id(InstanceTop);
-      const TypeOopPtr* speculative = _speculative;
       return make(ptr, (ptr == Constant ? const_oop() : NULL),
-                  _ary, _klass, _klass_is_exact, offset, instance_id, speculative, _inline_depth);
+                  _ary, _klass, _klass_is_exact, offset, instance_id, speculative, depth);
     }
     default: ShouldNotReachHere();
     }
@@ -3920,7 +4074,7 @@
     const TypeAry *tary = _ary->meet_speculative(tap->_ary)->is_ary();
     PTR ptr = meet_ptr(tap->ptr());
     int instance_id = meet_instance_id(tap->instance_id());
-    const TypeOopPtr* speculative = xmeet_speculative(tap);
+    const TypePtr* speculative = xmeet_speculative(tap);
     int depth = meet_inline_depth(tap->inline_depth());
     ciKlass* lazy_klass = NULL;
     if (tary->_elem->isa_int()) {
@@ -3949,7 +4103,7 @@
            // 'this' is exact and super or unrelated:
            (this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) {
       tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
-      return make(NotNull, NULL, tary, lazy_klass, false, off, InstanceBot);
+      return make(NotNull, NULL, tary, lazy_klass, false, off, InstanceBot, speculative, depth);
     }
 
     bool xk = false;
@@ -4001,7 +4155,7 @@
     int offset = meet_offset(tp->offset());
     PTR ptr = meet_ptr(tp->ptr());
     int instance_id = meet_instance_id(tp->instance_id());
-    const TypeOopPtr* speculative = xmeet_speculative(tp);
+    const TypePtr* speculative = xmeet_speculative(tp);
     int depth = meet_inline_depth(tp->inline_depth());
     switch (ptr) {
     case TopPTR:
@@ -4125,7 +4279,7 @@
   return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, _instance_id, NULL, _inline_depth);
 }
 
-const TypeOopPtr *TypeAryPtr::with_inline_depth(int depth) const {
+const TypePtr *TypeAryPtr::with_inline_depth(int depth) const {
   if (!UseInlineDepthForSpeculativeTypes) {
     return this;
   }
@@ -4250,6 +4404,13 @@
   return (const TypeNarrowOop*)(new TypeNarrowOop(type))->hashcons();
 }
 
+const Type* TypeNarrowOop::remove_speculative() const {
+  return make(_ptrtype->remove_speculative()->is_ptr());
+}
+
+const Type* TypeNarrowOop::cleanup_speculative() const {
+  return make(_ptrtype->cleanup_speculative()->is_ptr());
+}
 
 #ifndef PRODUCT
 void TypeNarrowOop::dump2( Dict & d, uint depth, outputStream *st ) const {
@@ -4376,7 +4537,7 @@
     PTR ptr = meet_ptr(tp->ptr());
     switch (tp->ptr()) {
     case Null:
-      if (ptr == Null)  return TypePtr::make(AnyPtr, ptr, offset);
+      if (ptr == Null)  return TypePtr::make(AnyPtr, ptr, offset, tp->speculative(), tp->inline_depth());
       // else fall through:
     case TopPTR:
     case AnyNull: {
@@ -4384,7 +4545,7 @@
     }
     case BotPTR:
     case NotNull:
-      return TypePtr::make(AnyPtr, ptr, offset);
+      return TypePtr::make(AnyPtr, ptr, offset, tp->speculative(), tp->inline_depth());
     default: typerr(t);
     }
   }
@@ -4698,12 +4859,12 @@
     case TopPTR:
       return this;
     case Null:
-      if( ptr == Null ) return TypePtr::make( AnyPtr, ptr, offset );
+      if( ptr == Null ) return TypePtr::make(AnyPtr, ptr, offset, tp->speculative(), tp->inline_depth());
     case AnyNull:
       return make( ptr, klass(), offset );
     case BotPTR:
     case NotNull:
-      return TypePtr::make(AnyPtr, ptr, offset);
+      return TypePtr::make(AnyPtr, ptr, offset, tp->speculative(), tp->inline_depth());
     default: typerr(t);
     }
   }
--- a/hotspot/src/share/vm/opto/type.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/opto/type.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -224,7 +224,7 @@
   }
   // Variant that keeps the speculative part of the types
   const Type *meet_speculative(const Type *t) const {
-    return meet_helper(t, true);
+    return meet_helper(t, true)->cleanup_speculative();
   }
   // WIDEN: 'widens' for Ints and other range types
   virtual const Type *widen( const Type *old, const Type* limit ) const { return this; }
@@ -247,7 +247,7 @@
   }
   // Variant that keeps the speculative part of the types
   const Type *join_speculative(const Type *t) const {
-    return join_helper(t, true);
+    return join_helper(t, true)->cleanup_speculative();
   }
 
   // Modified version of JOIN adapted to the needs Node::Value.
@@ -259,7 +259,7 @@
   }
   // Variant that keeps the speculative part of the types
   const Type *filter_speculative(const Type *kills) const {
-    return filter_helper(kills, true);
+    return filter_helper(kills, true)->cleanup_speculative();
   }
 
 #ifdef ASSERT
@@ -414,15 +414,18 @@
                                         bool require_constant = false,
                                         bool is_autobox_cache = false);
 
-  // Speculative type. See TypeInstPtr
-  virtual const TypeOopPtr* speculative() const { return NULL; }
-  virtual ciKlass* speculative_type() const { return NULL; }
+  // Speculative type helper methods. See TypePtr.
+  virtual const TypePtr* speculative() const                                  { return NULL; }
+  virtual ciKlass* speculative_type() const                                   { return NULL; }
+  virtual ciKlass* speculative_type_not_null() const                          { return NULL; }
+  virtual bool speculative_maybe_null() const                                 { return true; }
+  virtual const Type* remove_speculative() const                              { return this; }
+  virtual const Type* cleanup_speculative() const                             { return this; }
+  virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const { return exact_kls != NULL; }
+  virtual bool would_improve_ptr(bool maybe_null) const                       { return !maybe_null; }
   const Type* maybe_remove_speculative(bool include_speculative) const;
-  virtual const Type* remove_speculative() const { return this; }
 
-  virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const {
-    return exact_kls != NULL;
-  }
+  virtual bool maybe_null() const { return true; }
 
 private:
   // support arrays
@@ -679,6 +682,7 @@
   virtual const Type *xdual() const;    // Compute dual right now.
   bool ary_must_be_exact() const;  // true if arrays of such are never generic
   virtual const Type* remove_speculative() const;
+  virtual const Type* cleanup_speculative() const;
 #ifdef ASSERT
   // One type is interface, the other is oop
   virtual bool interface_vs_oop(const Type *t) const;
@@ -761,13 +765,48 @@
 public:
   enum PTR { TopPTR, AnyNull, Constant, Null, NotNull, BotPTR, lastPTR };
 protected:
-  TypePtr( TYPES t, PTR ptr, int offset ) : Type(t), _ptr(ptr), _offset(offset) {}
-  virtual bool eq( const Type *t ) const;
-  virtual int  hash() const;             // Type specific hashing
+  TypePtr(TYPES t, PTR ptr, int offset,
+          const TypePtr* speculative = NULL,
+          int inline_depth = InlineDepthBottom) :
+    Type(t), _ptr(ptr), _offset(offset), _speculative(speculative),
+    _inline_depth(inline_depth) {}
   static const PTR ptr_meet[lastPTR][lastPTR];
   static const PTR ptr_dual[lastPTR];
   static const char * const ptr_msg[lastPTR];
 
+  enum {
+    InlineDepthBottom = INT_MAX,
+    InlineDepthTop = -InlineDepthBottom
+  };
+
+  // Extra type information profiling gave us. We propagate it the
+  // same way the rest of the type info is propagated. If we want to
+  // use it, then we have to emit a guard: this part of the type is
+  // not something we know but something we speculate about the type.
+  const TypePtr*   _speculative;
+  // For speculative types, we record at what inlining depth the
+  // profiling point that provided the data is. We want to favor
+  // profile data coming from outer scopes which are likely better for
+  // the current compilation.
+  int _inline_depth;
+
+  // utility methods to work on the speculative part of the type
+  const TypePtr* dual_speculative() const;
+  const TypePtr* xmeet_speculative(const TypePtr* other) const;
+  bool eq_speculative(const TypePtr* other) const;
+  int hash_speculative() const;
+  const TypePtr* add_offset_speculative(intptr_t offset) const;
+#ifndef PRODUCT
+  void dump_speculative(outputStream *st) const;
+#endif
+
+  // utility methods to work on the inline depth of the type
+  int dual_inline_depth() const;
+  int meet_inline_depth(int depth) const;
+#ifndef PRODUCT
+  void dump_inline_depth(outputStream *st) const;
+#endif
+
 public:
   const int _offset;            // Offset into oop, with TOP & BOT
   const PTR _ptr;               // Pointer equivalence class
@@ -775,7 +814,9 @@
   const int offset() const { return _offset; }
   const PTR ptr()    const { return _ptr; }
 
-  static const TypePtr *make( TYPES t, PTR ptr, int offset );
+  static const TypePtr *make(TYPES t, PTR ptr, int offset,
+                             const TypePtr* speculative = NULL,
+                             int inline_depth = InlineDepthBottom);
 
   // Return a 'ptr' version of this type
   virtual const Type *cast_to_ptr_type(PTR ptr) const;
@@ -784,10 +825,13 @@
 
   int xadd_offset( intptr_t offset ) const;
   virtual const TypePtr *add_offset( intptr_t offset ) const;
+  virtual bool eq(const Type *t) const;
+  virtual int  hash() const;             // Type specific hashing
 
   virtual bool singleton(void) const;    // TRUE if type is a singleton
   virtual bool empty(void) const;        // TRUE if type is vacuous
   virtual const Type *xmeet( const Type *t ) const;
+  virtual const Type *xmeet_helper( const Type *t ) const;
   int meet_offset( int offset ) const;
   int dual_offset( ) const;
   virtual const Type *xdual() const;    // Compute dual right now.
@@ -802,6 +846,20 @@
     return ptr_dual[ ptr_meet[ ptr_dual[in_ptr] ] [ dual_ptr() ] ];
   }
 
+  // Speculative type helper methods.
+  virtual const TypePtr* speculative() const { return _speculative; }
+  int inline_depth() const                   { return _inline_depth; }
+  virtual ciKlass* speculative_type() const;
+  virtual ciKlass* speculative_type_not_null() const;
+  virtual bool speculative_maybe_null() const;
+  virtual const Type* remove_speculative() const;
+  virtual const Type* cleanup_speculative() const;
+  virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const;
+  virtual bool would_improve_ptr(bool maybe_null) const;
+  virtual const TypePtr* with_inline_depth(int depth) const;
+
+  virtual bool maybe_null() const { return meet_ptr(Null) == ptr(); }
+
   // Tests for relation to centerline of type lattice:
   static bool above_centerline(PTR ptr) { return (ptr <= AnyNull); }
   static bool below_centerline(PTR ptr) { return (ptr >= NotNull); }
@@ -850,7 +908,8 @@
 // Some kind of oop (Java pointer), either klass or instance or array.
 class TypeOopPtr : public TypePtr {
 protected:
-  TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative, int inline_depth);
+  TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id,
+             const TypePtr* speculative, int inline_depth);
 public:
   virtual bool eq( const Type *t ) const;
   virtual int  hash() const;             // Type specific hashing
@@ -861,10 +920,6 @@
   };
 protected:
 
-  enum {
-    InlineDepthBottom = INT_MAX,
-    InlineDepthTop = -InlineDepthBottom
-  };
   // Oop is NULL, unless this is a constant oop.
   ciObject*     _const_oop;   // Constant oop
   // If _klass is NULL, then so is _sig.  This is an unloaded klass.
@@ -880,38 +935,11 @@
   // This is the the node index of the allocation node creating this instance.
   int           _instance_id;
 
-  // Extra type information profiling gave us. We propagate it the
-  // same way the rest of the type info is propagated. If we want to
-  // use it, then we have to emit a guard: this part of the type is
-  // not something we know but something we speculate about the type.
-  const TypeOopPtr*   _speculative;
-  // For speculative types, we record at what inlining depth the
-  // profiling point that provided the data is. We want to favor
-  // profile data coming from outer scopes which are likely better for
-  // the current compilation.
-  int _inline_depth;
-
   static const TypeOopPtr* make_from_klass_common(ciKlass* klass, bool klass_change, bool try_for_exact);
 
   int dual_instance_id() const;
   int meet_instance_id(int uid) const;
 
-  // utility methods to work on the speculative part of the type
-  const TypeOopPtr* dual_speculative() const;
-  const TypeOopPtr* xmeet_speculative(const TypeOopPtr* other) const;
-  bool eq_speculative(const TypeOopPtr* other) const;
-  int hash_speculative() const;
-  const TypeOopPtr* add_offset_speculative(intptr_t offset) const;
-#ifndef PRODUCT
-  void dump_speculative(outputStream *st) const;
-#endif
-  // utility methods to work on the inline depth of the type
-  int dual_inline_depth() const;
-  int meet_inline_depth(int depth) const;
-#ifndef PRODUCT
-  void dump_inline_depth(outputStream *st) const;
-#endif
-
   // Do not allow interface-vs.-noninterface joins to collapse to top.
   virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
 
@@ -941,7 +969,9 @@
                                               bool not_null_elements = false);
 
   // Make a generic (unclassed) pointer to an oop.
-  static const TypeOopPtr* make(PTR ptr, int offset, int instance_id, const TypeOopPtr* speculative = NULL, int inline_depth = InlineDepthBottom);
+  static const TypeOopPtr* make(PTR ptr, int offset, int instance_id,
+                                const TypePtr* speculative = NULL,
+                                int inline_depth = InlineDepthBottom);
 
   ciObject* const_oop()    const { return _const_oop; }
   virtual ciKlass* klass() const { return _klass;     }
@@ -955,7 +985,6 @@
   bool is_known_instance()       const { return _instance_id > 0; }
   int  instance_id()             const { return _instance_id; }
   bool is_known_instance_field() const { return is_known_instance() && _offset >= 0; }
-  virtual const TypeOopPtr* speculative() const { return _speculative; }
 
   virtual intptr_t get_con() const;
 
@@ -969,10 +998,13 @@
   const TypeKlassPtr* as_klass_type() const;
 
   virtual const TypePtr *add_offset( intptr_t offset ) const;
-  // Return same type without a speculative part
+
+  // Speculative type helper methods.
   virtual const Type* remove_speculative() const;
+  virtual const Type* cleanup_speculative() const;
+  virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const;
+  virtual const TypePtr* with_inline_depth(int depth) const;
 
-  virtual const Type *xmeet(const Type *t) const;
   virtual const Type *xdual() const;    // Compute dual right now.
   // the core of the computation of the meet for TypeOopPtr and for its subclasses
   virtual const Type *xmeet_helper(const Type *t) const;
@@ -982,29 +1014,14 @@
 #ifndef PRODUCT
   virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
 #endif
-
-  // Return the speculative type if any
-  ciKlass* speculative_type() const {
-    if (_speculative != NULL) {
-      const TypeOopPtr* speculative = _speculative->join(this)->is_oopptr();
-      if (speculative->klass_is_exact()) {
-        return speculative->klass();
-      }
-    }
-    return NULL;
-  }
-  int inline_depth() const {
-    return _inline_depth;
-  }
-  virtual const TypeOopPtr* with_inline_depth(int depth) const;
-  virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const;
 };
 
 //------------------------------TypeInstPtr------------------------------------
 // Class of Java object pointers, pointing either to non-array Java instances
 // or to a Klass* (including array klasses).
 class TypeInstPtr : public TypeOopPtr {
-  TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative, int inline_depth);
+  TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id,
+              const TypePtr* speculative, int inline_depth);
   virtual bool eq( const Type *t ) const;
   virtual int  hash() const;             // Type specific hashing
 
@@ -1040,7 +1057,10 @@
   }
 
   // Make a pointer to an oop.
-  static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL, int inline_depth = InlineDepthBottom);
+  static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset,
+                                 int instance_id = InstanceBot,
+                                 const TypePtr* speculative = NULL,
+                                 int inline_depth = InlineDepthBottom);
 
   /** Create constant type for a constant boxed value */
   const Type* get_const_boxed_value() const;
@@ -1057,9 +1077,10 @@
   virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const;
 
   virtual const TypePtr *add_offset( intptr_t offset ) const;
-  // Return same type without a speculative part
+
+  // Speculative type helper methods.
   virtual const Type* remove_speculative() const;
-  virtual const TypeOopPtr* with_inline_depth(int depth) const;
+  virtual const TypePtr* with_inline_depth(int depth) const;
 
   // the core of the computation of the meet of 2 types
   virtual const Type *xmeet_helper(const Type *t) const;
@@ -1081,7 +1102,8 @@
 // Class of Java array pointers
 class TypeAryPtr : public TypeOopPtr {
   TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk,
-              int offset, int instance_id, bool is_autobox_cache, const TypeOopPtr* speculative, int inline_depth)
+              int offset, int instance_id, bool is_autobox_cache,
+              const TypePtr* speculative, int inline_depth)
     : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id, speculative, inline_depth),
     _ary(ary),
     _is_autobox_cache(is_autobox_cache)
@@ -1120,9 +1142,15 @@
 
   bool is_autobox_cache() const { return _is_autobox_cache; }
 
-  static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL, int inline_depth = InlineDepthBottom);
+  static const TypeAryPtr *make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset,
+                                int instance_id = InstanceBot,
+                                const TypePtr* speculative = NULL,
+                                int inline_depth = InlineDepthBottom);
   // Constant pointer to array
-  static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL, int inline_depth = InlineDepthBottom, bool is_autobox_cache= false);
+  static const TypeAryPtr *make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset,
+                                int instance_id = InstanceBot,
+                                const TypePtr* speculative = NULL,
+                                int inline_depth = InlineDepthBottom, bool is_autobox_cache = false);
 
   // Return a 'ptr' version of this type
   virtual const Type *cast_to_ptr_type(PTR ptr) const;
@@ -1136,9 +1164,10 @@
 
   virtual bool empty(void) const;        // TRUE if type is vacuous
   virtual const TypePtr *add_offset( intptr_t offset ) const;
-  // Return same type without a speculative part
+
+  // Speculative type helper methods.
   virtual const Type* remove_speculative() const;
-  virtual const TypeOopPtr* with_inline_depth(int depth) const;
+  virtual const TypePtr* with_inline_depth(int depth) const;
 
   // the core of the computation of the meet of 2 types
   virtual const Type *xmeet_helper(const Type *t) const;
@@ -1367,9 +1396,8 @@
   static const TypeNarrowOop *BOTTOM;
   static const TypeNarrowOop *NULL_PTR;
 
-  virtual const Type* remove_speculative() const {
-    return make(_ptrtype->remove_speculative()->is_ptr());
-  }
+  virtual const Type* remove_speculative() const;
+  virtual const Type* cleanup_speculative() const;
 
 #ifndef PRODUCT
   virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
--- a/hotspot/src/share/vm/precompiled/precompiled.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/precompiled/precompiled.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -254,18 +254,24 @@
 # include "opto/block.hpp"
 # include "opto/c2_globals.hpp"
 # include "opto/callnode.hpp"
+# include "opto/castnode.hpp"
 # include "opto/cfgnode.hpp"
 # include "opto/compile.hpp"
 # include "opto/connode.hpp"
+# include "opto/convertnode.hpp"
+# include "opto/countbitsnode.hpp"
 # include "opto/idealGraphPrinter.hpp"
+# include "opto/intrinsicnode.hpp"
 # include "opto/loopnode.hpp"
 # include "opto/machnode.hpp"
 # include "opto/matcher.hpp"
 # include "opto/memnode.hpp"
+# include "opto/movenode.hpp"
 # include "opto/mulnode.hpp"
 # include "opto/multnode.hpp"
-# include "opto/node.hpp"
+# include "opto/narrowptrnode.hpp"
 # include "opto/opcodes.hpp"
+# include "opto/opaquenode.hpp"
 # include "opto/optoreg.hpp"
 # include "opto/phase.hpp"
 # include "opto/phaseX.hpp"
--- a/hotspot/src/share/vm/prims/jni.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/prims/jni.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -3878,6 +3878,7 @@
 void TestVirtualSpaceNode_test();
 void TestNewSize_test();
 void TestKlass_test();
+void TestBitMap_test();
 #if INCLUDE_ALL_GCS
 void TestOldFreeSpaceCalculation_test();
 void TestG1BiasedArray_test();
@@ -3903,6 +3904,7 @@
     run_unit_test(test_loggc_filename());
     run_unit_test(TestNewSize_test());
     run_unit_test(TestKlass_test());
+    run_unit_test(TestBitMap_test());
 #if INCLUDE_VM_STRUCTS
     run_unit_test(VMStructs::test());
 #endif
@@ -4001,7 +4003,7 @@
     }
 
 #ifndef PRODUCT
-  #ifndef TARGET_OS_FAMILY_windows
+  #ifndef CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED
     #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) f()
   #endif
 
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -3791,10 +3791,6 @@
     AlwaysIncrementalInline = false;
   }
 #endif
-  if (IncrementalInline && FLAG_IS_DEFAULT(MaxNodeLimit)) {
-    // incremental inlining: bump MaxNodeLimit
-    FLAG_SET_DEFAULT(MaxNodeLimit, (intx)75000);
-  }
   if (!UseTypeSpeculation && FLAG_IS_DEFAULT(TypeProfileLevel)) {
     // nothing to use the profiling, turn if off
     FLAG_SET_DEFAULT(TypeProfileLevel, 0);
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -1839,6 +1839,7 @@
   "predicate",
   "loop_limit_check",
   "speculate_class_check",
+  "speculate_null_check",
   "rtm_state_change"
 };
 const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
--- a/hotspot/src/share/vm/runtime/deoptimization.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/deoptimization.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -60,6 +60,7 @@
     Reason_predicate,             // compiler generated predicate failed
     Reason_loop_limit_check,      // compiler generated loop limits check failed
     Reason_speculate_class_check, // saw unexpected object class from type speculation
+    Reason_speculate_null_check,  // saw unexpected null from type speculation
     Reason_rtm_state_change,      // rtm state change detected
     Reason_LIMIT,
     // Note:  Keep this enum in sync. with _trap_reason_name.
@@ -315,17 +316,27 @@
       return Reason_null_check;           // recorded per BCI as a null check
     else if (reason == Reason_speculate_class_check)
       return Reason_class_check;
+    else if (reason == Reason_speculate_null_check)
+      return Reason_null_check;
     else
       return Reason_none;
   }
 
   static bool reason_is_speculate(int reason) {
-    if (reason == Reason_speculate_class_check) {
+    if (reason == Reason_speculate_class_check || reason == Reason_speculate_null_check) {
       return true;
     }
     return false;
   }
 
+  static DeoptReason reason_null_check(bool speculative) {
+    return speculative ? Deoptimization::Reason_speculate_null_check : Deoptimization::Reason_null_check;
+  }
+
+  static DeoptReason reason_class_check(bool speculative) {
+    return speculative ? Deoptimization::Reason_speculate_class_check : Deoptimization::Reason_class_check;
+  }
+
   static uint per_method_trap_limit(int reason) {
     return reason_is_speculate(reason) ? (uint)PerMethodSpecTrapLimit : (uint)PerMethodTrapLimit;
   }
--- a/hotspot/src/share/vm/runtime/globals.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -2832,7 +2832,7 @@
           "number of method invocations/branches (expressed as % of "       \
           "CompileThreshold) before using the method's profile")            \
                                                                             \
-  develop(bool, PrintMethodData, false,                                     \
+  diagnostic(bool, PrintMethodData, false,                                  \
           "Print the results of +ProfileInterpreter at end of run")         \
                                                                             \
   develop(bool, VerifyDataPointer, trueInDebug,                             \
--- a/hotspot/src/share/vm/runtime/java.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/java.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -98,6 +98,59 @@
 #endif
 
 
+GrowableArray<Method*>* collected_profiled_methods;
+
+int compare_methods(Method** a, Method** b) {
+  // %%% there can be 32-bit overflow here
+  return ((*b)->invocation_count() + (*b)->compiled_invocation_count())
+       - ((*a)->invocation_count() + (*a)->compiled_invocation_count());
+}
+
+void collect_profiled_methods(Method* m) {
+  Thread* thread = Thread::current();
+  // This HandleMark prevents a huge amount of handles from being added
+  // to the metadata_handles() array on the thread.
+  HandleMark hm(thread);
+  methodHandle mh(thread, m);
+  if ((m->method_data() != NULL) &&
+      (PrintMethodData || CompilerOracle::should_print(mh))) {
+    collected_profiled_methods->push(m);
+  }
+}
+
+void print_method_profiling_data() {
+  if (ProfileInterpreter COMPILER1_PRESENT(|| C1UpdateMethodData)) {
+    ResourceMark rm;
+    HandleMark hm;
+    collected_profiled_methods = new GrowableArray<Method*>(1024);
+    ClassLoaderDataGraph::methods_do(collect_profiled_methods);
+    collected_profiled_methods->sort(&compare_methods);
+
+    int count = collected_profiled_methods->length();
+    int total_size = 0;
+    if (count > 0) {
+      for (int index = 0; index < count; index++) {
+        Method* m = collected_profiled_methods->at(index);
+        ttyLocker ttyl;
+        tty->print_cr("------------------------------------------------------------------------");
+        m->print_invocation_count();
+        tty->print_cr("  mdo size: %d bytes", m->method_data()->size_in_bytes());
+        tty->cr();
+        // Dump data on parameters if any
+        if (m->method_data() != NULL && m->method_data()->parameters_type_data() != NULL) {
+          tty->fill_to(2);
+          m->method_data()->parameters_type_data()->print_data_on(tty);
+        }
+        m->print_codes();
+        total_size += m->method_data()->size_in_bytes();
+      }
+      tty->print_cr("------------------------------------------------------------------------");
+      tty->print_cr("Total MDO size: %d bytes", total_size);
+    }
+  }
+}
+
+
 #ifndef PRODUCT
 
 // Statistics printing (method invocation histogram)
@@ -111,26 +164,6 @@
 }
 
 
-GrowableArray<Method*>* collected_profiled_methods;
-
-void collect_profiled_methods(Method* m) {
-  Thread* thread = Thread::current();
-  // This HandleMark prevents a huge amount of handles from being added
-  // to the metadata_handles() array on the thread.
-  HandleMark hm(thread);
-  methodHandle mh(thread, m);
-  if ((m->method_data() != NULL) &&
-      (PrintMethodData || CompilerOracle::should_print(mh))) {
-    collected_profiled_methods->push(m);
-  }
-}
-
-
-int compare_methods(Method** a, Method** b) {
-  // %%% there can be 32-bit overflow here
-  return ((*b)->invocation_count() + (*b)->compiled_invocation_count())
-       - ((*a)->invocation_count() + (*a)->compiled_invocation_count());
-}
 
 
 void print_method_invocation_histogram() {
@@ -173,37 +206,6 @@
   SharedRuntime::print_call_statistics(comp_total);
 }
 
-void print_method_profiling_data() {
-  ResourceMark rm;
-  HandleMark hm;
-  collected_profiled_methods = new GrowableArray<Method*>(1024);
-  SystemDictionary::methods_do(collect_profiled_methods);
-  collected_profiled_methods->sort(&compare_methods);
-
-  int count = collected_profiled_methods->length();
-  int total_size = 0;
-  if (count > 0) {
-    for (int index = 0; index < count; index++) {
-      Method* m = collected_profiled_methods->at(index);
-      ttyLocker ttyl;
-      tty->print_cr("------------------------------------------------------------------------");
-      //m->print_name(tty);
-      m->print_invocation_count();
-      tty->print_cr("  mdo size: %d bytes", m->method_data()->size_in_bytes());
-      tty->cr();
-      // Dump data on parameters if any
-      if (m->method_data() != NULL && m->method_data()->parameters_type_data() != NULL) {
-        tty->fill_to(2);
-        m->method_data()->parameters_type_data()->print_data_on(tty);
-      }
-      m->print_codes();
-      total_size += m->method_data()->size_in_bytes();
-    }
-    tty->print_cr("------------------------------------------------------------------------");
-    tty->print_cr("Total MDO size: %d bytes", total_size);
-  }
-}
-
 void print_bytecode_count() {
   if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
     tty->print_cr("[BytecodeCounter::counter_value = %d]", BytecodeCounter::counter_value());
@@ -281,9 +283,9 @@
   if (CountCompiledCalls) {
     print_method_invocation_histogram();
   }
-  if (ProfileInterpreter COMPILER1_PRESENT(|| C1UpdateMethodData)) {
-    print_method_profiling_data();
-  }
+
+  print_method_profiling_data();
+
   if (TimeCompiler) {
     COMPILER2_PRESENT(Compile::print_timers();)
   }
@@ -373,6 +375,10 @@
 
 void print_statistics() {
 
+  if (PrintMethodData) {
+    print_method_profiling_data();
+  }
+
   if (CITime) {
     CompileBroker::print_times();
   }
--- a/hotspot/src/share/vm/runtime/thread.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -214,7 +214,6 @@
   debug_only(_allow_allocation_count = 0;)
   NOT_PRODUCT(_allow_safepoint_count = 0;)
   NOT_PRODUCT(_skip_gcalot = false;)
-  CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;)
   _jvmti_env_iteration_count = 0;
   set_allocated_bytes(0);
   _vm_operation_started_count = 0;
--- a/hotspot/src/share/vm/runtime/thread.hpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Sat Apr 05 20:59:37 2014 +0000
@@ -249,9 +249,6 @@
   // Used by SkipGCALot class.
   NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?
 
-  // Record when GC is locked out via the GC_locker mechanism
-  CHECK_UNHANDLED_OOPS_ONLY(int _gc_locked_out_count;)
-
   friend class No_Alloc_Verifier;
   friend class No_Safepoint_Verifier;
   friend class Pause_No_Safepoint_Verifier;
@@ -397,7 +394,6 @@
   void clear_unhandled_oops() {
     if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
   }
-  bool is_gc_locked_out() { return _gc_locked_out_count > 0; }
 #endif // CHECK_UNHANDLED_OOPS
 
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/runtime/unhandledOops.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/unhandledOops.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -113,9 +113,7 @@
 
 void UnhandledOops::clear_unhandled_oops() {
   assert (CheckUnhandledOops, "should only be called with checking option");
-  if (_thread->is_gc_locked_out()) {
-    return;
-  }
+
   for (int k = 0; k < _oop_list->length(); k++) {
     UnhandledOopEntry entry = _oop_list->at(k);
     // If an entry is on the unhandled oop list but isn't on the stack
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -171,15 +171,21 @@
 #include "opto/addnode.hpp"
 #include "opto/block.hpp"
 #include "opto/callnode.hpp"
+#include "opto/castnode.hpp"
 #include "opto/cfgnode.hpp"
 #include "opto/chaitin.hpp"
+#include "opto/convertnode.hpp"
 #include "opto/divnode.hpp"
+#include "opto/intrinsicnode.hpp"
 #include "opto/locknode.hpp"
 #include "opto/loopnode.hpp"
 #include "opto/machnode.hpp"
 #include "opto/matcher.hpp"
 #include "opto/mathexactnode.hpp"
 #include "opto/mulnode.hpp"
+#include "opto/movenode.hpp"
+#include "opto/narrowptrnode.hpp"
+#include "opto/opaquenode.hpp"
 #include "opto/phaseX.hpp"
 #include "opto/parse.hpp"
 #include "opto/regalloc.hpp"
--- a/hotspot/src/share/vm/utilities/bitMap.cpp	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/src/share/vm/utilities/bitMap.cpp	Sat Apr 05 20:59:37 2014 +0000
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/copy.hpp"
 #ifdef TARGET_OS_FAMILY_linux
@@ -67,16 +68,14 @@
   idx_t new_size_in_words = size_in_words();
   if (in_resource_area) {
     _map = NEW_RESOURCE_ARRAY(bm_word_t, new_size_in_words);
+    Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map,
+                         MIN2(old_size_in_words, new_size_in_words));
   } else {
-    if (old_map != NULL) {
-      _map_allocator.free();
-    }
-    _map = _map_allocator.allocate(new_size_in_words);
+    _map = _map_allocator.reallocate(new_size_in_words);
   }
-  Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map,
-                       MIN2(old_size_in_words, new_size_in_words));
+
   if (new_size_in_words > old_size_in_words) {
-    clear_range_of_words(old_size_in_words, size_in_words());
+    clear_range_of_words(old_size_in_words, new_size_in_words);
   }
 }
 
@@ -536,6 +535,83 @@
   tty->cr();
 }
 
+class TestBitMap : public AllStatic {
+  const static BitMap::idx_t BITMAP_SIZE = 1024;
+  static void fillBitMap(BitMap& map) {
+    map.set_bit(1);
+    map.set_bit(3);
+    map.set_bit(17);
+    map.set_bit(512);
+  }
+
+  static void testResize(bool in_resource_area) {
+    {
+      BitMap map(0, in_resource_area);
+      map.resize(BITMAP_SIZE, in_resource_area);
+      fillBitMap(map);
+
+      BitMap map2(BITMAP_SIZE, in_resource_area);
+      fillBitMap(map2);
+      assert(map.is_same(map2), "could be");
+    }
+
+    {
+      BitMap map(128, in_resource_area);
+      map.resize(BITMAP_SIZE, in_resource_area);
+      fillBitMap(map);
+
+      BitMap map2(BITMAP_SIZE, in_resource_area);
+      fillBitMap(map2);
+      assert(map.is_same(map2), "could be");
+    }
+
+    {
+      BitMap map(BITMAP_SIZE, in_resource_area);
+      map.resize(BITMAP_SIZE, in_resource_area);
+      fillBitMap(map);
+
+      BitMap map2(BITMAP_SIZE, in_resource_area);
+      fillBitMap(map2);
+      assert(map.is_same(map2), "could be");
+    }
+  }
+
+  static void testResizeResource() {
+    ResourceMark rm;
+    testResize(true);
+  }
+
+  static void testResizeNonResource() {
+    const uintx bitmap_bytes = BITMAP_SIZE / BitsPerByte;
+
+    // Test the default behavior
+    testResize(false);
+
+    {
+      // Make sure that AllocatorMallocLimit is larger than our allocation request
+      // forcing it to call standard malloc()
+      UIntFlagSetting fs(ArrayAllocatorMallocLimit, bitmap_bytes * 4);
+      testResize(false);
+    }
+    {
+      // Make sure that AllocatorMallocLimit is smaller than our allocation request
+      // forcing it to call mmap() (or equivalent)
+      UIntFlagSetting fs(ArrayAllocatorMallocLimit, bitmap_bytes / 4);
+      testResize(false);
+    }
+  }
+
+ public:
+  static void test() {
+    testResizeResource();
+    testResizeNonResource();
+  }
+
+};
+
+void TestBitMap_test() {
+  TestBitMap::test();
+}
 #endif
 
 
--- a/hotspot/test/compiler/ciReplay/common.sh	Sat Apr 05 21:33:11 2014 +0200
+++ b/hotspot/test/compiler/ciReplay/common.sh	Sat Apr 05 20:59:37 2014 +0000
@@ -218,7 +218,7 @@
             -XX:VMThreadStackSize=512 \
             -XX:CompilerThreadStackSize=512 \
             -XX:ParallelGCThreads=1 \
-            -XX:CICompilerCount=1 \
+            -XX:CICompilerCount=2 \
             -Xcomp \
             -XX:CICrashAt=1 \
             -XX:+CreateMinidumpOnCrash \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/codegen/C1NullCheckOfNullStore.java	Sat Apr 05 20:59:37 2014 +0000
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8039043
+ * @summary Null check is placed in a wrong place when storing a null to an object field on x64 with compressed oops off
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CompileCommand=compileonly,C1NullCheckOfNullStore::test -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:-UseCompressedOops C1NullCheckOfNullStore
+ *
+ */
+
+public class C1NullCheckOfNullStore {
+  private static class Foo {
+    Object bar;
+  }
+  static private void test(Foo x) {
+    x.bar = null;
+  }
+  static public void main(String args[]) {
+    Foo x = new Foo();
+    for (int i = 0; i < 10000; i++) {
+      test(x);
+    }
+    boolean gotNPE = false;
+    try {
+      for (int i = 0; i < 10000; i++) {
+        test(null);
+      }
+    }
+    catch(NullPointerException e) {
+      gotNPE = true;
+    }
+    if (!gotNPE) {
+      throw new Error("Expecting a NullPointerException");
+    }
+  }
+}
--- a/jaxp/.hgtags	Sat Apr 05 21:33:11 2014 +0200
+++ b/jaxp/.hgtags	Sat Apr 05 20:59:37 2014 +0000
@@ -248,3 +248,4 @@
 fb92ed0399424193f444489ad49a16748816dc12 jdk9-b03
 2846d8fc31490897817a122a668af4f44fc913d0 jdk9-b04
 b92a20e303d24c74078888cd7084b14d7626d48f jdk9-b05
+46e4951b2a267e98341613a3b796f2c7554eb831 jdk9-b06
--- a/jaxws/.hgtags	Sat Apr 05 21:33:11 2014 +0200
+++ b/jaxws/.hgtags	Sat Apr 05 20:59:37 2014 +0000
@@ -251,3 +251,4 @@
 1cd9786257ed4f82a3371fd606b162e5bb6fcd81 jdk9-b03
 da44a8bdf1f3fdd518e7d785d60cc1b15983b176 jdk9-b04
 eae966c8133fec0a8bf9e16d1274a4ede3c0fb52 jdk9-b05
+cf0a6e41670f990414cd337000ad5f3bd1908073 jdk9-b06
--- a/jdk/.hgtags	Sat Apr 05 21:33:11 2014 +0200
+++ b/jdk/.hgtags	Sat Apr 05 20:59:37 2014 +0000
@@ -248,3 +248,4 @@
 4111af6151ed8ca8e3f5603c69729a68427e1d5b jdk9-b03
 627deed79b595a4789fc9151455b663a47381257 jdk9-b04
 263198a1d8f1f4cb97d35f40c61704b08ebd3686 jdk9-b05
+cac7b28b8b1e0e11d7a8e1ac1fe75a03b3749eab jdk9-b06
--- a/langtools/.hgtags	Sat Apr 05 21:33:11 2014 +0200
+++ b/langtools/.hgtags	Sat Apr 05 20:59:37 2014 +0000
@@ -248,3 +248,4 @@
 151222468d1d04ce6613d33efa3d45bfaf53e3e5 jdk9-b03
 fa2ec6b6b1697ae4a78b03b609664dc6b47dee86 jdk9-b04
 1d5e6fc88a4cca287090c16b0530a0d5849a5603 jdk9-b05
+31946c0a3f4dc2c78f6f09a0524aaa2a0dad1c78 jdk9-b06
--- a/nashorn/.hgtags	Sat Apr 05 21:33:11 2014 +0200
+++ b/nashorn/.hgtags	Sat Apr 05 20:59:37 2014 +0000
@@ -239,3 +239,4 @@
 832f89ff25d903c45cfc994553f1ade8821a4398 jdk9-b03
 3f6ef92cd7823372c45e79125adba4cbf1c9f7b2 jdk9-b04
 2a1cac93c33317d828d4a5b81239204a9927cc4a jdk9-b05
+1f75bcbe74e315470dc0b75b7d5bcd209e287c39 jdk9-b06