Merge
author kvn
Fri, 08 Aug 2014 10:35:05 -0700
changeset 25937 aab180dc9283
parent 25910 afebf4c38c3f (current diff)
parent 25936 9b693ed74c13 (diff)
child 25938 d1161ea75e14
Merge
--- a/hotspot/make/jprt.properties	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/make/jprt.properties	Fri Aug 08 10:35:05 2014 -0700
@@ -356,14 +356,15 @@
   ${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
 
 jprt.make.rule.test.targets.standard.reg.group = \
-  ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.linux.i586}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.linux.x64}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.windows.i586}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.windows.x64}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.linux.i586}-{product|fastdebug}-c1-GROUP, \
-  ${jprt.my.windows.i586}-{product|fastdebug}-c1-GROUP
+  ${jprt.my.solaris.sparcv9}-fastdebug-c2-GROUP, \
+  ${jprt.my.solaris.x64}-fastdebug-c2-GROUP, \
+  ${jprt.my.linux.i586}-fastdebug-c2-GROUP, \
+  ${jprt.my.linux.x64}-fastdebug-c2-GROUP, \
+  ${jprt.my.macosx.x64}-fastdebug-c2-GROUP, \
+  ${jprt.my.windows.i586}-fastdebug-c2-GROUP, \
+  ${jprt.my.windows.x64}-fastdebug-c2-GROUP, \
+  ${jprt.my.linux.i586}-fastdebug-c1-GROUP, \
+  ${jprt.my.windows.i586}-fastdebug-c1-GROUP
 
 jprt.make.rule.test.targets.standard = \
   ${jprt.make.rule.test.targets.standard.client}, \
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -298,6 +298,7 @@
     LWZ_OPCODE   = (32u << OPCODE_SHIFT),
     LWZX_OPCODE  = (31u << OPCODE_SHIFT |  23u << 1),
     LWZU_OPCODE  = (33u << OPCODE_SHIFT),
+    LWBRX_OPCODE = (31u << OPCODE_SHIFT | 534u << 1),
 
     LHA_OPCODE   = (42u << OPCODE_SHIFT),
     LHAX_OPCODE  = (31u << OPCODE_SHIFT | 343u << 1),
@@ -306,6 +307,7 @@
     LHZ_OPCODE   = (40u << OPCODE_SHIFT),
     LHZX_OPCODE  = (31u << OPCODE_SHIFT | 279u << 1),
     LHZU_OPCODE  = (41u << OPCODE_SHIFT),
+    LHBRX_OPCODE = (31u << OPCODE_SHIFT | 790u << 1),
 
     LBZ_OPCODE   = (34u << OPCODE_SHIFT),
     LBZX_OPCODE  = (31u << OPCODE_SHIFT |  87u << 1),
@@ -1364,11 +1366,17 @@
   inline void lwax( Register d, Register s1, Register s2);
   inline void lwa(  Register d, int si16,    Register s1);
 
+  // 4 bytes reversed
+  inline void lwbrx( Register d, Register s1, Register s2);
+
   // 2 bytes
   inline void lhzx( Register d, Register s1, Register s2);
   inline void lhz(  Register d, int si16,    Register s1);
   inline void lhzu( Register d, int si16,    Register s1);
 
+  // 2 bytes reversed
+  inline void lhbrx( Register d, Register s1, Register s2);
+
   // 2 bytes
   inline void lhax( Register d, Register s1, Register s2);
   inline void lha(  Register d, int si16,    Register s1);
@@ -1858,10 +1866,12 @@
   inline void lwz(  Register d, int si16);
   inline void lwax( Register d, Register s2);
   inline void lwa(  Register d, int si16);
+  inline void lwbrx(Register d, Register s2);
   inline void lhzx( Register d, Register s2);
   inline void lhz(  Register d, int si16);
   inline void lhax( Register d, Register s2);
   inline void lha(  Register d, int si16);
+  inline void lhbrx(Register d, Register s2);
   inline void lbzx( Register d, Register s2);
   inline void lbz(  Register d, int si16);
   inline void ldx(  Register d, Register s2);
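
The lwbrx/lhbrx additions expose the Power ISA byte-reversed indexed loads, which later hunks use to read big-endian Java data with a single instruction on little-endian hosts. As a rough standalone sketch (not HotSpot code) of the X-form encoding the new LWBRX_OPCODE/LHBRX_OPCODE constants follow, with the primary opcode in the top 6 bits and the extended opcode in bits 1-10:

    // Illustrative X-form encoder; the shift 26 corresponds to OPCODE_SHIFT above,
    // and 534/790 are the lwbrx/lhbrx extended opcodes from the Power ISA.
    #include <cstdint>
    #include <cstdio>

    static uint32_t x_form(uint32_t primary, uint32_t xo,
                           uint32_t rt, uint32_t ra, uint32_t rb) {
      return (primary << 26) | (rt << 21) | (ra << 16) | (rb << 11) | (xo << 1);
    }

    int main() {
      printf("lwbrx r3,r4,r5 = 0x%08x\n", x_form(31, 534, 3, 4, 5)); // 0x7c64242c
      return 0;
    }
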
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -263,10 +263,14 @@
 inline void Assembler::lwax( Register d, Register s1, Register s2) { emit_int32(LWAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::lwa(  Register d, int si16,    Register s1) { emit_int32(LWA_OPCODE  | rt(d) | ds(si16)   | ra0mem(s1));}
 
+inline void Assembler::lwbrx( Register d, Register s1, Register s2) { emit_int32(LWBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+
 inline void Assembler::lhzx( Register d, Register s1, Register s2) { emit_int32(LHZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::lhz(  Register d, int si16,    Register s1) { emit_int32(LHZ_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
 inline void Assembler::lhzu( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
 
+inline void Assembler::lhbrx( Register d, Register s1, Register s2) { emit_int32(LHBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+
 inline void Assembler::lhax( Register d, Register s1, Register s2) { emit_int32(LHAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::lha(  Register d, int si16,    Register s1) { emit_int32(LHA_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
 inline void Assembler::lhau( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHAU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
@@ -736,10 +740,12 @@
 inline void Assembler::lwz(  Register d, int si16   ) { emit_int32( LWZ_OPCODE  | rt(d) | d1(si16));}
 inline void Assembler::lwax( Register d, Register s2) { emit_int32( LWAX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lwa(  Register d, int si16   ) { emit_int32( LWA_OPCODE  | rt(d) | ds(si16));}
+inline void Assembler::lwbrx(Register d, Register s2) { emit_int32( LWBRX_OPCODE| rt(d) | rb(s2));}
 inline void Assembler::lhzx( Register d, Register s2) { emit_int32( LHZX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lhz(  Register d, int si16   ) { emit_int32( LHZ_OPCODE  | rt(d) | d1(si16));}
 inline void Assembler::lhax( Register d, Register s2) { emit_int32( LHAX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lha(  Register d, int si16   ) { emit_int32( LHA_OPCODE  | rt(d) | d1(si16));}
+inline void Assembler::lhbrx(Register d, Register s2) { emit_int32( LHBRX_OPCODE| rt(d) | rb(s2));}
 inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lbz(  Register d, int si16   ) { emit_int32( LBZ_OPCODE  | rt(d) | d1(si16));}
 inline void Assembler::ld(   Register d, int si16   ) { emit_int32( LD_OPCODE   | rt(d) | ds(si16));}
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -119,9 +119,15 @@
     // Call the Interpreter::remove_activation_preserving_args_entry()
     // func to get the address of the same-named entrypoint in the
     // generated interpreter code.
+#if defined(ABI_ELFv2)
+    call_c(CAST_FROM_FN_PTR(address,
+                            Interpreter::remove_activation_preserving_args_entry),
+           relocInfo::none);
+#else
     call_c(CAST_FROM_FN_PTR(FunctionDescriptor*,
                             Interpreter::remove_activation_preserving_args_entry),
            relocInfo::none);
+#endif
 
     // Jump to Interpreter::_remove_activation_preserving_args_entry.
     mtctr(R3_RET);
@@ -331,29 +337,40 @@
 void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(int         bcp_offset,
                                                           Register    Rdst,
                                                           signedOrNot is_signed) {
+#if defined(VM_LITTLE_ENDIAN)
+  if (bcp_offset) {
+    load_const_optimized(Rdst, bcp_offset);
+    lhbrx(Rdst, R14_bcp, Rdst);
+  } else {
+    lhbrx(Rdst, R14_bcp);
+  }
+  if (is_signed == Signed) {
+    extsh(Rdst, Rdst);
+  }
+#else
   // Read Java big endian format.
   if (is_signed == Signed) {
     lha(Rdst, bcp_offset, R14_bcp);
   } else {
     lhz(Rdst, bcp_offset, R14_bcp);
   }
-#if 0
-  assert(Rtmp != Rdst, "need separate temp register");
-  Register Rfirst = Rtmp;
-  lbz(Rfirst, bcp_offset, R14_bcp); // first byte
-  lbz(Rdst, bcp_offset+1, R14_bcp); // second byte
-
-  // Rdst = ((Rfirst<<8) & 0xFF00) | (Rdst &~ 0xFF00)
-  rldimi(/*RA=*/Rdst, /*RS=*/Rfirst, /*sh=*/8, /*mb=*/48);
-  if (is_signed == Signed) {
-    extsh(Rdst, Rdst);
-  }
 #endif
 }
 
 void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(int         bcp_offset,
                                                           Register    Rdst,
                                                           signedOrNot is_signed) {
+#if defined(VM_LITTLE_ENDIAN)
+  if (bcp_offset) {
+    load_const_optimized(Rdst, bcp_offset);
+    lwbrx(Rdst, R14_bcp, Rdst);
+  } else {
+    lwbrx(Rdst, R14_bcp);
+  }
+  if (is_signed == Signed) {
+    extsw(Rdst, Rdst);
+  }
+#else
   // Read Java big endian format.
   if (bcp_offset & 3) { // Offset unaligned?
     load_const_optimized(Rdst, bcp_offset);
@@ -369,18 +386,26 @@
       lwz(Rdst, bcp_offset, R14_bcp);
     }
   }
+#endif
 }
 
+
 // Load the constant pool cache index from the bytecode stream.
 //
 // Kills / writes:
 //   - Rdst, Rscratch
 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+  // Cache index is always in the native format, courtesy of Rewriter.
   if (index_size == sizeof(u2)) {
-    get_2_byte_integer_at_bcp(bcp_offset, Rdst, Unsigned);
+    lhz(Rdst, bcp_offset, R14_bcp);
   } else if (index_size == sizeof(u4)) {
-    get_4_byte_integer_at_bcp(bcp_offset, Rdst, Signed);
+    if (bcp_offset & 3) {
+      load_const_optimized(Rdst, bcp_offset);
+      lwax(Rdst, R14_bcp, Rdst);
+    } else {
+      lwa(Rdst, bcp_offset, R14_bcp);
+    }
     assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
     nand(Rdst, Rdst, Rdst); // convert to plain index
   } else if (index_size == sizeof(u1)) {
@@ -397,6 +422,29 @@
   add(cache, R27_constPoolCache, cache);
 }
 
+// Load 4-byte signed or unsigned integer in Java format (that is, big-endian format)
+// from (Rsrc)+offset.
+void InterpreterMacroAssembler::get_u4(Register Rdst, Register Rsrc, int offset,
+                                       signedOrNot is_signed) {
+#if defined(VM_LITTLE_ENDIAN)
+  if (offset) {
+    load_const_optimized(Rdst, offset);
+    lwbrx(Rdst, Rdst, Rsrc);
+  } else {
+    lwbrx(Rdst, Rsrc);
+  }
+  if (is_signed == Signed) {
+    extsw(Rdst, Rdst);
+  }
+#else
+  if (is_signed == Signed) {
+    lwa(Rdst, offset, Rsrc);
+  } else {
+    lwz(Rdst, offset, Rsrc);
+  }
+#endif
+}
+
 // Load object from cpool->resolved_references(index).
 void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index) {
   assert_different_registers(result, index);
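
get_2_byte_integer_at_bcp, get_4_byte_integer_at_bcp, and the new get_u4 use lhbrx/lwbrx on VM_LITTLE_ENDIAN builds, so the big-endian bytecode operands are byte-swapped by the load itself instead of by extra shift/insert instructions. A plain C++ sketch of the value these helpers produce (hypothetical names, not the HotSpot API):

    // Reads a 4-byte big-endian value from a byte stream and optionally
    // sign-extends it, matching what get_u4(..., Signed/Unsigned) yields.
    #include <cstdint>

    static int64_t get_u4_sketch(const uint8_t* p, bool is_signed) {
      uint32_t v = (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
                   (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);
      return is_signed ? int64_t(int32_t(v)) : int64_t(v);
    }
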
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -130,6 +130,7 @@
 
   void get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size = sizeof(u2));
 
+  void get_u4(Register Rdst, Register Rsrc, int offset, signedOrNot is_signed);
 
   // common code
 
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Fri Aug 08 10:35:05 2014 -0700
@@ -1283,8 +1283,6 @@
 
 bool MachConstantBaseNode::requires_postalloc_expand() const { return true; }
 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
-  Compile *C = ra_->C;
-
   iRegPdstOper *op_dst = new iRegPdstOper();
   MachNode *m1 = new loadToc_hiNode();
   MachNode *m2 = new loadToc_loNode();
@@ -2229,7 +2227,7 @@
 }
 /* TODO: PPC port
 // Make a new machine dependent decode node (with its operands).
-MachTypeNode *Matcher::make_decode_node(Compile *C) {
+MachTypeNode *Matcher::make_decode_node() {
   assert(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0,
          "This method is only implemented for unscaled cOops mode so far");
   MachTypeNode *decode = new decodeN_unscaledNode();
@@ -2593,7 +2591,7 @@
   MachNode        *_last;
 } loadConLNodesTuple;
 
-loadConLNodesTuple loadConLNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
+loadConLNodesTuple loadConLNodesTuple_create(PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
                                              OptoReg::Name reg_second, OptoReg::Name reg_first) {
   loadConLNodesTuple nodes;
 
@@ -2669,7 +2667,7 @@
   enc_class postalloc_expand_load_long_constant(iRegLdst dst, immL src, iRegLdst toc) %{
     // Create new nodes.
     loadConLNodesTuple loadConLNodes =
-      loadConLNodesTuple_create(C, ra_, n_toc, op_src,
+      loadConLNodesTuple_create(ra_, n_toc, op_src,
                                 ra_->get_reg_second(this), ra_->get_reg_first(this));
 
     // Push new nodes.
@@ -3391,7 +3389,7 @@
     immLOper *op_repl = new immLOper((jlong)replicate_immF(op_src->constantF()));
 
     loadConLNodesTuple loadConLNodes =
-      loadConLNodesTuple_create(C, ra_, n_toc, op_repl,
+      loadConLNodesTuple_create(ra_, n_toc, op_repl,
                                 ra_->get_reg_second(this), ra_->get_reg_first(this));
 
     // Push new nodes.
@@ -3611,7 +3609,7 @@
 
     // Create the nodes for loading the IC from the TOC.
     loadConLNodesTuple loadConLNodes_IC =
-      loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong)Universe::non_oop_word()),
+      loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong)Universe::non_oop_word()),
                                 OptoReg::Name(R19_H_num), OptoReg::Name(R19_num));
 
     // Create the call node.
@@ -3765,7 +3763,7 @@
 #if defined(ABI_ELFv2)
     jlong entry_address = (jlong) this->entry_point();
     assert(entry_address, "need address here");
-    loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper(entry_address),
+    loadConLNodes_Entry = loadConLNodesTuple_create(ra_, n_toc, new immLOper(entry_address),
                                                     OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
 #else
     // Get the struct that describes the function we are about to call.
@@ -3777,13 +3775,13 @@
     loadConLNodesTuple loadConLNodes_Toc;
 
     // Create nodes and operands for loading the entry point.
-    loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper(entry_address),
+    loadConLNodes_Entry = loadConLNodesTuple_create(ra_, n_toc, new immLOper(entry_address),
                                                     OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
 
 
     // Create nodes and operands for loading the env pointer.
     if (fd->env() != NULL) {
-      loadConLNodes_Env = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong) fd->env()),
+      loadConLNodes_Env = loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) fd->env()),
                                                     OptoReg::Name(R11_H_num), OptoReg::Name(R11_num));
     } else {
       loadConLNodes_Env._large_hi = NULL;
@@ -3796,7 +3794,7 @@
     }
 
     // Create nodes and operands for loading the Toc point.
-    loadConLNodes_Toc = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong) fd->toc()),
+    loadConLNodes_Toc = loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) fd->toc()),
                                                   OptoReg::Name(R2_H_num), OptoReg::Name(R2_num));
 #endif // ABI_ELFv2
     // mtctr node
--- a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -176,8 +176,12 @@
   const Register size  = R12_scratch2;
   __ get_cache_and_index_at_bcp(cache, 1, index_size);
 
-  // Big Endian (get least significant byte of 64 bit value):
+  // Get least significant byte of 64 bit value:
+#if defined(VM_LITTLE_ENDIAN)
+  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache);
+#else
   __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
+#endif
   __ sldi(size, size, Interpreter::logStackElementSize);
   __ add(R15_esp, R15_esp, size);
   __ dispatch_next(state, step);
@@ -858,7 +862,9 @@
   // Our signature handlers copy required arguments to the C stack
   // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
   __ mr(R3_ARG1, R18_locals);
+#if !defined(ABI_ELFv2)
   __ ld(signature_handler_fd, 0, signature_handler_fd);
+#endif
 
   __ call_stub(signature_handler_fd);
 
@@ -1020,8 +1026,13 @@
   // native result across the call. No oop is present.
 
   __ mr(R3_ARG1, R16_thread);
+#if defined(ABI_ELFv2)
+  __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
+            relocInfo::none);
+#else
   __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
             relocInfo::none);
+#endif
 
   __ bind(sync_check_done);
 
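
The ABI_ELFv2 blocks account for the difference between the two 64-bit PowerPC ELF ABIs: under ELFv1 a call target is a function descriptor (entry point, TOC, environment), so the code loads the real entry address out of the descriptor before calling, while ELFv2 uses plain entry addresses. A rough sketch of the descriptor layout the ELFv1 path relies on (illustrative only, not the HotSpot FunctionDescriptor class):

    // ELFv1 function descriptor: the "function pointer" is the address of this
    // struct, and the first slot holds the actual code entry point.
    struct elfv1_func_desc {
      void* entry;  // code address the call must branch to
      void* toc;    // TOC (r2) value for the callee
      void* env;    // environment pointer (r11), usually unused for C code
    };
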
--- a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -189,8 +189,12 @@
       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
       __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
-      // Big Endian: ((*(cache+indices))>>((1+byte_no)*8))&0xFF
+      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
+#if defined(VM_LITTLE_ENDIAN)
+      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
+#else
       __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
+#endif
       __ cmpwi(CCR0, Rnew_bc, 0);
       __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
       __ beq(CCR0, L_patch_done);
@@ -1839,8 +1843,8 @@
   __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
 
   // Load lo & hi.
-  __ lwz(Rlow_byte, BytesPerInt, Rdef_offset_addr);
-  __ lwz(Rhigh_byte, BytesPerInt * 2, Rdef_offset_addr);
+  __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
+  __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
 
   // Check for default case (=index outside [low,high]).
   __ cmpw(CCR0, R17_tos, Rlow_byte);
@@ -1854,12 +1858,17 @@
   __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
   __ sldi(Rindex, Rindex, LogBytesPerInt);
   __ addi(Rindex, Rindex, 3 * BytesPerInt);
+#if defined(VM_LITTLE_ENDIAN)
+  __ lwbrx(Roffset, Rdef_offset_addr, Rindex);
+  __ extsw(Roffset, Roffset);
+#else
   __ lwax(Roffset, Rdef_offset_addr, Rindex);
+#endif
   __ b(Ldispatch);
 
   __ bind(Ldefault_case);
   __ profile_switch_default(Rhigh_byte, Rscratch1);
-  __ lwa(Roffset, 0, Rdef_offset_addr);
+  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
 
   __ bind(Ldispatch);
 
@@ -1875,12 +1884,11 @@
 // Table switch using linear search through cases.
 // Bytecode stream format:
 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
-// Note: Everything is big-endian format here. So on little endian machines, we have to revers offset and count and cmp value.
+// Note: Everything is big-endian format here.
 void TemplateTable::fast_linearswitch() {
   transition(itos, vtos);
 
-  Label Lloop_entry, Lsearch_loop, Lfound, Lcontinue_execution, Ldefault_case;
-
+  Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case;
   Register Rcount           = R3_ARG1,
            Rcurrent_pair    = R4_ARG2,
            Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
@@ -1894,47 +1902,40 @@
   __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
 
   // Setup loop counter and limit.
-  __ lwz(Rcount, BytesPerInt, Rdef_offset_addr);    // Load count.
+  __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
   __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.
 
-  // Set up search loop.
-  __ cmpwi(CCR0, Rcount, 0);
-  __ beq(CCR0, Ldefault_case);
-
   __ mtctr(Rcount);
-
-  // linear table search
-  __ bind(Lsearch_loop);
-
-  __ lwz(Rvalue, 0, Rcurrent_pair);
-  __ lwa(Roffset, 1 * BytesPerInt, Rcurrent_pair);
-
-  __ cmpw(CCR0, Rvalue, Rcmp_value);
-  __ beq(CCR0, Lfound);
-
-  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
-  __ bdnz(Lsearch_loop);
-
-  // default case
+  __ cmpwi(CCR0, Rcount, 0);
+  __ bne(CCR0, Lloop_entry);
+
+  // Default case
   __ bind(Ldefault_case);
-
-  __ lwa(Roffset, 0, Rdef_offset_addr);
+  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
   if (ProfileInterpreter) {
     __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */);
-    __ b(Lcontinue_execution);
   }
-
-  // Entry found, skip Roffset bytecodes and continue.
-  __ bind(Lfound);
+  __ b(Lcontinue_execution);
+
+  // Next iteration
+  __ bind(Lsearch_loop);
+  __ bdz(Ldefault_case);
+  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
+  __ bind(Lloop_entry);
+  __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
+  __ cmpw(CCR0, Rvalue, Rcmp_value);
+  __ bne(CCR0, Lsearch_loop);
+
+  // Found, load offset.
+  __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
+  // Calculate case index and profile
+  __ mfctr(Rcurrent_pair);
   if (ProfileInterpreter) {
-    // Calc the num of the pair we hit. Careful, Rcurrent_pair points 2 ints
-    // beyond the actual current pair due to the auto update load above!
-    __ sub(Rcurrent_pair, Rcurrent_pair, Rdef_offset_addr);
-    __ addi(Rcurrent_pair, Rcurrent_pair, - 2 * BytesPerInt);
-    __ srdi(Rcurrent_pair, Rcurrent_pair, LogBytesPerInt + 1);
+    __ sub(Rcurrent_pair, Rcount, Rcurrent_pair);
     __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
-    __ bind(Lcontinue_execution);
   }
+
+  __ bind(Lcontinue_execution);
   __ add(R14_bcp, Roffset, R14_bcp);
   __ dispatch_next(vtos);
 }
@@ -1990,7 +1991,7 @@
 
   // initialize i & j
   __ li(Ri,0);
-  __ lwz(Rj, -BytesPerInt, Rarray);
+  __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
 
   // and start.
   Label entry;
@@ -2007,7 +2008,11 @@
     //   i = h;
     // }
     __ sldi(Rscratch, Rh, log_entry_size);
+#if defined(VM_LITTLE_ENDIAN)
+    __ lwbrx(Rscratch, Rscratch, Rarray);
+#else
     __ lwzx(Rscratch, Rscratch, Rarray);
+#endif
 
     // if (key < current value)
     //   Rh = Rj
@@ -2039,20 +2044,20 @@
   // Ri = value offset
   __ sldi(Ri, Ri, log_entry_size);
   __ add(Ri, Ri, Rarray);
-  __ lwz(Rscratch, 0, Ri);
+  __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);
 
   Label not_found;
   // Ri = offset offset
   __ cmpw(CCR0, Rkey, Rscratch);
   __ beq(CCR0, not_found);
   // entry not found -> j = default offset
-  __ lwz(Rj, -2 * BytesPerInt, Rarray);
+  __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
   __ b(default_case);
 
   __ bind(not_found);
   // entry found -> j = offset
   __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
-  __ lwz(Rj, BytesPerInt, Ri);
+  __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);
 
   if (ProfileInterpreter) {
     __ b(continue_execution);
@@ -2147,8 +2152,11 @@
 
   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
   // We are resolved if the indices offset contains the current bytecode.
-  // Big Endian:
+#if defined(VM_LITTLE_ENDIAN)
+  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
+#else
   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
+#endif
   // Acquire by cmp-br-isync (see below).
   __ cmpdi(CCR0, Rscratch, (int)bytecode());
   __ beq(CCR0, Lresolved);
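
The reworked fast_linearswitch keeps the pair count in the CTR register, enters the loop only when the count is non-zero, and recovers the matched case index afterwards as count minus the remaining CTR value (mfctr/sub), instead of recomputing it from the pair pointer. A small sketch of the equivalent control flow (plain C++, hypothetical names), assuming count pairs of (match value, branch offset):

    #include <cstdint>

    struct pair { uint32_t value; int32_t offset; };

    // 'ctr' plays the role of the CTR register; case index = count - ctr on a hit.
    static int32_t linearswitch_sketch(const pair* pairs, uint32_t count,
                                       uint32_t key, int32_t default_offset,
                                       int* case_index /* -1 for default */) {
      uint32_t ctr = count;
      while (ctr != 0) {
        const pair& p = pairs[count - ctr];
        if (p.value == key) {
          *case_index = int(count - ctr);
          return p.offset;
        }
        ctr--;                      // bdz: decrement and fall out when exhausted
      }
      *case_index = -1;
      return default_offset;
    }
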
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Fri Aug 08 10:35:05 2014 -0700
@@ -6184,7 +6184,11 @@
   ins_cost(DEFAULT_COST * 3/2);
   format %{ "SET    $con,$dst\t! non-oop ptr" %}
   ins_encode %{
-    __ set($con$$constant, $dst$$Register);
+    if (_opnds[1]->constant_reloc() == relocInfo::metadata_type) {
+      __ set_metadata_constant((Metadata*)$con$$constant, $dst$$Register);
+    } else {
+      __ set($con$$constant, $dst$$Register);
+    }
   %}
   ins_pipe(loadConP);
 %}
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -3854,6 +3854,15 @@
 }
 
 // Carry-Less Multiplication Quadword
+void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
+  assert(VM_Version::supports_clmul(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
+  emit_int8(0x44);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8((unsigned char)mask);
+}
+
+// Carry-Less Multiplication Quadword
 void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
   assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
   bool vector256 = false;
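
The new non-VEX pclmulqdq encoder emits the legacy-SSE form of carry-less multiplication, which is what lets the CRC32 intrinsic run on CLMUL-capable CPUs without AVX (see the UseCLMUL check change in vm_version_x86.cpp below). The imm8 operand selects which 64-bit half of each source participates; a hedged sketch with the standard compiler intrinsic (assumes a toolchain providing <wmmintrin.h> with CLMUL enabled, e.g. -mpclmul):

    #include <wmmintrin.h>  // _mm_clmulepi64_si128 (PCLMULQDQ)

    // imm8 bit 0 selects the low/high qword of the first operand,
    // bit 4 selects the qword of the second operand.
    static __m128i clmul_low_low(__m128i a, __m128i b)   { return _mm_clmulepi64_si128(a, b, 0x00); }
    static __m128i clmul_high_high(__m128i a, __m128i b) { return _mm_clmulepi64_si128(a, b, 0x11); }
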
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1837,6 +1837,7 @@
   void vpbroadcastd(XMMRegister dst, XMMRegister src);
 
   // Carry-Less Multiplication Quadword
+  void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
   void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
 
   // AVX instruction which is used to clear upper 128 bits of YMM registers and
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -7316,17 +7316,34 @@
  * Fold 128-bit data chunk
  */
 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
-  vpclmulhdq(xtmp, xK, xcrc); // [123:64]
-  vpclmulldq(xcrc, xK, xcrc); // [63:0]
-  vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
-  pxor(xcrc, xtmp);
+  if (UseAVX > 0) {
+    vpclmulhdq(xtmp, xK, xcrc); // [123:64]
+    vpclmulldq(xcrc, xK, xcrc); // [63:0]
+    vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
+    pxor(xcrc, xtmp);
+  } else {
+    movdqa(xtmp, xcrc);
+    pclmulhdq(xtmp, xK);   // [123:64]
+    pclmulldq(xcrc, xK);   // [63:0]
+    pxor(xcrc, xtmp);
+    movdqu(xtmp, Address(buf, offset));
+    pxor(xcrc, xtmp);
+  }
 }
 
 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
-  vpclmulhdq(xtmp, xK, xcrc);
-  vpclmulldq(xcrc, xK, xcrc);
-  pxor(xcrc, xbuf);
-  pxor(xcrc, xtmp);
+  if (UseAVX > 0) {
+    vpclmulhdq(xtmp, xK, xcrc);
+    vpclmulldq(xcrc, xK, xcrc);
+    pxor(xcrc, xbuf);
+    pxor(xcrc, xtmp);
+  } else {
+    movdqa(xtmp, xcrc);
+    pclmulhdq(xtmp, xK);
+    pclmulldq(xcrc, xK);
+    pxor(xcrc, xbuf);
+    pxor(xcrc, xtmp);
+  }
 }
 
 /**
@@ -7444,9 +7461,17 @@
   // Fold 128 bits in xmm1 down into 32 bits in crc register.
   BIND(L_fold_128b);
   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()));
-  vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
-  vpand(xmm3, xmm0, xmm2, false /* vector256 */);
-  vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
+  if (UseAVX > 0) {
+    vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
+    vpand(xmm3, xmm0, xmm2, false /* vector256 */);
+    vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
+  } else {
+    movdqa(xmm2, xmm0);
+    pclmulqdq(xmm2, xmm1, 0x1);
+    movdqa(xmm3, xmm0);
+    pand(xmm3, xmm2);
+    pclmulqdq(xmm0, xmm3, 0x1);
+  }
   psrldq(xmm1, 8);
   psrldq(xmm2, 4);
   pxor(xmm0, xmm1);
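
The reworked fold_128bit_crc32 keeps the three-operand AVX sequence when UseAVX > 0 and otherwise falls back to the destructive two-operand SSE forms, which is why the fallback first copies xcrc into xtmp with movdqa before multiplying in place. Both branches compute the same fold; roughly, in intrinsic form (a sketch under the same CLMUL assumptions as above, not the HotSpot code):

    #include <wmmintrin.h>

    // One 128-bit fold step: carry-less-multiply the CRC state by the folding
    // constants (upper and lower halves separately), then xor in the next chunk.
    static __m128i fold_step(__m128i xcrc, __m128i xK, __m128i chunk) {
      __m128i hi = _mm_clmulepi64_si128(xK, xcrc, 0x11);
      __m128i lo = _mm_clmulepi64_si128(xK, xcrc, 0x00);
      return _mm_xor_si128(_mm_xor_si128(lo, hi), chunk);
    }
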
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -966,6 +966,16 @@
   void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
   void mulss(XMMRegister dst, AddressLiteral src);
 
+  // Carry-Less Multiplication Quadword
+  void pclmulldq(XMMRegister dst, XMMRegister src) {
+    // 0x00 - multiply lower 64 bits [0:63]
+    Assembler::pclmulqdq(dst, src, 0x00);
+  }
+  void pclmulhdq(XMMRegister dst, XMMRegister src) {
+    // 0x11 - multiply upper 64 bits [64:127]
+    Assembler::pclmulqdq(dst, src, 0x11);
+  }
+
   void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
   void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
   void sqrtsd(XMMRegister dst, AddressLiteral src);
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -559,7 +559,7 @@
     FLAG_SET_DEFAULT(UseCLMUL, false);
   }
 
-  if (UseCLMUL && (UseAVX > 0) && (UseSSE > 2)) {
+  if (UseCLMUL && (UseSSE > 2)) {
     if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
       UseCRC32Intrinsics = true;
     }
@@ -805,6 +805,21 @@
         }
       }
     }
+    if ((cpu_family() == 0x06) &&
+        ((extended_cpu_model() == 0x36) || // Centerton
+         (extended_cpu_model() == 0x37) || // Silvermont
+         (extended_cpu_model() == 0x4D))) {
+#ifdef COMPILER2
+      if (FLAG_IS_DEFAULT(OptoScheduling)) {
+        OptoScheduling = true;
+      }
+#endif
+      if (supports_sse4_2()) { // Silvermont
+        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
+          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
+        }
+      }
+    }
   }
 
   // Use count leading zeros count instruction if available.
@@ -892,23 +907,25 @@
   AllocatePrefetchDistance = allocate_prefetch_distance();
   AllocatePrefetchStyle    = allocate_prefetch_style();
 
-  if( is_intel() && cpu_family() == 6 && supports_sse3() ) {
-    if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core
+  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
+    if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core
 #ifdef _LP64
       AllocatePrefetchDistance = 384;
 #else
       AllocatePrefetchDistance = 320;
 #endif
     }
-    if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus
+    if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
       AllocatePrefetchDistance = 192;
       AllocatePrefetchLines = 4;
+    }
 #ifdef COMPILER2
-      if (AggressiveOpts && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
+    if (supports_sse4_2()) {
+      if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
         FLAG_SET_DEFAULT(UseFPUForSpilling, true);
       }
+    }
 #endif
-    }
   }
   assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");
 
--- a/hotspot/src/share/vm/adlc/output_c.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/adlc/output_c.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -997,7 +997,7 @@
   int nopcnt = 0;
   for ( _pipeline->_noplist.reset(); (nop = _pipeline->_noplist.iter()) != NULL; nopcnt++ );
 
-  fprintf(fp_cpp, "void Bundle::initialize_nops(MachNode * nop_list[%d], Compile *C) {\n", nopcnt);
+  fprintf(fp_cpp, "void Bundle::initialize_nops(MachNode * nop_list[%d]) {\n", nopcnt);
   int i = 0;
   for ( _pipeline->_noplist.reset(); (nop = _pipeline->_noplist.iter()) != NULL; i++ ) {
     fprintf(fp_cpp, "  nop_list[%d] = (MachNode *) new %sNode();\n", i, nop);
@@ -1369,7 +1369,7 @@
         fprintf(fp, "        ra_->add_reference(root, inst%d);\n", inst_num);
         fprintf(fp, "        ra_->set_oop (root, ra_->is_oop(inst%d));\n", inst_num);
         fprintf(fp, "        ra_->set_pair(root->_idx, ra_->get_reg_second(inst%d), ra_->get_reg_first(inst%d));\n", inst_num, inst_num);
-        fprintf(fp, "        root->_opnds[0] = inst%d->_opnds[0]->clone(C); // result\n", inst_num);
+        fprintf(fp, "        root->_opnds[0] = inst%d->_opnds[0]->clone(); // result\n", inst_num);
         fprintf(fp, "        // ----- Done with initial setup -----\n");
       } else {
         if( (op_form == NULL) || (op_form->is_base_constant(globals) == Form::none) ) {
@@ -1382,7 +1382,7 @@
         } else {
           fprintf(fp, "        // no ideal edge for constants after matching\n");
         }
-        fprintf(fp, "        root->_opnds[%d] = inst%d->_opnds[%d]->clone(C);\n",
+        fprintf(fp, "        root->_opnds[%d] = inst%d->_opnds[%d]->clone();\n",
                 opnds_index, inst_num, inst_op_num );
       }
       ++opnds_index;
@@ -1402,7 +1402,7 @@
 // Define the Peephole method for an instruction node
 void ArchDesc::definePeephole(FILE *fp, InstructForm *node) {
   // Generate Peephole function header
-  fprintf(fp, "MachNode *%sNode::peephole( Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile* C ) {\n", node->_ident);
+  fprintf(fp, "MachNode *%sNode::peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted) {\n", node->_ident);
   fprintf(fp, "  bool  matches = true;\n");
 
   // Identify the maximum instruction position,
@@ -1593,7 +1593,7 @@
       }
 
       const char *resultOper = new_inst->reduce_result();
-      fprintf(fp,"  n%d->set_opnd_array(0, state->MachOperGenerator( %s, C ));\n",
+      fprintf(fp,"  n%d->set_opnd_array(0, state->MachOperGenerator(%s));\n",
               cnt, machOperEnum(resultOper));
 
       // get the formal operand NameList
@@ -1634,7 +1634,7 @@
           // If there is no use of the created operand, just skip it
           if (new_pos != NameList::Not_in_list) {
             //Copy the operand from the original made above
-            fprintf(fp,"  n%d->set_opnd_array(%d, op%d->clone(C)); // %s\n",
+            fprintf(fp,"  n%d->set_opnd_array(%d, op%d->clone()); // %s\n",
                     cnt, new_pos, exp_pos-node->num_opnds(), opid);
             // Check for who defines this operand & add edge if needed
             fprintf(fp,"  if(tmp%d != NULL)\n", exp_pos);
@@ -1662,7 +1662,7 @@
           new_pos = new_inst->operand_position(parameter,Component::USE);
           if (new_pos != -1) {
             // Copy the operand from the ExpandNode to the new node
-            fprintf(fp,"  n%d->set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n",
+            fprintf(fp,"  n%d->set_opnd_array(%d, opnd_array(%d)->clone()); // %s\n",
                     cnt, new_pos, exp_pos, opid);
             // For each operand add appropriate input edges by looking at tmp's
             fprintf(fp,"  if(tmp%d == this) {\n", exp_pos);
@@ -1729,14 +1729,14 @@
           declared_def = true;
         }
         if (op && op->_interface && op->_interface->is_RegInterface()) {
-          fprintf(fp,"  def = new MachTempNode(state->MachOperGenerator( %s, C ));\n",
+          fprintf(fp,"  def = new MachTempNode(state->MachOperGenerator(%s));\n",
                   machOperEnum(op->_ident));
           fprintf(fp,"  add_req(def);\n");
           // The operand for TEMP is already constructed during
           // this mach node construction, see buildMachNode().
           //
           // int idx  = node->operand_position_format(comp->_name);
-          // fprintf(fp,"  set_opnd_array(%d, state->MachOperGenerator( %s, C ));\n",
+          // fprintf(fp,"  set_opnd_array(%d, state->MachOperGenerator(%s));\n",
           //         idx, machOperEnum(op->_ident));
         } else {
           assert(false, "can't have temps which aren't registers");
@@ -1802,7 +1802,7 @@
         uint j = node->unique_opnds_idx(i);
         // unique_opnds_idx(i) is unique if unique_opnds_idx(j) is not unique.
         if( j != node->unique_opnds_idx(j) ) {
-          fprintf(fp,"  set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n",
+          fprintf(fp,"  set_opnd_array(%d, opnd_array(%d)->clone()); // %s\n",
                   new_num_opnds, i, comp->_name);
           // delete not unique edges here
           fprintf(fp,"  for(unsigned i = 0; i < num%d; i++) {\n", i);
@@ -2839,12 +2839,12 @@
 
 // generate code to create a clone for a class derived from MachOper
 //
-// (0)  MachOper  *MachOperXOper::clone(Compile* C) const {
+// (0)  MachOper  *MachOperXOper::clone() const {
 // (1)    return new MachXOper( _ccode, _c0, _c1, ..., _cn);
 // (2)  }
 //
 static void defineClone(FILE *fp, FormDict &globalNames, OperandForm &oper) {
-  fprintf(fp,"MachOper *%sOper::clone(Compile* C) const {\n", oper._ident);
+  fprintf(fp,"MachOper *%sOper::clone() const {\n", oper._ident);
   // Check for constants that need to be copied over
   const int  num_consts    = oper.num_consts(globalNames);
   const bool is_ideal_bool = oper.is_ideal_bool();
@@ -3043,7 +3043,7 @@
 static void define_fill_new_machnode(bool used, FILE *fp_cpp) {
   fprintf(fp_cpp, "\n");
   fprintf(fp_cpp, "// Copy _idx, inputs and operands to new node\n");
-  fprintf(fp_cpp, "void MachNode::fill_new_machnode( MachNode* node, Compile* C) const {\n");
+  fprintf(fp_cpp, "void MachNode::fill_new_machnode(MachNode* node) const {\n");
   if( !used ) {
     fprintf(fp_cpp, "  // This architecture does not have cisc or short branch instructions\n");
     fprintf(fp_cpp, "  ShouldNotCallThis();\n");
@@ -3064,7 +3064,7 @@
     fprintf(fp_cpp, "  MachOper **to = node->_opnds;\n");
     fprintf(fp_cpp, "  for( int i = 0; i < nopnds; i++ ) {\n");
     fprintf(fp_cpp, "    if( i != cisc_operand() ) \n");
-    fprintf(fp_cpp, "      to[i] = _opnds[i]->clone(C);\n");
+    fprintf(fp_cpp, "      to[i] = _opnds[i]->clone();\n");
     fprintf(fp_cpp, "  }\n");
     fprintf(fp_cpp, "}\n");
   }
@@ -3105,7 +3105,7 @@
     if ( strcmp(oper->_ident,"label") == 0 ) {
       defineIn_RegMask(_CPP_MISC_file._fp, _globalNames, *oper);
 
-      fprintf(fp,"MachOper  *%sOper::clone(Compile* C) const {\n", oper->_ident);
+      fprintf(fp,"MachOper  *%sOper::clone() const {\n", oper->_ident);
       fprintf(fp,"  return  new %sOper(_label, _block_num);\n", oper->_ident);
       fprintf(fp,"}\n");
 
@@ -3124,7 +3124,7 @@
     if ( strcmp(oper->_ident,"method") == 0 ) {
       defineIn_RegMask(_CPP_MISC_file._fp, _globalNames, *oper);
 
-      fprintf(fp,"MachOper  *%sOper::clone(Compile* C) const {\n", oper->_ident);
+      fprintf(fp,"MachOper  *%sOper::clone() const {\n", oper->_ident);
       fprintf(fp,"  return  new %sOper(_method);\n", oper->_ident);
       fprintf(fp,"}\n");
 
@@ -3845,7 +3845,7 @@
           "// that invokes 'new' on the corresponding class constructor.\n");
   fprintf(fp_cpp, "\n");
   fprintf(fp_cpp, "MachOper *State::MachOperGenerator");
-  fprintf(fp_cpp, "(int opcode, Compile* C)");
+  fprintf(fp_cpp, "(int opcode)");
   fprintf(fp_cpp, "{\n");
   fprintf(fp_cpp, "\n");
   fprintf(fp_cpp, "  switch(opcode) {\n");
@@ -3921,7 +3921,7 @@
       int         index  = clist.operand_position(comp->_name, comp->_usedef, inst);
       const char *opcode = machOperEnum(comp->_type);
       fprintf(fp_cpp, "%s node->set_opnd_array(%d, ", indent, index);
-      fprintf(fp_cpp, "MachOperGenerator(%s, C));\n", opcode);
+      fprintf(fp_cpp, "MachOperGenerator(%s));\n", opcode);
       }
   }
   else if ( inst->is_chain_of_constant(_globalNames, opType) ) {
@@ -3978,7 +3978,7 @@
     InstructForm *inst_cisc = cisc_spill_alternate();
     if (inst_cisc != NULL) {
       fprintf(fp_hpp, "  virtual int            cisc_operand() const { return %d; }\n", cisc_spill_operand());
-      fprintf(fp_hpp, "  virtual MachNode      *cisc_version(int offset, Compile* C);\n");
+      fprintf(fp_hpp, "  virtual MachNode      *cisc_version(int offset);\n");
       fprintf(fp_hpp, "  virtual void           use_cisc_RegMask();\n");
       fprintf(fp_hpp, "  virtual const RegMask *cisc_RegMask() const { return _cisc_RegMask; }\n");
     }
@@ -4008,7 +4008,7 @@
     // Construct CISC version of this instruction
     fprintf(fp_cpp, "\n");
     fprintf(fp_cpp, "// Build CISC version of this instruction\n");
-    fprintf(fp_cpp, "MachNode *%sNode::cisc_version( int offset, Compile* C ) {\n", this->_ident);
+    fprintf(fp_cpp, "MachNode *%sNode::cisc_version(int offset) {\n", this->_ident);
     // Create the MachNode object
     fprintf(fp_cpp, "  %sNode *node = new %sNode();\n", name, name);
     // Fill in the bottom_type where requested
@@ -4023,7 +4023,7 @@
 
     fprintf(fp_cpp, "\n");
     fprintf(fp_cpp, "  // Copy _idx, inputs and operands to new node\n");
-    fprintf(fp_cpp, "  fill_new_machnode(node, C);\n");
+    fprintf(fp_cpp, "  fill_new_machnode(node);\n");
     // Construct operand to access [stack_pointer + offset]
     fprintf(fp_cpp, "  // Construct operand to access [stack_pointer + offset]\n");
     fprintf(fp_cpp, "  node->set_opnd_array(cisc_operand(), new %sOper(offset));\n", cisc_oper_name);
@@ -4042,7 +4042,7 @@
 // Build prototypes for short branch methods
 void InstructForm::declare_short_branch_methods(FILE *fp_hpp) {
   if (has_short_branch_form()) {
-    fprintf(fp_hpp, "  virtual MachNode      *short_branch_version(Compile* C);\n");
+    fprintf(fp_hpp, "  virtual MachNode      *short_branch_version();\n");
   }
 }
 
@@ -4055,7 +4055,7 @@
 
     // Construct short_branch_version() method.
     fprintf(fp_cpp, "// Build short branch version of this instruction\n");
-    fprintf(fp_cpp, "MachNode *%sNode::short_branch_version(Compile* C) {\n", this->_ident);
+    fprintf(fp_cpp, "MachNode *%sNode::short_branch_version() {\n", this->_ident);
     // Create the MachNode object
     fprintf(fp_cpp, "  %sNode *node = new %sNode();\n", name, name);
     if( is_ideal_if() ) {
@@ -4071,7 +4071,7 @@
     // Short branch version must use same node index for access
     // through allocator's tables
     fprintf(fp_cpp, "  // Copy _idx, inputs and operands to new node\n");
-    fprintf(fp_cpp, "  fill_new_machnode(node, C);\n");
+    fprintf(fp_cpp, "  fill_new_machnode(node);\n");
 
     // Return result and exit scope
     fprintf(fp_cpp, "  return node;\n");
@@ -4097,7 +4097,7 @@
           "// that invokes 'new' on the corresponding class constructor.\n");
   fprintf(fp_cpp, "\n");
   fprintf(fp_cpp, "MachNode *State::MachNodeGenerator");
-  fprintf(fp_cpp, "(int opcode, Compile* C)");
+  fprintf(fp_cpp, "(int opcode)");
   fprintf(fp_cpp, "{\n");
   fprintf(fp_cpp, "  switch(opcode) {\n");
 
--- a/hotspot/src/share/vm/adlc/output_h.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/adlc/output_h.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1119,7 +1119,7 @@
   fprintf(fp_hpp, "    _nop_count = %d\n",
     _pipeline->_nopcnt);
   fprintf(fp_hpp, "  };\n\n");
-  fprintf(fp_hpp, "  static void initialize_nops(MachNode *nop_list[%d], Compile* C);\n\n",
+  fprintf(fp_hpp, "  static void initialize_nops(MachNode *nop_list[%d]);\n\n",
     _pipeline->_nopcnt);
   fprintf(fp_hpp, "#ifndef PRODUCT\n");
   fprintf(fp_hpp, "  void dump(outputStream *st = tty) const;\n");
@@ -1240,7 +1240,7 @@
                       constant_type, _globalNames);
 
     // Clone function
-    fprintf(fp,"  virtual MachOper      *clone(Compile* C) const;\n");
+    fprintf(fp,"  virtual MachOper      *clone() const;\n");
 
     // Support setting a spill offset into a constant operand.
     // We only support setting an 'int' offset, while in the
@@ -1718,7 +1718,7 @@
 
     // If there is an explicit peephole rule, build it
     if ( instr->peepholes() != NULL ) {
-      fprintf(fp,"  virtual MachNode      *peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile *C);\n");
+      fprintf(fp,"  virtual MachNode      *peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted);\n");
     }
 
     // Output the declaration for number of relocation entries
@@ -1863,7 +1863,7 @@
     }
     if ( instr->num_post_match_opnds() != 0
          || instr->is_chain_of_constant(_globalNames) ) {
-      fprintf(fp,"  friend MachNode *State::MachNodeGenerator(int opcode, Compile* C);\n");
+      fprintf(fp,"  friend MachNode *State::MachNodeGenerator(int opcode);\n");
     }
     if ( instr->rematerialize(_globalNames, get_registers()) ) {
       fprintf(fp,"  // Rematerialize %s\n", instr->_ident);
@@ -2071,8 +2071,8 @@
   fprintf(fp,"  DEBUG_ONLY( ~State(void); )       // Destructor\n");
   fprintf(fp,"\n");
   fprintf(fp,"  // Methods created by ADLC and invoked by Reduce\n");
-  fprintf(fp,"  MachOper *MachOperGenerator( int opcode, Compile* C );\n");
-  fprintf(fp,"  MachNode *MachNodeGenerator( int opcode, Compile* C );\n");
+  fprintf(fp,"  MachOper *MachOperGenerator(int opcode);\n");
+  fprintf(fp,"  MachNode *MachNodeGenerator(int opcode);\n");
   fprintf(fp,"\n");
   fprintf(fp,"  // Assign a state to a node, definition of method produced by ADLC\n");
   fprintf(fp,"  bool DFA( int opcode, const Node *ideal );\n");
--- a/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1613,25 +1613,22 @@
   Interval* precolored_cpu_intervals, *not_precolored_cpu_intervals;
   Interval* precolored_fpu_intervals, *not_precolored_fpu_intervals;
 
-  create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals, is_precolored_cpu_interval, is_virtual_cpu_interval);
-  if (has_fpu_registers()) {
-    create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
-#ifdef ASSERT
-  } else {
-    // fpu register allocation is omitted because no virtual fpu registers are present
-    // just check this again...
-    create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
-    assert(not_precolored_fpu_intervals == Interval::end(), "missed an uncolored fpu interval");
-#endif
-  }
-
   // allocate cpu registers
+  create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals,
+                         is_precolored_cpu_interval, is_virtual_cpu_interval);
+
+  // allocate fpu registers
+  create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals,
+                         is_precolored_fpu_interval, is_virtual_fpu_interval);
+
+  // The fpu interval lists must be created here, before the cpu allocation below,
+  // because cpu_lsw.walk() changes interval positions.
+
   LinearScanWalker cpu_lsw(this, precolored_cpu_intervals, not_precolored_cpu_intervals);
   cpu_lsw.walk();
   cpu_lsw.finish_allocation();
 
   if (has_fpu_registers()) {
-    // allocate fpu registers
     LinearScanWalker fpu_lsw(this, precolored_fpu_intervals, not_precolored_fpu_intervals);
     fpu_lsw.walk();
     fpu_lsw.finish_allocation();
--- a/hotspot/src/share/vm/code/dependencies.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/code/dependencies.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -407,56 +407,66 @@
 // for the sake of the compiler log, print out current dependencies:
 void Dependencies::log_all_dependencies() {
   if (log() == NULL)  return;
-  ciBaseObject* args[max_arg_count];
+  ResourceMark rm;
   for (int deptv = (int)FIRST_TYPE; deptv < (int)TYPE_LIMIT; deptv++) {
     DepType dept = (DepType)deptv;
     GrowableArray<ciBaseObject*>* deps = _deps[dept];
-    if (deps->length() == 0)  continue;
+    int deplen = deps->length();
+    if (deplen == 0) {
+      continue;
+    }
     int stride = dep_args(dept);
+    GrowableArray<ciBaseObject*>* ciargs = new GrowableArray<ciBaseObject*>(stride);
     for (int i = 0; i < deps->length(); i += stride) {
       for (int j = 0; j < stride; j++) {
         // flush out the identities before printing
-        args[j] = deps->at(i+j);
+        ciargs->push(deps->at(i+j));
       }
-      write_dependency_to(log(), dept, stride, args);
+      write_dependency_to(log(), dept, ciargs);
+      ciargs->clear();
     }
+    guarantee(deplen == deps->length(), "deps array cannot grow inside nested ResourceMark scope");
   }
 }
 
 void Dependencies::write_dependency_to(CompileLog* log,
                                        DepType dept,
-                                       int nargs, DepArgument args[],
+                                       GrowableArray<DepArgument>* args,
                                        Klass* witness) {
   if (log == NULL) {
     return;
   }
+  ResourceMark rm;
   ciEnv* env = ciEnv::current();
-  ciBaseObject* ciargs[max_arg_count];
-  assert(nargs <= max_arg_count, "oob");
-  for (int j = 0; j < nargs; j++) {
-    if (args[j].is_oop()) {
-      ciargs[j] = env->get_object(args[j].oop_value());
+  GrowableArray<ciBaseObject*>* ciargs = new GrowableArray<ciBaseObject*>(args->length());
+  for (GrowableArrayIterator<DepArgument> it = args->begin(); it != args->end(); ++it) {
+    DepArgument arg = *it;
+    if (arg.is_oop()) {
+      ciargs->push(env->get_object(arg.oop_value()));
     } else {
-      ciargs[j] = env->get_metadata(args[j].metadata_value());
+      ciargs->push(env->get_metadata(arg.metadata_value()));
     }
   }
-  Dependencies::write_dependency_to(log, dept, nargs, ciargs, witness);
+  int argslen = ciargs->length();
+  Dependencies::write_dependency_to(log, dept, ciargs, witness);
+  guarantee(argslen == ciargs->length(), "ciargs array cannot grow inside nested ResourceMark scope");
 }
 
 void Dependencies::write_dependency_to(CompileLog* log,
                                        DepType dept,
-                                       int nargs, ciBaseObject* args[],
+                                       GrowableArray<ciBaseObject*>* args,
                                        Klass* witness) {
-  if (log == NULL)  return;
-  assert(nargs <= max_arg_count, "oob");
-  int argids[max_arg_count];
-  int ctxkj = dep_context_arg(dept);  // -1 if no context arg
-  int j;
-  for (j = 0; j < nargs; j++) {
-    if (args[j]->is_object()) {
-      argids[j] = log->identify(args[j]->as_object());
+  if (log == NULL) {
+    return;
+  }
+  ResourceMark rm;
+  GrowableArray<int>* argids = new GrowableArray<int>(args->length());
+  for (GrowableArrayIterator<ciBaseObject*> it = args->begin(); it != args->end(); ++it) {
+    ciBaseObject* obj = *it;
+    if (obj->is_object()) {
+      argids->push(log->identify(obj->as_object()));
     } else {
-      argids[j] = log->identify(args[j]->as_metadata());
+      argids->push(log->identify(obj->as_metadata()));
     }
   }
   if (witness != NULL) {
@@ -465,16 +475,17 @@
     log->begin_elem("dependency");
   }
   log->print(" type='%s'", dep_name(dept));
-  if (ctxkj >= 0) {
-    log->print(" ctxk='%d'", argids[ctxkj]);
+  const int ctxkj = dep_context_arg(dept);  // -1 if no context arg
+  if (ctxkj >= 0 && ctxkj < argids->length()) {
+    log->print(" ctxk='%d'", argids->at(ctxkj));
   }
   // write remaining arguments, if any.
-  for (j = 0; j < nargs; j++) {
+  for (int j = 0; j < argids->length(); j++) {
     if (j == ctxkj)  continue;  // already logged
     if (j == 1) {
-      log->print(  " x='%d'",    argids[j]);
+      log->print(  " x='%d'",    argids->at(j));
     } else {
-      log->print(" x%d='%d'", j, argids[j]);
+      log->print(" x%d='%d'", j, argids->at(j));
     }
   }
   if (witness != NULL) {
@@ -486,9 +497,12 @@
 
 void Dependencies::write_dependency_to(xmlStream* xtty,
                                        DepType dept,
-                                       int nargs, DepArgument args[],
+                                       GrowableArray<DepArgument>* args,
                                        Klass* witness) {
-  if (xtty == NULL)  return;
+  if (xtty == NULL) {
+    return;
+  }
+  ResourceMark rm;
   ttyLocker ttyl;
   int ctxkj = dep_context_arg(dept);  // -1 if no context arg
   if (witness != NULL) {
@@ -498,23 +512,24 @@
   }
   xtty->print(" type='%s'", dep_name(dept));
   if (ctxkj >= 0) {
-    xtty->object("ctxk", args[ctxkj].metadata_value());
+    xtty->object("ctxk", args->at(ctxkj).metadata_value());
   }
   // write remaining arguments, if any.
-  for (int j = 0; j < nargs; j++) {
+  for (int j = 0; j < args->length(); j++) {
     if (j == ctxkj)  continue;  // already logged
+    DepArgument arg = args->at(j);
     if (j == 1) {
-      if (args[j].is_oop()) {
-        xtty->object("x", args[j].oop_value());
+      if (arg.is_oop()) {
+        xtty->object("x", arg.oop_value());
       } else {
-        xtty->object("x", args[j].metadata_value());
+        xtty->object("x", arg.metadata_value());
       }
     } else {
       char xn[10]; sprintf(xn, "x%d", j);
-      if (args[j].is_oop()) {
-        xtty->object(xn, args[j].oop_value());
+      if (arg.is_oop()) {
+        xtty->object(xn, arg.oop_value());
       } else {
-        xtty->object(xn, args[j].metadata_value());
+        xtty->object(xn, arg.metadata_value());
       }
     }
   }
@@ -525,7 +540,7 @@
   xtty->end_elem();
 }
 
-void Dependencies::print_dependency(DepType dept, int nargs, DepArgument args[],
+void Dependencies::print_dependency(DepType dept, GrowableArray<DepArgument>* args,
                                     Klass* witness) {
   ResourceMark rm;
   ttyLocker ttyl;   // keep the following output all in one block
@@ -534,8 +549,8 @@
                 dep_name(dept));
   // print arguments
   int ctxkj = dep_context_arg(dept);  // -1 if no context arg
-  for (int j = 0; j < nargs; j++) {
-    DepArgument arg = args[j];
+  for (int j = 0; j < args->length(); j++) {
+    DepArgument arg = args->at(j);
     bool put_star = false;
     if (arg.is_null())  continue;
     const char* what;
@@ -571,31 +586,33 @@
 void Dependencies::DepStream::log_dependency(Klass* witness) {
   if (_deps == NULL && xtty == NULL)  return;  // fast cutout for runtime
   ResourceMark rm;
-  int nargs = argument_count();
-  DepArgument args[max_arg_count];
+  const int nargs = argument_count();
+  GrowableArray<DepArgument>* args = new GrowableArray<DepArgument>(nargs);
   for (int j = 0; j < nargs; j++) {
     if (type() == call_site_target_value) {
-      args[j] = argument_oop(j);
+      args->push(argument_oop(j));
     } else {
-      args[j] = argument(j);
+      args->push(argument(j));
     }
   }
+  int argslen = args->length();
   if (_deps != NULL && _deps->log() != NULL) {
-    Dependencies::write_dependency_to(_deps->log(),
-                                      type(), nargs, args, witness);
+    Dependencies::write_dependency_to(_deps->log(), type(), args, witness);
   } else {
-    Dependencies::write_dependency_to(xtty,
-                                      type(), nargs, args, witness);
+    Dependencies::write_dependency_to(xtty, type(), args, witness);
   }
+  guarantee(argslen == args->length(), "args array cannot grow inside nested ResourceMark scope");
 }
 
 void Dependencies::DepStream::print_dependency(Klass* witness, bool verbose) {
+  ResourceMark rm;
   int nargs = argument_count();
-  DepArgument args[max_arg_count];
+  GrowableArray<DepArgument>* args = new GrowableArray<DepArgument>(nargs);
   for (int j = 0; j < nargs; j++) {
-    args[j] = argument(j);
+    args->push(argument(j));
   }
-  Dependencies::print_dependency(type(), nargs, args, witness);
+  int argslen = args->length();
+  Dependencies::print_dependency(type(), args, witness);
   if (verbose) {
     if (_code != NULL) {
       tty->print("  code: ");
@@ -603,6 +620,7 @@
       tty->cr();
     }
   }
+  guarantee(argslen == args->length(), "args array cannot grow inside nested ResourceMark scope");
 }
 
 
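
The logging paths above replace the fixed max_arg_count stack arrays with resource-allocated GrowableArrays bracketed by a ResourceMark; the guarantee calls check that the array did not grow while a nested ResourceMark was active, since growth there would reallocate the backing store in memory that the nested mark frees. A minimal sketch of the idiom (assumes HotSpot's resourceArea.hpp and growableArray.hpp; the names match the classes used above):

    {
      ResourceMark rm;                                      // opens a resource scope
      GrowableArray<int>* ids = new GrowableArray<int>(4);  // backing store from the resource area
      ids->push(42);
      int len = ids->length();
      // ... call code that may open its own ResourceMark ...
      guarantee(len == ids->length(), "must not grow inside a nested ResourceMark scope");
    }                                                       // storage reclaimed here
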
--- a/hotspot/src/share/vm/code/dependencies.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/code/dependencies.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -369,20 +369,36 @@
   void copy_to(nmethod* nm);
 
   void log_all_dependencies();
-  void log_dependency(DepType dept, int nargs, ciBaseObject* args[]) {
-    write_dependency_to(log(), dept, nargs, args);
+
+  void log_dependency(DepType dept, GrowableArray<ciBaseObject*>* args) {
+    ResourceMark rm;
+    int argslen = args->length();
+    write_dependency_to(log(), dept, args);
+    guarantee(argslen == args->length(),
+              "args array cannot grow inside nested ResourceMark scope");
   }
+
   void log_dependency(DepType dept,
                       ciBaseObject* x0,
                       ciBaseObject* x1 = NULL,
                       ciBaseObject* x2 = NULL) {
-    if (log() == NULL)  return;
-    ciBaseObject* args[max_arg_count];
-    args[0] = x0;
-    args[1] = x1;
-    args[2] = x2;
-    assert(2 < max_arg_count, "");
-    log_dependency(dept, dep_args(dept), args);
+    if (log() == NULL) {
+      return;
+    }
+    ResourceMark rm;
+    GrowableArray<ciBaseObject*>* ciargs =
+                new GrowableArray<ciBaseObject*>(dep_args(dept));
+    assert (x0 != NULL, "no log x0");
+    ciargs->push(x0);
+
+    if (x1 != NULL) {
+      ciargs->push(x1);
+    }
+    if (x2 != NULL) {
+      ciargs->push(x2);
+    }
+    assert(ciargs->length() == dep_args(dept), "");
+    log_dependency(dept, ciargs);
   }
 
   class DepArgument : public ResourceObj {
@@ -405,20 +421,8 @@
     Metadata* metadata_value() const { assert(!_is_oop && _valid, "must be"); return (Metadata*) _value; }
   };
 
-  static void write_dependency_to(CompileLog* log,
-                                  DepType dept,
-                                  int nargs, ciBaseObject* args[],
-                                  Klass* witness = NULL);
-  static void write_dependency_to(CompileLog* log,
-                                  DepType dept,
-                                  int nargs, DepArgument args[],
-                                  Klass* witness = NULL);
-  static void write_dependency_to(xmlStream* xtty,
-                                  DepType dept,
-                                  int nargs, DepArgument args[],
-                                  Klass* witness = NULL);
   static void print_dependency(DepType dept,
-                               int nargs, DepArgument args[],
+                               GrowableArray<DepArgument>* args,
                                Klass* witness = NULL);
 
  private:
@@ -427,6 +431,18 @@
 
   static Klass* ctxk_encoded_as_null(DepType dept, Metadata* x);
 
+  static void write_dependency_to(CompileLog* log,
+                                  DepType dept,
+                                  GrowableArray<ciBaseObject*>* args,
+                                  Klass* witness = NULL);
+  static void write_dependency_to(CompileLog* log,
+                                  DepType dept,
+                                  GrowableArray<DepArgument>* args,
+                                  Klass* witness = NULL);
+  static void write_dependency_to(xmlStream* xtty,
+                                  DepType dept,
+                                  GrowableArray<DepArgument>* args,
+                                  Klass* witness = NULL);
  public:
   // Use this to iterate over an nmethod's dependency set.
   // Works on new and old dependency sets.
--- a/hotspot/src/share/vm/opto/callGenerator.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -357,7 +357,7 @@
 
   // Make sure the state is a MergeMem for parsing.
   if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
-    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
+    Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
     C->initial_gvn()->set_type_bottom(mem);
     map->set_req(TypeFunc::Memory, mem);
   }
--- a/hotspot/src/share/vm/opto/callnode.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/callnode.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -688,7 +688,7 @@
     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
 
   case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
-    assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, "");
+    assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
     // 2nd half of doubles and longs
     return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
 
@@ -778,7 +778,7 @@
 }
 
 // Returns the unique CheckCastPP of a call
-// or 'this' if there are several CheckCastPP
+// or 'this' if there are several CheckCastPP or unexpected uses
 // or returns NULL if there is no one.
 Node *CallNode::result_cast() {
   Node *cast = NULL;
@@ -794,6 +794,13 @@
         return this;  // more than 1 CheckCastPP
       }
       cast = use;
+    } else if (!use->is_Initialize() &&
+               !use->is_AddP()) {
+      // Expected uses are restricted to a CheckCastPP, an Initialize
+      // node, and AddP nodes. If we encounter any other use (a Phi
+      // node can be seen in rare cases) return this to prevent
+      // incorrect optimizations.
+      return this;
     }
   }
   return cast;
--- a/hotspot/src/share/vm/opto/cfgnode.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,6 +108,7 @@
 
         rreq++;                 // One more input to Region
       } // Found a region to merge into Region
+      igvn->_worklist.push(r);
       // Clobber pointer to the now dead 'r'
       region->set_req(i, phase->C->top());
     }
@@ -449,6 +450,7 @@
   // Remove TOP or NULL input paths. If only 1 input path remains, this Region
   // degrades to a copy.
   bool add_to_worklist = false;
+  bool modified = false;
   int cnt = 0;                  // Count of values merging
   DEBUG_ONLY( int cnt_orig = req(); ) // Save original inputs count
   int del_it = 0;               // The last input path we delete
@@ -459,6 +461,7 @@
       // Remove useless control copy inputs
       if( n->is_Region() && n->as_Region()->is_copy() ) {
         set_req(i, n->nonnull_req());
+        modified = true;
         i--;
         continue;
       }
@@ -466,12 +469,14 @@
         Node *call = n->in(0);
         if (call->is_Call() && call->as_Call()->entry_point() == OptoRuntime::rethrow_stub()) {
           set_req(i, call->in(0));
+          modified = true;
           i--;
           continue;
         }
       }
       if( phase->type(n) == Type::TOP ) {
         set_req(i, NULL);       // Ignore TOP inputs
+        modified = true;
         i--;
         continue;
       }
@@ -691,7 +696,7 @@
     }
   }
 
-  return NULL;
+  return modified ? this : NULL;
 }
 
 
@@ -1871,7 +1876,7 @@
           igvn->register_new_node_with_optimizer(new_base);
           hook->add_req(new_base);
         }
-        MergeMemNode* result = MergeMemNode::make(phase->C, new_base);
+        MergeMemNode* result = MergeMemNode::make(new_base);
         for (uint i = 1; i < req(); ++i) {
           Node *ii = in(i);
           if (ii->is_MergeMem()) {
--- a/hotspot/src/share/vm/opto/chaitin.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1620,7 +1620,7 @@
           C->check_node_count(0, "out of nodes fixing spills");
           if (C->failing())  return;
           // Transform node
-          MachNode *cisc = mach->cisc_version(stk_offset, C)->as_Mach();
+          MachNode *cisc = mach->cisc_version(stk_offset)->as_Mach();
           cisc->set_req(inp,fp);          // Base register is frame pointer
           if( cisc->oper_input_base() > 1 && mach->oper_input_base() <= 1 ) {
             assert( cisc->oper_input_base() == 2, "Only adding one edge");
--- a/hotspot/src/share/vm/opto/compile.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1039,6 +1039,7 @@
 
   _node_note_array = NULL;
   _default_node_notes = NULL;
+  DEBUG_ONLY( _modified_nodes = NULL; ) // Used in Optimize()
 
   _immutable_memory = NULL; // filled in at first inquiry
 
@@ -1247,6 +1248,18 @@
     }
   }
 }
+void Compile::record_modified_node(Node* n) {
+  if (_modified_nodes != NULL && !_inlining_incrementally &&
+      n->outcnt() != 0 && !n->is_Con()) {
+    _modified_nodes->push(n);
+  }
+}
+
+void Compile::remove_modified_node(Node* n) {
+  if (_modified_nodes != NULL) {
+    _modified_nodes->remove(n);
+  }
+}
 #endif
 
 #ifndef PRODUCT
@@ -2035,6 +2048,9 @@
   // Iterative Global Value Numbering, including ideal transforms
   // Initialize IterGVN with types and values from parse-time GVN
   PhaseIterGVN igvn(initial_gvn());
+#ifdef ASSERT
+  _modified_nodes = new (comp_arena()) Unique_Node_List(comp_arena());
+#endif
   {
     NOT_PRODUCT( TracePhase t2("iterGVN", &_t_iterGVN, TimeCompiler); )
     igvn.optimize();
@@ -2197,6 +2213,7 @@
     }
   }
 
+  DEBUG_ONLY( _modified_nodes = NULL; )
  } // (End scope of igvn; run destructor if necessary for asserts.)
 
   process_print_inlining();
@@ -2825,7 +2842,7 @@
           // oops implicit null check is not generated.
           // This will allow to generate normal oop implicit null check.
           if (Matcher::gen_narrow_oop_implicit_null_checks())
-            new_in2 = ConNode::make(this, TypeNarrowOop::NULL_PTR);
+            new_in2 = ConNode::make(TypeNarrowOop::NULL_PTR);
           //
           // This transformation together with CastPP transformation above
           // will generated code for implicit NULL checks for compressed oops.
@@ -2864,9 +2881,9 @@
           //    NullCheck base_reg
           //
         } else if (t->isa_oopptr()) {
-          new_in2 = ConNode::make(this, t->make_narrowoop());
+          new_in2 = ConNode::make(t->make_narrowoop());
         } else if (t->isa_klassptr()) {
-          new_in2 = ConNode::make(this, t->make_narrowklass());
+          new_in2 = ConNode::make(t->make_narrowklass());
         }
       }
       if (new_in2 != NULL) {
@@ -2899,11 +2916,11 @@
       const Type* t = in1->bottom_type();
       if (t == TypePtr::NULL_PTR) {
         assert(t->isa_oopptr(), "null klass?");
-        n->subsume_by(ConNode::make(this, TypeNarrowOop::NULL_PTR), this);
+        n->subsume_by(ConNode::make(TypeNarrowOop::NULL_PTR), this);
       } else if (t->isa_oopptr()) {
-        n->subsume_by(ConNode::make(this, t->make_narrowoop()), this);
+        n->subsume_by(ConNode::make(t->make_narrowoop()), this);
       } else if (t->isa_klassptr()) {
-        n->subsume_by(ConNode::make(this, t->make_narrowklass()), this);
+        n->subsume_by(ConNode::make(t->make_narrowklass()), this);
       }
     }
     if (in1->outcnt() == 0) {
@@ -2964,7 +2981,7 @@
       if (d) {
         // Replace them with a fused divmod if supported
         if (Matcher::has_match_rule(Op_DivModI)) {
-          DivModINode* divmod = DivModINode::make(this, n);
+          DivModINode* divmod = DivModINode::make(n);
           d->subsume_by(divmod->div_proj(), this);
           n->subsume_by(divmod->mod_proj(), this);
         } else {
@@ -2984,7 +3001,7 @@
       if (d) {
         // Replace them with a fused divmod if supported
         if (Matcher::has_match_rule(Op_DivModL)) {
-          DivModLNode* divmod = DivModLNode::make(this, n);
+          DivModLNode* divmod = DivModLNode::make(n);
           d->subsume_by(divmod->div_proj(), this);
           n->subsume_by(divmod->mod_proj(), this);
         } else {
@@ -3010,7 +3027,7 @@
     if (n->req()-1 > 2) {
       // Replace many operand PackNodes with a binary tree for matching
       PackNode* p = (PackNode*) n;
-      Node* btp = p->binary_tree_pack(this, 1, n->req());
+      Node* btp = p->binary_tree_pack(1, n->req());
       n->subsume_by(btp, this);
     }
     break;
@@ -3035,11 +3052,11 @@
       if (t != NULL && t->is_con()) {
         juint shift = t->get_con();
         if (shift > mask) { // Unsigned cmp
-          n->set_req(2, ConNode::make(this, TypeInt::make(shift & mask)));
+          n->set_req(2, ConNode::make(TypeInt::make(shift & mask)));
         }
       } else {
         if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
-          Node* shift = new AndINode(in2, ConNode::make(this, TypeInt::make(mask)));
+          Node* shift = new AndINode(in2, ConNode::make(TypeInt::make(mask)));
           n->set_req(2, shift);
         }
       }
@@ -4031,6 +4048,7 @@
   int j = 0;
   int identical = 0;
   int i = 0;
+  bool modified = false;
   for (; i < _expensive_nodes->length()-1; i++) {
     assert(j <= i, "can't write beyond current index");
     if (_expensive_nodes->at(i)->Opcode() == _expensive_nodes->at(i+1)->Opcode()) {
@@ -4043,20 +4061,23 @@
       identical = 0;
     } else {
       Node* n = _expensive_nodes->at(i);
-      igvn.hash_delete(n);
-      n->set_req(0, NULL);
+      igvn.replace_input_of(n, 0, NULL);
       igvn.hash_insert(n);
+      modified = true;
     }
   }
   if (identical > 0) {
     _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
   } else if (_expensive_nodes->length() >= 1) {
     Node* n = _expensive_nodes->at(i);
-    igvn.hash_delete(n);
-    n->set_req(0, NULL);
+    igvn.replace_input_of(n, 0, NULL);
     igvn.hash_insert(n);
+    modified = true;
   }
   _expensive_nodes->trunc_to(j);
+  if (modified) {
+    igvn.optimize();
+  }
 }
 
 void Compile::add_expensive_node(Node * n) {
--- a/hotspot/src/share/vm/opto/compile.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -344,6 +344,8 @@
   VectorSet             _dead_node_list;        // Set of dead nodes
   uint                  _dead_node_count;       // Number of dead nodes; VectorSet::Size() is O(N).
                                                 // So use this to keep count and make the call O(1).
+  DEBUG_ONLY( Unique_Node_List* _modified_nodes; )  // List of nodes whose inputs were modified
+
   debug_only(static int _debug_idx;)            // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
   Arena                 _node_arena;            // Arena for new-space Nodes
   Arena                 _old_arena;             // Arena for old-space Nodes, lifetime during xform
@@ -766,6 +768,11 @@
   void         print_missing_nodes();
 #endif
 
+  // Record modified nodes to check that they are put on IGVN worklist
+  void         record_modified_node(Node* n) NOT_DEBUG_RETURN;
+  void         remove_modified_node(Node* n) NOT_DEBUG_RETURN;
+  DEBUG_ONLY( Unique_Node_List*   modified_nodes() const { return _modified_nodes; } )
+
   // Constant table
   ConstantTable&   constant_table() { return _constant_table; }
 
--- a/hotspot/src/share/vm/opto/connode.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/connode.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,7 @@
 }
 
 //------------------------------make-------------------------------------------
-ConNode *ConNode::make( Compile* C, const Type *t ) {
+ConNode *ConNode::make(const Type *t) {
   switch( t->basic_type() ) {
   case T_INT:         return new ConINode( t->is_int() );
   case T_LONG:        return new ConLNode( t->is_long() );
--- a/hotspot/src/share/vm/opto/connode.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/connode.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,7 @@
   virtual const RegMask &in_RegMask(uint) const { return RegMask::Empty; }
 
   // Polymorphic factory method:
-  static ConNode* make( Compile* C, const Type *t );
+  static ConNode* make(const Type *t);
 };
 
 //------------------------------ConINode---------------------------------------
@@ -57,7 +57,7 @@
   virtual int Opcode() const;
 
   // Factory method:
-  static ConINode* make( Compile* C, int con ) {
+  static ConINode* make(int con) {
     return new ConINode( TypeInt::make(con) );
   }
 
@@ -71,7 +71,7 @@
   virtual int Opcode() const;
 
   // Factory methods:
-  static ConPNode* make( Compile *C ,address con ) {
+  static ConPNode* make(address con) {
     if (con == NULL)
       return new ConPNode( TypePtr::NULL_PTR ) ;
     else
@@ -105,7 +105,7 @@
   virtual int Opcode() const;
 
   // Factory method:
-  static ConLNode* make( Compile *C ,jlong con ) {
+  static ConLNode* make(jlong con) {
     return new ConLNode( TypeLong::make(con) );
   }
 
@@ -119,7 +119,7 @@
   virtual int Opcode() const;
 
   // Factory method:
-  static ConFNode* make( Compile *C, float con  ) {
+  static ConFNode* make(float con) {
     return new ConFNode( TypeF::make(con) );
   }
 
@@ -133,7 +133,7 @@
   virtual int Opcode() const;
 
   // Factory method:
-  static ConDNode* make( Compile *C, double con ) {
+  static ConDNode* make(double con) {
     return new ConDNode( TypeD::make(con) );
   }
 
--- a/hotspot/src/share/vm/opto/divnode.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/divnode.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -479,7 +479,10 @@
 
   if (i == 0) return NULL;      // Dividing by zero constant does not idealize
 
-  set_req(0,NULL);              // Dividing by a not-zero constant; no faulting
+  if (in(0) != NULL) {
+    phase->igvn_rehash_node_delayed(this);
+    set_req(0, NULL);           // Dividing by a not-zero constant; no faulting
+  }
 
   // Dividing by MININT does not optimize as a power-of-2 shift.
   if( i == min_jint ) return NULL;
@@ -578,7 +581,10 @@
 
   if (l == 0) return NULL;      // Dividing by zero constant does not idealize
 
-  set_req(0,NULL);              // Dividing by a not-zero constant; no faulting
+  if (in(0) != NULL) {
+    phase->igvn_rehash_node_delayed(this);
+    set_req(0, NULL);           // Dividing by a not-zero constant; no faulting
+  }
 
   // Dividing by MINLONG does not optimize as a power-of-2 shift.
   if( l == min_jlong ) return NULL;
@@ -1274,7 +1280,7 @@
 }
 
 //------------------------------make------------------------------------------
-DivModINode* DivModINode::make(Compile* C, Node* div_or_mod) {
+DivModINode* DivModINode::make(Node* div_or_mod) {
   Node* n = div_or_mod;
   assert(n->Opcode() == Op_DivI || n->Opcode() == Op_ModI,
          "only div or mod input pattern accepted");
@@ -1286,7 +1292,7 @@
 }
 
 //------------------------------make------------------------------------------
-DivModLNode* DivModLNode::make(Compile* C, Node* div_or_mod) {
+DivModLNode* DivModLNode::make(Node* div_or_mod) {
   Node* n = div_or_mod;
   assert(n->Opcode() == Op_DivL || n->Opcode() == Op_ModL,
          "only div or mod input pattern accepted");
--- a/hotspot/src/share/vm/opto/divnode.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/divnode.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -168,7 +168,7 @@
   virtual Node *match( const ProjNode *proj, const Matcher *m );
 
   // Make a divmod and associated projections from a div or mod.
-  static DivModINode* make(Compile* C, Node* div_or_mod);
+  static DivModINode* make(Node* div_or_mod);
 };
 
 //------------------------------DivModLNode---------------------------------------
@@ -181,7 +181,7 @@
   virtual Node *match( const ProjNode *proj, const Matcher *m );
 
   // Make a divmod and associated projections from a div or mod.
-  static DivModLNode* make(Compile* C, Node* div_or_mod);
+  static DivModLNode* make(Node* div_or_mod);
 };
 
 #endif // SHARE_VM_OPTO_DIVNODE_HPP
--- a/hotspot/src/share/vm/opto/escape.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/escape.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1452,7 +1452,6 @@
     return 0;
 
   InitializeNode* ini = alloc->as_Allocate()->initialization();
-  Compile* C = _compile;
   bool visited_bottom_offset = false;
   GrowableArray<int> offsets_worklist;
 
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -591,7 +591,7 @@
         C->log()->elem("hot_throw preallocated='1' reason='%s'",
                        Deoptimization::trap_reason_name(reason));
       const TypeInstPtr* ex_con  = TypeInstPtr::make(ex_obj);
-      Node*              ex_node = _gvn.transform( ConNode::make(C, ex_con) );
+      Node*              ex_node = _gvn.transform(ConNode::make(ex_con));
 
       // Clear the detail message of the preallocated exception object.
       // Weblogic sometimes mutates the detail message of exceptions
@@ -706,7 +706,7 @@
   if (map() == NULL)  return NULL;
 
   // Clone the memory edge first
-  Node* mem = MergeMemNode::make(C, map()->memory());
+  Node* mem = MergeMemNode::make(map()->memory());
   gvn().set_type_bottom(mem);
 
   SafePointNode *clonemap = (SafePointNode*)map()->clone();
@@ -1135,7 +1135,7 @@
     return longcon((julong) offset_con);
   }
   Node* conv = _gvn.transform( new ConvI2LNode(offset));
-  Node* mask = _gvn.transform( ConLNode::make(C, (julong) max_juint) );
+  Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
   return _gvn.transform( new AndLNode(conv, mask) );
 }
 
@@ -1435,7 +1435,7 @@
 
 //------------------------------set_all_memory---------------------------------
 void GraphKit::set_all_memory(Node* newmem) {
-  Node* mergemem = MergeMemNode::make(C, newmem);
+  Node* mergemem = MergeMemNode::make(newmem);
   gvn().set_type_bottom(mergemem);
   map()->set_memory(mergemem);
 }
@@ -1464,9 +1464,9 @@
   Node* mem = memory(adr_idx);
   Node* ld;
   if (require_atomic_access && bt == T_LONG) {
-    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo);
+    ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo);
   } else if (require_atomic_access && bt == T_DOUBLE) {
-    ld = LoadDNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo);
+    ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo);
   } else {
     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo);
   }
@@ -1488,9 +1488,9 @@
   Node *mem = memory(adr_idx);
   Node* st;
   if (require_atomic_access && bt == T_LONG) {
-    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
+    st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
   } else if (require_atomic_access && bt == T_DOUBLE) {
-    st = StoreDNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
+    st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
   } else {
     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
   }
@@ -2084,9 +2084,9 @@
 void GraphKit::round_double_arguments(ciMethod* dest_method) {
   // (Note:  TypeFunc::make has a cache that makes this fast.)
   const TypeFunc* tf    = TypeFunc::make(dest_method);
-  int             nargs = tf->_domain->_cnt - TypeFunc::Parms;
+  int             nargs = tf->domain()->cnt() - TypeFunc::Parms;
   for (int j = 0; j < nargs; j++) {
-    const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
+    const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
     if( targ->basic_type() == T_DOUBLE ) {
       // If any parameters are doubles, they must be rounded before
       // the call, dstore_rounding does gvn.transform
@@ -2188,10 +2188,10 @@
     return;
   }
   const TypeFunc* tf    = TypeFunc::make(dest_method);
-  int             nargs = tf->_domain->_cnt - TypeFunc::Parms;
+  int             nargs = tf->domain()->cnt() - TypeFunc::Parms;
   int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
   for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
-    const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
+    const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
     if (targ->basic_type() == T_OBJECT || targ->basic_type() == T_ARRAY) {
       bool maybe_null = true;
       ciKlass* better_type = NULL;
@@ -3364,7 +3364,7 @@
     // This will allow us to observe initializations when they occur,
     // and link them properly (as a group) to the InitializeNode.
     assert(init->in(InitializeNode::Memory) == malloc, "");
-    MergeMemNode* minit_in = MergeMemNode::make(C, malloc);
+    MergeMemNode* minit_in = MergeMemNode::make(malloc);
     init->set_req(InitializeNode::Memory, minit_in);
     record_for_igvn(minit_in); // fold it up later, if possible
     Node* minit_out = memory(rawidx);
--- a/hotspot/src/share/vm/opto/idealKit.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/idealKit.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -320,7 +320,7 @@
   Node* ns = new_cvstate();
   for (uint i = 0; i < ns->req(); i++) ns->init_req(i, _cvstate->in(i));
   // We must clone memory since it will be updated as we do stores.
-  ns->set_req(TypeFunc::Memory, MergeMemNode::make(C, ns->in(TypeFunc::Memory)));
+  ns->set_req(TypeFunc::Memory, MergeMemNode::make(ns->in(TypeFunc::Memory)));
   return ns;
 }
 
@@ -359,7 +359,7 @@
   Node* mem = memory(adr_idx);
   Node* ld;
   if (require_atomic_access && bt == T_LONG) {
-    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, MemNode::unordered);
+    ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, MemNode::unordered);
   } else {
     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, MemNode::unordered);
   }
@@ -375,7 +375,7 @@
   Node *mem = memory(adr_idx);
   Node* st;
   if (require_atomic_access && bt == T_LONG) {
-    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
+    st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
   } else {
     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
   }
--- a/hotspot/src/share/vm/opto/lcm.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -464,7 +464,9 @@
         iop == Op_CreateEx ||   // Create-exception must start block
         iop == Op_CheckCastPP
         ) {
-      worklist.map(i,worklist.pop());
+      // select the node n
+      // remove n from worklist and retain the order of remaining nodes
+      worklist.remove((uint)i);
       return n;
     }
 
@@ -550,7 +552,9 @@
   assert(idx >= 0, "index should be set");
   Node *n = worklist[(uint)idx];      // Get the winner
 
-  worklist.map((uint)idx, worklist.pop());     // Compress worklist
+  // select the node n
+  // remove n from worklist and retain the order of remaining nodes
+  worklist.remove((uint)idx);
   return n;
 }
 
--- a/hotspot/src/share/vm/opto/library_call.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1905,7 +1905,7 @@
     Node *bolyplus1 = _gvn.transform(new BoolNode( cmpyplus1, BoolTest::eq ));
     Node* correctedsign = NULL;
     if (ConditionalMoveLimit != 0) {
-      correctedsign = _gvn.transform( CMoveNode::make(C, NULL, bolyplus1, signnode, longcon(0), TypeLong::LONG));
+      correctedsign = _gvn.transform(CMoveNode::make(NULL, bolyplus1, signnode, longcon(0), TypeLong::LONG));
     } else {
       IfNode *ifyplus1 = create_and_xform_if(ylong_path,bolyplus1, PROB_FAIR, COUNT_UNKNOWN);
       RegionNode *r = new RegionNode(3);
@@ -1934,7 +1934,7 @@
     // (1&(long)y)==1?-DPow(abs(x), y):DPow(abs(x), y)
     Node *signresult = NULL;
     if (ConditionalMoveLimit != 0) {
-      signresult = _gvn.transform( CMoveNode::make(C, NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE));
+      signresult = _gvn.transform(CMoveNode::make(NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE));
     } else {
       IfNode *ifyeven = create_and_xform_if(ylong_path,bol3, PROB_FAIR, COUNT_UNKNOWN);
       RegionNode *r = new RegionNode(3);
@@ -2268,7 +2268,7 @@
   // which could hinder other optimizations.
   // Since Math.min/max is often used with arraycopy, we want
   // tightly_coupled_allocation to be able to see beyond min/max expressions.
-  Node* cmov = CMoveNode::make(C, NULL, best_bol,
+  Node* cmov = CMoveNode::make(NULL, best_bol,
                                answer_if_false, answer_if_true,
                                TypeInt::make(lo, hi, widen));
 
--- a/hotspot/src/share/vm/opto/loopPredicate.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/loopPredicate.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -107,8 +107,7 @@
     rgn = new RegionNode(1);
     rgn->add_req(uncommon_proj);
     register_control(rgn, loop, uncommon_proj);
-    _igvn.hash_delete(call);
-    call->set_req(0, rgn);
+    _igvn.replace_input_of(call, 0, rgn);
     // When called from beautify_loops() idom is not constructed yet.
     if (_idom != NULL) {
       set_idom(call, rgn, dom_depth(rgn));
@@ -166,8 +165,7 @@
 
   if (new_entry == NULL) {
     // Attach if_cont to iff
-    _igvn.hash_delete(iff);
-    iff->set_req(0, if_cont);
+    _igvn.replace_input_of(iff, 0, if_cont);
     if (_idom != NULL) {
       set_idom(iff, if_cont, dom_depth(iff));
     }
@@ -194,8 +192,7 @@
     rgn = new RegionNode(1);
     register_new_node_with_optimizer(rgn);
     rgn->add_req(uncommon_proj);
-    hash_delete(call);
-    call->set_req(0, rgn);
+    replace_input_of(call, 0, rgn);
   } else {
     // Find region's edge corresponding to uncommon_proj
     for (; proj_index < rgn->req(); proj_index++)
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -924,15 +924,13 @@
   if( bol->outcnt() != 1 ) {
     bol = bol->clone();
     register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
-    _igvn.hash_delete(main_end);
-    main_end->set_req(CountedLoopEndNode::TestValue, bol);
+    _igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, bol);
   }
   // Need only 1 user of 'cmp' because I will be hacking the loop bounds.
   if( cmp->outcnt() != 1 ) {
     cmp = cmp->clone();
     register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
-    _igvn.hash_delete(bol);
-    bol->set_req(1, cmp);
+    _igvn.replace_input_of(bol, 1, cmp);
   }
 
   //------------------------------
@@ -1118,8 +1116,7 @@
     Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
     BoolNode* new_bol0 = new BoolNode(pre_bol->in(1), new_test);
     register_new_node( new_bol0, pre_head->in(0) );
-    _igvn.hash_delete(pre_end);
-    pre_end->set_req(CountedLoopEndNode::TestValue, new_bol0);
+    _igvn.replace_input_of(pre_end, CountedLoopEndNode::TestValue, new_bol0);
     // Modify main loop guard condition
     assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
     BoolNode* new_bol1 = new BoolNode(min_bol->in(1), new_test);
@@ -1130,8 +1127,7 @@
     BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
     BoolNode* new_bol2 = new BoolNode(main_bol->in(1), new_test);
     register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
-    _igvn.hash_delete(main_end);
-    main_end->set_req(CountedLoopEndNode::TestValue, new_bol2);
+    _igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, new_bol2);
   }
 
   // Flag main loop
@@ -1346,8 +1342,7 @@
         Node* bol2 = loop_end->in(1)->clone();
         bol2->set_req(1, cmp2);
         register_new_node(bol2, ctrl2);
-        _igvn.hash_delete(loop_end);
-        loop_end->set_req(1, bol2);
+        _igvn.replace_input_of(loop_end, 1, bol2);
       }
       // Step 3: Find the min-trip test guaranteed before a 'main' loop.
       // Make it a 1-trip test (means at least 2 trips).
@@ -1356,8 +1351,7 @@
       // can edit it's inputs directly.  Hammer in the new limit for the
       // minimum-trip guard.
       assert(opaq->outcnt() == 1, "");
-      _igvn.hash_delete(opaq);
-      opaq->set_req(1, new_limit);
+      _igvn.replace_input_of(opaq, 1, new_limit);
     }
 
     // Adjust max trip count. The trip count is intentionally rounded
@@ -1407,8 +1401,7 @@
     register_new_node( cmp2, ctrl2 );
     Node *bol2 = new BoolNode( cmp2, loop_end->test_trip() );
     register_new_node( bol2, ctrl2 );
-    _igvn.hash_delete(loop_end);
-    loop_end->set_req(CountedLoopEndNode::TestValue, bol2);
+    _igvn.replace_input_of(loop_end, CountedLoopEndNode::TestValue, bol2);
 
     // Step 3: Find the min-trip test guaranteed before a 'main' loop.
     // Make it a 1-trip test (means at least 2 trips).
@@ -1997,8 +1990,7 @@
                                  : (Node*)new MaxINode(pre_limit, orig_limit);
     register_new_node(pre_limit, pre_ctrl);
   }
-  _igvn.hash_delete(pre_opaq);
-  pre_opaq->set_req(1, pre_limit);
+  _igvn.replace_input_of(pre_opaq, 1, pre_limit);
 
   // Note:: we are making the main loop limit no longer precise;
   // need to round up based on stride.
@@ -2027,10 +2019,9 @@
   Node *main_bol = main_cle->in(1);
   // Hacking loop bounds; need private copies of exit test
   if( main_bol->outcnt() > 1 ) {// BoolNode shared?
-    _igvn.hash_delete(main_cle);
     main_bol = main_bol->clone();// Clone a private BoolNode
     register_new_node( main_bol, main_cle->in(0) );
-    main_cle->set_req(1,main_bol);
+    _igvn.replace_input_of(main_cle, 1, main_bol);
   }
   Node *main_cmp = main_bol->in(1);
   if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -133,7 +133,7 @@
   // Return earliest legal location
   assert(early == find_non_split_ctrl(early), "unexpected early control");
 
-  if (n->is_expensive()) {
+  if (n->is_expensive() && !_verify_only && !_verify_me) {
     assert(n->in(0), "should have control input");
     early = get_early_ctrl_for_expensive(n, early);
   }
@@ -226,8 +226,7 @@
   }
 
   if (ctl != n->in(0)) {
-    _igvn.hash_delete(n);
-    n->set_req(0, ctl);
+    _igvn.replace_input_of(n, 0, ctl);
     _igvn.hash_insert(n);
   }
 
@@ -521,8 +520,7 @@
     assert(check_iff->in(1)->Opcode() == Op_Conv2B &&
            check_iff->in(1)->in(1)->Opcode() == Op_Opaque1, "");
     Node* opq = check_iff->in(1)->in(1);
-    _igvn.hash_delete(opq);
-    opq->set_req(1, bol);
+    _igvn.replace_input_of(opq, 1, bol);
     // Update ctrl.
     set_ctrl(opq, check_iff->in(0));
     set_ctrl(check_iff->in(1), check_iff->in(0));
@@ -690,7 +688,7 @@
   incr->set_req(2,stride);
   incr = _igvn.register_new_node_with_optimizer(incr);
   set_early_ctrl( incr );
-  _igvn.hash_delete(phi);
+  _igvn.rehash_node_delayed(phi);
   phi->set_req_X( LoopNode::LoopBackControl, incr, &_igvn );
 
   // If phi type is more restrictive than Int, raise to
@@ -743,8 +741,8 @@
     iffalse = iff2;
     iftrue  = ift2;
   } else {
-    _igvn.hash_delete(iffalse);
-    _igvn.hash_delete(iftrue);
+    _igvn.rehash_node_delayed(iffalse);
+    _igvn.rehash_node_delayed(iftrue);
     iffalse->set_req_X( 0, le, &_igvn );
     iftrue ->set_req_X( 0, le, &_igvn );
   }
@@ -1257,6 +1255,7 @@
       _head->del_req(i);
     }
   }
+  igvn.rehash_node_delayed(_head);
   // Transform landing pad
   igvn.register_new_node_with_optimizer(landing_pad, _head);
   // Insert landing pad into the header
@@ -1397,7 +1396,7 @@
   igvn.register_new_node_with_optimizer(r, _head);
   // Plug region into end of loop _head, followed by hot_tail
   while( _head->req() > 3 ) _head->del_req( _head->req()-1 );
-  _head->set_req(2, r);
+  igvn.replace_input_of(_head, 2, r);
   if( hot_idx ) _head->add_req(hot_tail);
 
   // Split all the Phis up between '_head' loop and the Region 'r'
@@ -1419,7 +1418,7 @@
       igvn.register_new_node_with_optimizer(phi, n);
       // Add the merge phi to the old Phi
       while( n->req() > 3 ) n->del_req( n->req()-1 );
-      n->set_req(2, phi);
+      igvn.replace_input_of(n, 2, phi);
       if( hot_idx ) n->add_req(hot_phi);
     }
   }
@@ -1495,13 +1494,14 @@
   if( fall_in_cnt > 1 ) {
     // Since I am just swapping inputs I do not need to update def-use info
     Node *tmp = _head->in(1);
+    igvn.rehash_node_delayed(_head);
     _head->set_req( 1, _head->in(fall_in_cnt) );
     _head->set_req( fall_in_cnt, tmp );
     // Swap also all Phis
     for (DUIterator_Fast imax, i = _head->fast_outs(imax); i < imax; i++) {
       Node* phi = _head->fast_out(i);
       if( phi->is_Phi() ) {
-        igvn.hash_delete(phi); // Yank from hash before hacking edges
+        igvn.rehash_node_delayed(phi); // Yank from hash before hacking edges
         tmp = phi->in(1);
         phi->set_req( 1, phi->in(fall_in_cnt) );
         phi->set_req( fall_in_cnt, tmp );
@@ -2905,6 +2905,7 @@
           uint k = 0;             // Probably cfg->in(0)
           while( cfg->in(k) != m ) k++; // But check in case cfg is a Region
           cfg->set_req( k, if_t ); // Now point to NeverBranch
+          _igvn._worklist.push(cfg);
 
           // Now create the never-taken loop exit
           Node *if_f = new CProjNode( iff, 1 );
--- a/hotspot/src/share/vm/opto/loopopts.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/loopopts.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -625,7 +625,7 @@
         set_ctrl(inp, cmov_ctrl);
       }
     }
-    Node *cmov = CMoveNode::make( C, cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi) );
+    Node *cmov = CMoveNode::make(cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
     register_new_node( cmov, cmov_ctrl );
     _igvn.replace_node( phi, cmov );
 #ifndef PRODUCT
@@ -2574,7 +2574,7 @@
   new_head->set_unswitch_count(head->unswitch_count()); // Preserve
   _igvn.register_new_node_with_optimizer(new_head);
   assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled");
-  first_not_peeled->set_req(0, new_head);
+  _igvn.replace_input_of(first_not_peeled, 0, new_head);
   set_loop(new_head, loop);
   loop->_body.push(new_head);
   not_peel.set(new_head->_idx);
--- a/hotspot/src/share/vm/opto/machnode.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/machnode.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -178,7 +178,7 @@
 }
 
 // Return an equivalent instruction using memory for cisc_operand position
-MachNode *MachNode::cisc_version(int offset, Compile* C) {
+MachNode *MachNode::cisc_version(int offset) {
   ShouldNotCallThis();
   return NULL;
 }
@@ -411,7 +411,7 @@
 
 //------------------------------peephole---------------------------------------
 // Apply peephole rule(s) to this instruction
-MachNode *MachNode::peephole( Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile* C ) {
+MachNode *MachNode::peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted) {
   return NULL;
 }
 
--- a/hotspot/src/share/vm/opto/machnode.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/machnode.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -152,7 +152,7 @@
   virtual uint  cmp( const MachOper &oper ) const;
 
   // Virtual clone, since I do not know how big the MachOper is.
-  virtual MachOper *clone(Compile* C) const = 0;
+  virtual MachOper *clone() const = 0;
 
   // Return ideal Type from simple operands.  Fail for complex operands.
   virtual const Type *type() const;
@@ -202,10 +202,10 @@
   // Copy inputs and operands to new node of instruction.
   // Called from cisc_version() and short_branch_version().
   // !!!! The method's body is defined in ad_<arch>.cpp file.
-  void fill_new_machnode(MachNode *n, Compile* C) const;
+  void fill_new_machnode(MachNode *n) const;
 
   // Return an equivalent instruction using memory for cisc_operand position
-  virtual MachNode *cisc_version(int offset, Compile* C);
+  virtual MachNode *cisc_version(int offset);
   // Modify this instruction's register mask to use stack version for cisc_operand
   virtual void use_cisc_RegMask();
 
@@ -317,7 +317,7 @@
   virtual const class TypePtr *adr_type() const;
 
   // Apply peephole rule(s) to this instruction
-  virtual MachNode *peephole( Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile* C );
+  virtual MachNode *peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted);
 
   // Top-level ideal Opcode matched
   virtual int ideal_Opcode()     const { return Op_Node; }
@@ -627,7 +627,7 @@
   virtual void save_label(Label** label, uint* block_num) = 0;
 
   // Support for short branches
-  virtual MachNode *short_branch_version(Compile* C) { return NULL; }
+  virtual MachNode *short_branch_version() { return NULL; }
 
   virtual bool pinned() const { return true; };
 };
@@ -985,7 +985,7 @@
 
   labelOper(labelOper* l) : _label(l->_label) , _block_num(l->_block_num) {}
 
-  virtual MachOper *clone(Compile* C) const;
+  virtual MachOper *clone() const;
 
   virtual Label *label() const { assert(_label != NULL, "need Label"); return _label; }
 
@@ -1012,7 +1012,7 @@
   methodOper() :   _method(0) {}
   methodOper(intptr_t method) : _method(method)  {}
 
-  virtual MachOper *clone(Compile* C) const;
+  virtual MachOper *clone() const;
 
   virtual intptr_t method() const { return _method; }
 
--- a/hotspot/src/share/vm/opto/macro.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/macro.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -702,6 +702,7 @@
   ciType* elem_type;
 
   Node* res = alloc->result_cast();
+  assert(res == NULL || res->is_CheckCastPP(), "unexpected AllocateNode result");
   const TypeOopPtr* res_type = NULL;
   if (res != NULL) { // Could be NULL when there are no users
     res_type = _igvn.type(res)->isa_oopptr();
@@ -791,6 +792,7 @@
         for (int k = 0;  k < j; k++) {
           sfpt->del_req(last--);
         }
+        _igvn._worklist.push(sfpt);
         // rollback processed safepoints
         while (safepoints_done.length() > 0) {
           SafePointNode* sfpt_done = safepoints_done.pop();
@@ -815,6 +817,7 @@
               }
             }
           }
+          _igvn._worklist.push(sfpt_done);
         }
 #ifndef PRODUCT
         if (PrintEliminateAllocations) {
@@ -855,6 +858,7 @@
     int start = jvms->debug_start();
     int end   = jvms->debug_end();
     sfpt->replace_edges_in_range(res, sobj, start, end);
+    _igvn._worklist.push(sfpt);
     safepoints_done.append_if_missing(sfpt); // keep it for rollback
   }
   return true;
@@ -1034,6 +1038,8 @@
     return false;
   }
 
+  assert(boxing->result_cast() == NULL, "unexpected boxing node result");
+
   extract_call_projections(boxing);
 
   const TypeTuple* r = boxing->tf()->range();
@@ -1775,6 +1781,7 @@
       Node *pf_region = new RegionNode(3);
       Node *pf_phi_rawmem = new PhiNode( pf_region, Type::MEMORY,
                                              TypeRawPtr::BOTTOM );
+      transform_later(pf_region);
 
       // Generate several prefetch instructions.
       uint lines = (length != NULL) ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
--- a/hotspot/src/share/vm/opto/matcher.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -305,7 +305,7 @@
   // to avoid false sharing if the corresponding mach node is not used.
   // The corresponding mach node is only used in rare cases for derived
   // pointers.
-  Node* new_ideal_null = ConNode::make(C, TypePtr::NULL_PTR);
+  Node* new_ideal_null = ConNode::make(TypePtr::NULL_PTR);
 
   // Swap out to old-space; emptying new-space
   Arena *old = C->node_arena()->move_contents(C->old_arena());
@@ -1643,8 +1643,8 @@
   }
 
   // Build the object to represent this state & prepare for recursive calls
-  MachNode *mach = s->MachNodeGenerator( rule, C );
-  mach->_opnds[0] = s->MachOperGenerator( _reduceOp[rule], C );
+  MachNode *mach = s->MachNodeGenerator(rule);
+  mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]);
   assert( mach->_opnds[0] != NULL, "Missing result operand" );
   Node *leaf = s->_leaf;
   // Check for instruction or instruction chain rule
@@ -1756,13 +1756,13 @@
     assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
             "Bad AD file: Instruction chain rule must chain from operand");
     // Insert operand into array of operands for this instruction
-    mach->_opnds[1] = s->MachOperGenerator( opnd_class_instance, C );
+    mach->_opnds[1] = s->MachOperGenerator(opnd_class_instance);
 
     ReduceOper( s, newrule, mem, mach );
   } else {
     // Chain from the result of an instruction
     assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
-    mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
+    mach->_opnds[1] = s->MachOperGenerator(_reduceOp[catch_op]);
     Node *mem1 = (Node*)1;
     debug_only(Node *save_mem_node = _mem_node;)
     mach->add_req( ReduceInst(s, newrule, mem1) );
@@ -1807,7 +1807,7 @@
     if( newrule < NUM_OPERANDS ) { // Operand/operandClass or internalOp/instruction?
       // Operand/operandClass
       // Insert operand into array of operands for this instruction
-      mach->_opnds[num_opnds++] = newstate->MachOperGenerator( opnd_class_instance, C );
+      mach->_opnds[num_opnds++] = newstate->MachOperGenerator(opnd_class_instance);
       ReduceOper( newstate, newrule, mem, mach );
 
     } else {                    // Child is internal operand or new instruction
@@ -1818,7 +1818,7 @@
       } else {
         // instruction --> call build operand(  ) to catch result
         //             --> ReduceInst( newrule )
-        mach->_opnds[num_opnds++] = s->MachOperGenerator( _reduceOp[catch_op], C );
+        mach->_opnds[num_opnds++] = s->MachOperGenerator(_reduceOp[catch_op]);
         Node *mem1 = (Node*)1;
         debug_only(Node *save_mem_node = _mem_node;)
         mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
--- a/hotspot/src/share/vm/opto/mathexactnode.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/mathexactnode.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -191,7 +191,7 @@
       NativeType val1 = TypeClass::as_self(type1)->get_con();
       NativeType val2 = TypeClass::as_self(type2)->get_con();
       if (node->will_overflow(val1, val2) == false) {
-        Node* con_result = ConINode::make(phase->C, 0);
+        Node* con_result = ConINode::make(0);
         return con_result;
       }
       return NULL;
--- a/hotspot/src/share/vm/opto/memnode.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -933,12 +933,12 @@
   return (LoadNode*)NULL;
 }
 
-LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
+LoadLNode* LoadLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
   bool require_atomic = true;
   return new LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, require_atomic);
 }
 
-LoadDNode* LoadDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
+LoadDNode* LoadDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
   bool require_atomic = true;
   return new LoadDNode(ctl, mem, adr, adr_type, rt, mo, require_atomic);
 }
@@ -1471,6 +1471,7 @@
 
   Node* ctrl    = in(MemNode::Control);
   Node* address = in(MemNode::Address);
+  bool progress = false;
 
   // Skip up past a SafePoint control.  Cannot do this for Stores because
   // pointer stores & cardmarks must stay on the same side of a SafePoint.
@@ -1478,6 +1479,7 @@
       phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw ) {
     ctrl = ctrl->in(0);
     set_req(MemNode::Control,ctrl);
+    progress = true;
   }
 
   intptr_t ignore = 0;
@@ -1490,6 +1492,7 @@
         && all_controls_dominate(base, phase->C->start())) {
       // A method-invariant, non-null address (constant or 'this' argument).
       set_req(MemNode::Control, NULL);
+      progress = true;
     }
   }
 
@@ -1550,7 +1553,7 @@
     }
   }
 
-  return NULL;                  // No further progress
+  return progress ? this : NULL;
 }
 
 // Helper to recognize certain Klass fields which are invariant across
@@ -2014,7 +2017,6 @@
 //----------------------------LoadKlassNode::make------------------------------
 // Polymorphic factory method:
 Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) {
-  Compile* C = gvn.C;
   Node *ctl = NULL;
   // sanity check the alias category against the created node type
   const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
@@ -2379,12 +2381,12 @@
   return (StoreNode*)NULL;
 }
 
-StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
+StoreLNode* StoreLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
   bool require_atomic = true;
   return new StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
 }
 
-StoreDNode* StoreDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
+StoreDNode* StoreDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
   bool require_atomic = true;
   return new StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
 }
@@ -2460,7 +2462,7 @@
       // and I need to disappear.
       if (moved != NULL) {
         // %%% hack to ensure that Ideal returns a new node:
-        mem = MergeMemNode::make(phase->C, mem);
+        mem = MergeMemNode::make(mem);
         return mem;             // fold me away
       }
     }
@@ -2820,7 +2822,6 @@
                                    intptr_t start_offset,
                                    Node* end_offset,
                                    PhaseGVN* phase) {
-  Compile* C = phase->C;
   intptr_t offset = start_offset;
 
   int unit = BytesPerLong;
@@ -2847,7 +2848,6 @@
     return mem;
   }
 
-  Compile* C = phase->C;
   int unit = BytesPerLong;
   Node* zbase = start_offset;
   Node* zend  = end_offset;
@@ -2875,7 +2875,6 @@
     return mem;
   }
 
-  Compile* C = phase->C;
   assert((end_offset % BytesPerInt) == 0, "odd end offset");
   intptr_t done_offset = end_offset;
   if ((done_offset % BytesPerLong) != 0) {
@@ -2944,6 +2943,7 @@
     return NULL;
   }
 
+  bool progress = false;
   // Eliminate volatile MemBars for scalar replaced objects.
   if (can_reshape && req() == (Precedent+1)) {
     bool eliminate = false;
@@ -2966,6 +2966,7 @@
           phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later
           my_mem = NULL;
         }
+        progress = true;
       }
       if (my_mem != NULL && my_mem->is_Mem()) {
         const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
@@ -2995,7 +2996,7 @@
       return new ConINode(TypeInt::ZERO);
     }
   }
-  return NULL;
+  return progress ? this : NULL;
 }
 
 //------------------------------Value------------------------------------------
@@ -3497,6 +3498,7 @@
   // if it redundantly stored the same value (or zero to fresh memory).
 
   // In any case, wire it in:
+  phase->igvn_rehash_node_delayed(this);
   set_req(i, new_st);
 
   // The caller may now kill the old guy.
@@ -4126,7 +4128,7 @@
 
 // Make a new, untransformed MergeMem with the same base as 'mem'.
 // If mem is itself a MergeMem, populate the result with the same edges.
-MergeMemNode* MergeMemNode::make(Compile* C, Node* mem) {
+MergeMemNode* MergeMemNode::make(Node* mem) {
   return new MergeMemNode(mem);
 }
 
--- a/hotspot/src/share/vm/opto/memnode.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/memnode.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -333,7 +333,7 @@
   virtual int store_Opcode() const { return Op_StoreL; }
   virtual BasicType memory_type() const { return T_LONG; }
   bool require_atomic_access() const { return _require_atomic_access; }
-  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
+  static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                 const Type* rt, MemOrd mo);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
@@ -384,7 +384,7 @@
   virtual int store_Opcode() const { return Op_StoreD; }
   virtual BasicType memory_type() const { return T_DOUBLE; }
   bool require_atomic_access() const { return _require_atomic_access; }
-  static LoadDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
+  static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                 const Type* rt, MemOrd mo);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
@@ -593,7 +593,7 @@
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_LONG; }
   bool require_atomic_access() const { return _require_atomic_access; }
-  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
+  static StoreLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     StoreNode::dump_spec(st);
@@ -629,7 +629,7 @@
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_DOUBLE; }
   bool require_atomic_access() const { return _require_atomic_access; }
-  static StoreDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
+  static StoreDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     StoreNode::dump_spec(st);
@@ -1138,7 +1138,7 @@
   // If the input is a whole memory state, clone it with all its slices intact.
   // Otherwise, make a new memory state with just that base memory input.
   // In either case, the result is a newly created MergeMem.
-  static MergeMemNode* make(Compile* C, Node* base_memory);
+  static MergeMemNode* make(Node* base_memory);
 
   virtual int Opcode() const;
   virtual Node *Identity( PhaseTransform *phase );
--- a/hotspot/src/share/vm/opto/movenode.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/movenode.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -88,7 +88,7 @@
     if( in(Condition)->is_Bool() ) {
       BoolNode* b  = in(Condition)->as_Bool();
       BoolNode* b2 = b->negate(phase);
-      return make( phase->C, in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type );
+      return make(in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type);
     }
   }
   return NULL;
@@ -158,7 +158,7 @@
 //------------------------------make-------------------------------------------
 // Make a correctly-flavored CMove.  Since _type is directly determined
 // from the inputs we do not need to specify it here.
-CMoveNode *CMoveNode::make( Compile *C, Node *c, Node *bol, Node *left, Node *right, const Type *t ) {
+CMoveNode *CMoveNode::make(Node *c, Node *bol, Node *left, Node *right, const Type *t) {
   switch( t->basic_type() ) {
     case T_INT:     return new CMoveINode( bol, left, right, t->is_int() );
     case T_FLOAT:   return new CMoveFNode( bol, left, right, t );
@@ -196,7 +196,7 @@
     if( in(Condition)->is_Bool() ) {
       BoolNode* b  = in(Condition)->as_Bool();
       BoolNode* b2 = b->negate(phase);
-      return make( phase->C, in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type );
+      return make(in(Control), phase->transform(b2), in(IfTrue), in(IfFalse), _type);
     }
   }
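CMoveNode::make() now selects the concrete node class purely from the result's basic type. As a rough illustration of that factory-by-type shape (class and enum names below are invented, not the real CMoveINode/CMoveFNode hierarchy):

// Generic sketch of a make()-style factory dispatching on a basic-type tag.
#include <cstdio>

enum BasicType { T_INT, T_FLOAT, T_DOUBLE };

struct MoveNode { virtual ~MoveNode() {} virtual const char* name() const = 0; };
struct MoveINode : MoveNode { const char* name() const override { return "MoveI"; } };
struct MoveFNode : MoveNode { const char* name() const override { return "MoveF"; } };
struct MoveDNode : MoveNode { const char* name() const override { return "MoveD"; } };

static MoveNode* make(BasicType bt) {
  switch (bt) {
  case T_INT:    return new MoveINode();
  case T_FLOAT:  return new MoveFNode();
  case T_DOUBLE: return new MoveDNode();
  }
  return nullptr;   // unreachable for the tags listed above
}

int main() {
  MoveNode* n = make(T_FLOAT);
  printf("made %s\n", n->name());
  delete n;
  return 0;
}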
 
--- a/hotspot/src/share/vm/opto/movenode.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/movenode.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -47,7 +47,7 @@
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   virtual const Type *Value( PhaseTransform *phase ) const;
   virtual Node *Identity( PhaseTransform *phase );
-  static CMoveNode *make( Compile *C, Node *c, Node *bol, Node *left, Node *right, const Type *t );
+  static CMoveNode *make(Node *c, Node *bol, Node *left, Node *right, const Type *t);
   // Helper function to spot cmove graph shapes
   static Node *is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f, BoolNode *b );
 };
--- a/hotspot/src/share/vm/opto/node.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/node.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -507,7 +507,7 @@
                                   (const void*)(&mthis->_opnds), 1));
     mach->_opnds = to;
     for ( uint i = 0; i < nopnds; ++i ) {
-      to[i] = from[i]->clone(C);
+      to[i] = from[i]->clone();
     }
   }
   // cloning CallNode may need to clone JVMState
@@ -620,6 +620,7 @@
   *(address*)this = badAddress;  // smash the C++ vtbl, probably
   _in = _out = (Node**) badAddress;
   _max = _cnt = _outmax = _outcnt = 0;
+  compile->remove_modified_node(this);
 #endif
 }
 
@@ -765,6 +766,7 @@
   if (n != NULL) n->del_out((Node *)this);
   _in[idx] = in(--_cnt);  // Compact the array
   _in[_cnt] = NULL;       // NULL out emptied slot
+  Compile::current()->record_modified_node(this);
 }
 
 //------------------------------del_req_ordered--------------------------------
@@ -780,6 +782,7 @@
     Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx-1)*sizeof(Node*)));
   }
   _in[--_cnt] = NULL;   // NULL out emptied slot
+  Compile::current()->record_modified_node(this);
 }
 
 //------------------------------ins_req----------------------------------------
@@ -1297,6 +1300,7 @@
       // Done with outputs.
       igvn->hash_delete(dead);
       igvn->_worklist.remove(dead);
+      igvn->C->remove_modified_node(dead);
       igvn->set_type(dead, Type::TOP);
       if (dead->is_macro()) {
         igvn->C->remove_macro_node(dead);
--- a/hotspot/src/share/vm/opto/node.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/node.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -398,6 +398,7 @@
     if (*p != NULL)  (*p)->del_out((Node *)this);
     (*p) = n;
     if (n != NULL)      n->add_out((Node *)this);
+    Compile::current()->record_modified_node(this);
   }
   // Light version of set_req() to init inputs after node creation.
   void init_req( uint i, Node *n ) {
@@ -409,6 +410,7 @@
     assert( _in[i] == NULL, "sanity");
     _in[i] = n;
     if (n != NULL)      n->add_out((Node *)this);
+    Compile::current()->record_modified_node(this);
   }
   // Find first occurrence of n among my edges:
   int find_edge(Node* n);
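Every edge mutator (set_req, init_req, del_req, del_req_ordered) now notifies the current Compile so a debug pass can later check that IGVN revisited each changed node. A minimal, self-contained imitation of that bookkeeping (the real record_modified_node/remove_modified_node live on Compile and are debug-only) could look like:

// Standalone sketch of the "record modified nodes" bookkeeping.
#include <cstdio>
#include <set>

struct Node;
static std::set<Node*> g_modified;                 // stand-in for Compile's list

static void record_modified_node(Node* n) { g_modified.insert(n); }
static void remove_modified_node(Node* n) { g_modified.erase(n); }

struct Node {
  Node* in[4] = {nullptr, nullptr, nullptr, nullptr};
  void set_req(int i, Node* n) {
    in[i] = n;
    record_modified_node(this);                    // every edge change is recorded
  }
};

int main() {
  Node a, b;
  a.set_req(0, &b);                                // 'a' is now marked modified
  // ... the optimizer would process 'a' and then clear the mark:
  remove_modified_node(&a);
  printf("unprocessed modified nodes: %zu\n", g_modified.size());
  return 0;
}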
--- a/hotspot/src/share/vm/opto/output.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/output.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -526,7 +526,7 @@
 
         if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
           // We've got a winner.  Replace this branch.
-          MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
+          MachNode* replacement = mach->as_MachBranch()->short_branch_version();
 
           // Update the jmp_size.
           int new_size = replacement->size(_regalloc);
@@ -785,9 +785,10 @@
     // grow downwards in all implementations.
     // (If, on some machine, the interpreter's Java locals or stack
     // were to grow upwards, the embedded doubles would be word-swapped.)
-    jint   *dp = (jint*)&d;
-    array->append(new ConstantIntValue(dp[1]));
-    array->append(new ConstantIntValue(dp[0]));
+    jlong_accessor acc;
+    acc.long_value = jlong_cast(d);
+    array->append(new ConstantIntValue(acc.words[1]));
+    array->append(new ConstantIntValue(acc.words[0]));
 #endif
     break;
   }
@@ -804,9 +805,10 @@
     // grow downwards in all implementations.
     // (If, on some machine, the interpreter's Java locals or stack
     // were to grow upwards, the embedded doubles would be word-swapped.)
-    jint *dp = (jint*)&d;
-    array->append(new ConstantIntValue(dp[1]));
-    array->append(new ConstantIntValue(dp[0]));
+    jlong_accessor acc;
+    acc.long_value = d;
+    array->append(new ConstantIntValue(acc.words[1]));
+    array->append(new ConstantIntValue(acc.words[0]));
 #endif
     break;
   }
@@ -1174,7 +1176,7 @@
 
   // fill in the nop array for bundling computations
   MachNode *_nop_list[Bundle::_nop_count];
-  Bundle::initialize_nops(_nop_list, this);
+  Bundle::initialize_nops(_nop_list);
 
   return cb;
 }
@@ -1408,7 +1410,7 @@
 
             if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
               // We've got a winner.  Replace this branch.
-              MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
+              MachNode* replacement = mach->as_MachBranch()->short_branch_version();
 
               // Update the jmp_size.
               int new_size = replacement->size(_regalloc);
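The scope-value code stops reinterpreting &d as a jint* and instead goes through a union accessor, which keeps the word-swapping logic but avoids the aliasing cast. The snippet below imitates that with a locally defined union (the real jlong_accessor and jlong_cast come from the shared globalDefinitions headers); it assumes a little-endian host for the words[] ordering.

// Illustrative only: split a 64-bit double into two 32-bit halves through a
// union instead of casting &d to jint*. The union name here is made up.
#include <cstdint>
#include <cstdio>
#include <cstring>

union LongAccessor {
  int64_t long_value;
  int32_t words[2];        // words[0] is the low half on a little-endian host
};

int main() {
  double d = 3.141592653589793;
  int64_t bits;
  std::memcpy(&bits, &d, sizeof bits);   // bit-preserving double -> int64 cast

  LongAccessor acc;
  acc.long_value = bits;
  // The patched code appends the high word first, then the low word.
  printf("hi=0x%08x lo=0x%08x\n", (unsigned)acc.words[1], (unsigned)acc.words[0]);
  return 0;
}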
--- a/hotspot/src/share/vm/opto/parse1.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/parse1.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -575,12 +575,13 @@
       decrement_age();
     }
   }
-  if (depth() == 1) {
+
+  if (depth() == 1 && !failing()) {
     // Add check to deoptimize the nmethod if RTM state was changed
     rtm_deopt();
   }
 
-  // Check for bailouts during method entry.
+  // Check for bailouts during method entry or RTM state check setup.
   if (failing()) {
     if (log)  log->done("parse");
     C->set_default_node_notes(caller_nn);
@@ -1756,7 +1757,7 @@
       if (remerge == NULL) {
         assert(base != NULL, "");
         assert(base->in(0) != NULL, "should not be xformed away");
-        remerge = MergeMemNode::make(C, base->in(pnum));
+        remerge = MergeMemNode::make(base->in(pnum));
         gvn().set_type(remerge, Type::MEMORY);
         base->set_req(pnum, remerge);
       }
@@ -2199,7 +2200,7 @@
   // down below a SafePoint.
 
   // Clone the current memory state
-  Node* mem = MergeMemNode::make(C, map()->memory());
+  Node* mem = MergeMemNode::make(map()->memory());
 
   mem = _gvn.transform(mem);
 
@@ -2213,7 +2214,7 @@
 
   // Create a node for the polling address
   if( add_poll_param ) {
-    Node *polladr = ConPNode::make(C, (address)os::get_polling_page());
+    Node *polladr = ConPNode::make((address)os::get_polling_page());
     sfpnt->init_req(TypeFunc::Parms+0, _gvn.transform(polladr));
   }
 
--- a/hotspot/src/share/vm/opto/parseHelper.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/parseHelper.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,7 @@
 
   // Get method
   const TypePtr* method_type = TypeMetadataPtr::make(method);
-  Node *method_node = _gvn.transform( ConNode::make(C, method_type) );
+  Node *method_node = _gvn.transform(ConNode::make(method_type));
 
   kill_dead_locals();
 
--- a/hotspot/src/share/vm/opto/phaseX.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/phaseX.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -615,7 +615,7 @@
 // Make an idealized constant - one of ConINode, ConPNode, etc.
 ConNode* PhaseValues::uncached_makecon(const Type *t) {
   assert(t->singleton(), "must be a constant");
-  ConNode* x = ConNode::make(C, t);
+  ConNode* x = ConNode::make(t);
   ConNode* k = (ConNode*)hash_find_insert(x); // Value numbering
   if (k == NULL) {
     set_type(x, t);             // Missed, provide type mapping
@@ -933,9 +933,32 @@
   for (int i = 0; i < _verify_window_size; i++) {
     _verify_window[i] = NULL;
   }
+#ifdef ASSERT
+  // Verify that all modified nodes are on _worklist
+  Unique_Node_List* modified_list = C->modified_nodes();
+  while (modified_list != NULL && modified_list->size()) {
+    Node* n = modified_list->pop();
+    if (n->outcnt() != 0 && !n->is_Con() && !_worklist.member(n)) {
+      n->dump();
+      assert(false, "modified node is not on IGVN._worklist");
+    }
+  }
+#endif
 }
 
 void PhaseIterGVN::verify_PhaseIterGVN() {
+#ifdef ASSERT
+  // Verify nodes with changed inputs.
+  Unique_Node_List* modified_list = C->modified_nodes();
+  while (modified_list != NULL && modified_list->size()) {
+    Node* n = modified_list->pop();
+    if (n->outcnt() != 0 && !n->is_Con()) { // skip dead and Con nodes
+      n->dump();
+      assert(false, "modified node was not processed by IGVN.transform_old()");
+    }
+  }
+#endif
+
   C->verify_graph_edges();
   if( VerifyOpto && allow_progress() ) {
     // Must turn off allow_progress to enable assert and break recursion
@@ -964,6 +987,14 @@
                   (int) _verify_counter, (int) _verify_full_passes);
     }
   }
+
+#ifdef ASSERT
+  while (modified_list->size()) {
+    Node* n = modified_list->pop();
+    n->dump();
+    assert(false, "VerifyIterativeGVN: new modified node was added");
+  }
+#endif
 }
 #endif /* PRODUCT */
 
@@ -1066,6 +1097,7 @@
   Node* k = n;
   DEBUG_ONLY(dead_loop_check(k);)
   DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
+  C->remove_modified_node(k);
   Node* i = k->Ideal(this, /*can_reshape=*/true);
   assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
 #ifndef PRODUCT
@@ -1107,6 +1139,7 @@
     DEBUG_ONLY(dead_loop_check(k);)
     // Try idealizing again
     DEBUG_ONLY(is_new = (k->outcnt() == 0);)
+    C->remove_modified_node(k);
     i = k->Ideal(this, /*can_reshape=*/true);
     assert(i != k || is_new || (i->outcnt() > 0), "don't return dead nodes");
 #ifndef PRODUCT
@@ -1259,6 +1292,7 @@
       _stack.pop();
       // Remove dead node from iterative worklist
       _worklist.remove(dead);
+      C->remove_modified_node(dead);
       // Constant node that has no out-edges and has only one in-edge from
       // root is usually dead. However, sometimes reshaping walk makes
       // it reachable by adding use edges. So, we will NOT count Con nodes
@@ -1288,7 +1322,7 @@
   for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
     Node* use = old->last_out(i);  // for each use...
     // use might need re-hashing (but it won't if it's a new node)
-    bool is_in_table = _table.hash_delete( use );
+    rehash_node_delayed(use);
     // Update use-def info as well
     // We remove all occurrences of old within use->in,
     // so as to avoid rehashing any node more than once.
@@ -1300,11 +1334,6 @@
         ++num_edges;
       }
     }
-    // Insert into GVN hash table if unique
-    // If a duplicate, 'use' will be cleaned up when pulled off worklist
-    if( is_in_table ) {
-      hash_find_insert(use);
-    }
     i -= num_edges;    // we deleted 1 or more copies of this edge
   }
 
@@ -1599,7 +1628,7 @@
     if( t == Type::TOP ) {
       // cache my top node on the Compile instance
       if( C->cached_top_node() == NULL || C->cached_top_node()->in(0) == NULL ) {
-        C->set_cached_top_node( ConNode::make(C, Type::TOP) );
+        C->set_cached_top_node(ConNode::make(Type::TOP));
         set_type(C->top(), Type::TOP);
       }
       nn = C->top();
@@ -1725,7 +1754,7 @@
         MachNode *m = n->as_Mach();
         int deleted_count = 0;
         // check for peephole opportunities
-        MachNode *m2 = m->peephole( block, instruction_index, _regalloc, deleted_count, C );
+        MachNode *m2 = m->peephole(block, instruction_index, _regalloc, deleted_count);
         if( m2 != NULL ) {
 #ifndef PRODUCT
           if( PrintOptoPeephole ) {
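The new ASSERT-only blocks drain Compile::modified_nodes() and fail if any live, non-constant node had its inputs changed without ending up on the IGVN worklist (and, after verification, if anything new slipped onto the list). Restated outside HotSpot with stand-in containers, the invariant being checked is roughly:

// Stand-alone restatement of the debug check: every node whose inputs changed
// must still be on the IGVN worklist, unless it is dead or a constant.
#include <cassert>
#include <set>
#include <vector>

struct Node { int outcnt = 1; bool is_con = false; };

static void verify_modified_are_queued(std::vector<Node*>& modified,
                                       const std::set<Node*>& worklist) {
  while (!modified.empty()) {
    Node* n = modified.back();
    modified.pop_back();
    if (n->outcnt != 0 && !n->is_con && worklist.count(n) == 0) {
      assert(false && "modified node is not on IGVN worklist");
    }
  }
}

int main() {
  Node a, b;
  std::vector<Node*> modified = {&a, &b};
  std::set<Node*> worklist = {&a, &b};   // both queued, so the check passes
  verify_modified_are_queued(modified, worklist);
  return 0;
}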
--- a/hotspot/src/share/vm/opto/phaseX.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/phaseX.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -311,6 +311,9 @@
                                const Type* limit_type) const
   { ShouldNotCallThis(); return NULL; }
 
+  // Delayed node rehash if this is an IGVN phase
+  virtual void igvn_rehash_node_delayed(Node* n) {}
+
 #ifndef PRODUCT
   void dump_old2new_map() const;
   void dump_new( uint new_lidx ) const;
@@ -488,6 +491,10 @@
     _worklist.push(n);
   }
 
+  void igvn_rehash_node_delayed(Node* n) {
+    rehash_node_delayed(n);
+  }
+
   // Replace ith edge of "n" with "in"
   void replace_input_of(Node* n, int i, Node* in) {
     rehash_node_delayed(n);
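igvn_rehash_node_delayed() is introduced as a do-nothing virtual on the base phase and overridden here, so code that only holds a base-phase pointer (like the StoreNode::Ideal change earlier in this patch) can request a delayed rehash without knowing which phase it runs under. A toy analogue of that hook pattern:

// Hypothetical analogue of the virtual hook: callers see only the base phase,
// and only the iterative phase actually performs the delayed rehash.
#include <cstdio>

struct Phase {
  virtual ~Phase() {}
  virtual void igvn_rehash_node_delayed(void* /*node*/) {}   // no-op by default
};

struct IterPhase : Phase {
  void igvn_rehash_node_delayed(void* node) override {
    printf("queue %p for rehash + worklist\n", node);         // real code rehashes
  }
};

static void rewire(Phase* phase, void* node) {
  phase->igvn_rehash_node_delayed(node);   // safe under either phase
  // ... then change the node's inputs ...
}

int main() {
  Phase gvn;
  IterPhase igvn;
  int node;
  rewire(&gvn, &node);    // silently a no-op
  rewire(&igvn, &node);   // queued for rehash
  return 0;
}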
--- a/hotspot/src/share/vm/opto/rootnode.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/rootnode.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -35,10 +35,12 @@
 //------------------------------Ideal------------------------------------------
 // Remove dead inputs
 Node *RootNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  bool modified = false;
   for( uint i = 1; i < req(); i++ ) { // For all inputs
     // Check for and remove dead inputs
     if( phase->type(in(i)) == Type::TOP ) {
       del_req(i--);             // Delete TOP inputs
+      modified = true;
     }
   }
 
@@ -56,7 +58,7 @@
   // If we want to get the rest of the win later, we should pattern match
   // simple recursive call trees to closed-form solutions.
 
-  return NULL;                  // No further opportunities exposed
+  return modified ? this : NULL;
 }
 
 //=============================================================================
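RootNode::Ideal now reports progress by returning this after deleting dead inputs, instead of always returning NULL; the caller treats a non-NULL result as "something changed, keep iterating". A stand-alone analogue of that contract (all types below are stand-ins, not HotSpot code):

// Minimal analogue of the "return this on progress, NULL otherwise" contract.
#include <cstdio>
#include <vector>

struct Node {
  std::vector<Node*> in;
  bool is_top = false;
  Node* Ideal() {                       // prune dead (TOP) inputs
    bool modified = false;
    for (size_t i = 0; i < in.size(); ) {
      if (in[i]->is_top) { in.erase(in.begin() + i); modified = true; }
      else               { ++i; }
    }
    return modified ? this : nullptr;   // non-NULL tells the caller to iterate again
  }
};

int main() {
  Node top; top.is_top = true;
  Node live;
  Node root; root.in = { &top, &live };
  printf("first pass: %s\n",  root.Ideal() ? "progress" : "no change");
  printf("second pass: %s\n", root.Ideal() ? "progress" : "no change");
  return 0;
}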
--- a/hotspot/src/share/vm/opto/stringopts.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/stringopts.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1438,7 +1438,7 @@
   }
   // Make sure the memory state is a MergeMem for parsing.
   if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
-    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
+    map->set_req(TypeFunc::Memory, MergeMemNode::make(map->in(TypeFunc::Memory)));
   }
 
   jvms->set_map(map);
--- a/hotspot/src/share/vm/opto/subnode.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/subnode.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1168,7 +1168,6 @@
 Node* BoolNode::make_predicate(Node* test_value, PhaseGVN* phase) {
   if (test_value->is_Con())   return test_value;
   if (test_value->is_Bool())  return test_value;
-  Compile* C = phase->C;
   if (test_value->is_CMove() &&
       test_value->in(CMoveNode::Condition)->is_Bool()) {
     BoolNode*   bol   = test_value->in(CMoveNode::Condition)->as_Bool();
@@ -1191,7 +1190,7 @@
 //--------------------------------as_int_value---------------------------------
 Node* BoolNode::as_int_value(PhaseGVN* phase) {
   // Inverse to make_predicate.  The CMove probably boils down to a Conv2B.
-  Node* cmov = CMoveNode::make(phase->C, NULL, this,
+  Node* cmov = CMoveNode::make(NULL, this,
                                phase->intcon(0), phase->intcon(1),
                                TypeInt::BOOL);
   return phase->transform(cmov);
@@ -1199,7 +1198,6 @@
 
 //----------------------------------negate-------------------------------------
 BoolNode* BoolNode::negate(PhaseGVN* phase) {
-  Compile* C = phase->C;
   return new BoolNode(in(1), _test.negate());
 }
 
--- a/hotspot/src/share/vm/opto/superword.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/superword.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1378,9 +1378,23 @@
       if (n->is_Load()) {
         Node* ctl = n->in(MemNode::Control);
         Node* mem = first->in(MemNode::Memory);
+        SWPointer p1(n->as_Mem(), this);
+        // Identify the memory dependency for the new loadVector node by
+        // walking up through memory chain.
+        // This is done to give flexibility to the new loadVector node so that
+        // it can move above independent storeVector nodes.
+        while (mem->is_StoreVector()) {
+          SWPointer p2(mem->as_Mem(), this);
+          int cmp = p1.cmp(p2);
+          if (SWPointer::not_equal(cmp) || !SWPointer::comparable(cmp)) {
+            mem = mem->in(MemNode::Memory);
+          } else {
+            break; // dependent memory
+          }
+        }
         Node* adr = low_adr->in(MemNode::Address);
         const TypePtr* atyp = n->adr_type();
-        vn = LoadVectorNode::make(C, opc, ctl, mem, adr, atyp, vlen, velt_basic_type(n));
+        vn = LoadVectorNode::make(opc, ctl, mem, adr, atyp, vlen, velt_basic_type(n));
         vlen_in_bytes = vn->as_LoadVector()->memory_size();
       } else if (n->is_Store()) {
         // Promote value to be stored to vector
@@ -1389,7 +1403,7 @@
         Node* mem = first->in(MemNode::Memory);
         Node* adr = low_adr->in(MemNode::Address);
         const TypePtr* atyp = n->adr_type();
-        vn = StoreVectorNode::make(C, opc, ctl, mem, adr, atyp, val, vlen);
+        vn = StoreVectorNode::make(opc, ctl, mem, adr, atyp, val, vlen);
         vlen_in_bytes = vn->as_StoreVector()->memory_size();
       } else if (n->req() == 3) {
         // Promote operands to vector
@@ -1401,7 +1415,7 @@
           in1 = in2;
           in2 = tmp;
         }
-        vn = VectorNode::make(C, opc, in1, in2, vlen, velt_basic_type(n));
+        vn = VectorNode::make(opc, in1, in2, vlen, velt_basic_type(n));
         vlen_in_bytes = vn->as_Vector()->length_in_bytes();
       } else {
         ShouldNotReachHere();
@@ -1450,11 +1464,11 @@
       if (t != NULL && t->is_con()) {
         juint shift = t->get_con();
         if (shift > mask) { // Unsigned cmp
-          cnt = ConNode::make(C, TypeInt::make(shift & mask));
+          cnt = ConNode::make(TypeInt::make(shift & mask));
         }
       } else {
         if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
-          cnt = ConNode::make(C, TypeInt::make(mask));
+          cnt = ConNode::make(TypeInt::make(mask));
           _igvn.register_new_node_with_optimizer(cnt);
           cnt = new AndINode(opd, cnt);
           _igvn.register_new_node_with_optimizer(cnt);
@@ -1462,7 +1476,7 @@
         }
         assert(opd->bottom_type()->isa_int(), "int type only");
         // Move non constant shift count into vector register.
-        cnt = VectorNode::shift_count(C, p0, cnt, vlen, velt_basic_type(p0));
+        cnt = VectorNode::shift_count(p0, cnt, vlen, velt_basic_type(p0));
       }
       if (cnt != opd) {
         _igvn.register_new_node_with_optimizer(cnt);
@@ -1475,7 +1489,7 @@
     // p0's vector. Use p0's type because size of operand's container in
     // vector should match p0's size regardless operand's size.
     const Type* p0_t = velt_type(p0);
-    VectorNode* vn = VectorNode::scalar2vector(_phase->C, opd, vlen, p0_t);
+    VectorNode* vn = VectorNode::scalar2vector(opd, vlen, p0_t);
 
     _igvn.register_new_node_with_optimizer(vn);
     _phase->set_ctrl(vn, _phase->get_ctrl(opd));
@@ -1490,7 +1504,7 @@
 
   // Insert pack operation
   BasicType bt = velt_basic_type(p0);
-  PackNode* pk = PackNode::make(_phase->C, opd, vlen, bt);
+  PackNode* pk = PackNode::make(opd, vlen, bt);
   DEBUG_ONLY( const BasicType opd_bt = opd->bottom_type()->basic_type(); )
 
   for (uint i = 1; i < vlen; i++) {
@@ -1546,7 +1560,7 @@
     _igvn.hash_delete(def);
     int def_pos = alignment(def) / data_size(def);
 
-    Node* ex = ExtractNode::make(_phase->C, def, def_pos, velt_basic_type(def));
+    Node* ex = ExtractNode::make(def, def_pos, velt_basic_type(def));
     _igvn.register_new_node_with_optimizer(ex);
     _phase->set_ctrl(ex, _phase->get_ctrl(def));
     _igvn.replace_input_of(use, idx, ex);
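The new loop gives the freshly built LoadVector a looser memory dependency: starting from the pack's memory input it hops over StoreVector nodes whose SWPointer provably does not overlap the load, and stops at the first store it cannot rule out. The sketch below models just that walk; the aliasing decision is reduced to a boolean stand-in for the SWPointer comparison.

// Simplified model of the memory-chain walk (not the real SWPointer logic):
// hop over independent vector stores, stop at the first possibly-dependent one.
#include <cstdio>

struct MemNode {
  bool is_store_vector;
  bool may_alias_load;     // stand-in for the SWPointer comparison
  MemNode* prior;          // corresponds to in(MemNode::Memory)
};

static MemNode* relax_load_memory(MemNode* mem) {
  while (mem != nullptr && mem->is_store_vector && !mem->may_alias_load) {
    mem = mem->prior;      // independent store: look further up the chain
  }
  return mem;              // first dependent store (or non-store) wins
}

int main() {
  MemNode root   = { false, false, nullptr };
  MemNode storeA = { true,  true,  &root  };   // overlaps the load -> dependent
  MemNode storeB = { true,  false, &storeA };  // independent of the load
  MemNode* mem = relax_load_memory(&storeB);   // skips storeB, stops at storeA
  printf("load depends on %s\n", mem == &storeA ? "storeA" : "storeB");
  return 0;
}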
--- a/hotspot/src/share/vm/opto/type.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/type.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -5087,11 +5087,11 @@
 // Dump Function Type
 #ifndef PRODUCT
 void TypeFunc::dump2( Dict &d, uint depth, outputStream *st ) const {
-  if( _range->_cnt <= Parms )
+  if( _range->cnt() <= Parms )
     st->print("void");
   else {
     uint i;
-    for (i = Parms; i < _range->_cnt-1; i++) {
+    for (i = Parms; i < _range->cnt()-1; i++) {
       _range->field_at(i)->dump2(d,depth,st);
       st->print("/");
     }
@@ -5104,9 +5104,9 @@
     return;
   }
   d.Insert((void*)this,(void*)this);    // Stop recursion
-  if (Parms < _domain->_cnt)
+  if (Parms < _domain->cnt())
     _domain->field_at(Parms)->dump2(d,depth-1,st);
-  for (uint i = Parms+1; i < _domain->_cnt; i++) {
+  for (uint i = Parms+1; i < _domain->cnt(); i++) {
     st->print(", ");
     _domain->field_at(i)->dump2(d,depth-1,st);
   }
--- a/hotspot/src/share/vm/opto/type.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/type.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -609,16 +609,16 @@
 // signature types.
 class TypeTuple : public Type {
   TypeTuple( uint cnt, const Type **fields ) : Type(Tuple), _cnt(cnt), _fields(fields) { }
+
+  const uint          _cnt;              // Count of fields
+  const Type ** const _fields;           // Array of field types
+
 public:
   virtual bool eq( const Type *t ) const;
   virtual int  hash() const;             // Type specific hashing
   virtual bool singleton(void) const;    // TRUE if type is a singleton
   virtual bool empty(void) const;        // TRUE if type is vacuous
 
-public:
-  const uint          _cnt;              // Count of fields
-  const Type ** const _fields;           // Array of field types
-
   // Accessors:
   uint cnt() const { return _cnt; }
   const Type* field_at(uint i) const {
@@ -1447,6 +1447,10 @@
   virtual int  hash() const;             // Type specific hashing
   virtual bool singleton(void) const;    // TRUE if type is a singleton
   virtual bool empty(void) const;        // TRUE if type is vacuous
+
+  const TypeTuple* const _domain;     // Domain of inputs
+  const TypeTuple* const _range;      // Range of results
+
 public:
   // Constants are shared among ADLC and VM
   enum { Control    = AdlcVMDeps::Control,
@@ -1457,8 +1461,6 @@
          Parms      = AdlcVMDeps::Parms
   };
 
-  const TypeTuple* const _domain;     // Domain of inputs
-  const TypeTuple* const _range;      // Range of results
 
   // Accessors:
   const TypeTuple* domain() const { return _domain; }
--- a/hotspot/src/share/vm/opto/vectornode.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/vectornode.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -245,7 +245,7 @@
 }
 
 // Return the vector version of a scalar operation node.
-VectorNode* VectorNode::make(Compile* C, int opc, Node* n1, Node* n2, uint vlen, BasicType bt) {
+VectorNode* VectorNode::make(int opc, Node* n1, Node* n2, uint vlen, BasicType bt) {
   const TypeVect* vt = TypeVect::make(bt, vlen);
   int vopc = VectorNode::opcode(opc, bt);
   // This method should not be called for unimplemented vectors.
@@ -299,7 +299,7 @@
 }
 
 // Scalar promotion
-VectorNode* VectorNode::scalar2vector(Compile* C, Node* s, uint vlen, const Type* opd_t) {
+VectorNode* VectorNode::scalar2vector(Node* s, uint vlen, const Type* opd_t) {
   BasicType bt = opd_t->array_element_basic_type();
   const TypeVect* vt = opd_t->singleton() ? TypeVect::make(opd_t, vlen)
                                           : TypeVect::make(bt, vlen);
@@ -323,7 +323,7 @@
   return NULL;
 }
 
-VectorNode* VectorNode::shift_count(Compile* C, Node* shift, Node* cnt, uint vlen, BasicType bt) {
+VectorNode* VectorNode::shift_count(Node* shift, Node* cnt, uint vlen, BasicType bt) {
   assert(VectorNode::is_shift(shift) && !cnt->is_Con(), "only variable shift count");
   // Match shift count type with shift vector type.
   const TypeVect* vt = TypeVect::make(bt, vlen);
@@ -342,7 +342,7 @@
 }
 
 // Return initial Pack node. Additional operands added with add_opd() calls.
-PackNode* PackNode::make(Compile* C, Node* s, uint vlen, BasicType bt) {
+PackNode* PackNode::make(Node* s, uint vlen, BasicType bt) {
   const TypeVect* vt = TypeVect::make(bt, vlen);
   switch (bt) {
   case T_BOOLEAN:
@@ -365,18 +365,18 @@
 }
 
 // Create a binary tree form for Packs. [lo, hi) (half-open) range
-PackNode* PackNode::binary_tree_pack(Compile* C, int lo, int hi) {
+PackNode* PackNode::binary_tree_pack(int lo, int hi) {
   int ct = hi - lo;
   assert(is_power_of_2(ct), "power of 2");
   if (ct == 2) {
-    PackNode* pk = PackNode::make(C, in(lo), 2, vect_type()->element_basic_type());
+    PackNode* pk = PackNode::make(in(lo), 2, vect_type()->element_basic_type());
     pk->add_opd(in(lo+1));
     return pk;
 
   } else {
     int mid = lo + ct/2;
-    PackNode* n1 = binary_tree_pack(C, lo,  mid);
-    PackNode* n2 = binary_tree_pack(C, mid, hi );
+    PackNode* n1 = binary_tree_pack(lo,  mid);
+    PackNode* n2 = binary_tree_pack(mid, hi );
 
     BasicType bt = n1->vect_type()->element_basic_type();
     assert(bt == n2->vect_type()->element_basic_type(), "should be the same");
@@ -402,23 +402,23 @@
 }
 
 // Return the vector version of a scalar load node.
-LoadVectorNode* LoadVectorNode::make(Compile* C, int opc, Node* ctl, Node* mem,
+LoadVectorNode* LoadVectorNode::make(int opc, Node* ctl, Node* mem,
                                      Node* adr, const TypePtr* atyp, uint vlen, BasicType bt) {
   const TypeVect* vt = TypeVect::make(bt, vlen);
   return new LoadVectorNode(ctl, mem, adr, atyp, vt);
 }
 
 // Return the vector version of a scalar store node.
-StoreVectorNode* StoreVectorNode::make(Compile* C, int opc, Node* ctl, Node* mem,
+StoreVectorNode* StoreVectorNode::make(int opc, Node* ctl, Node* mem,
                                        Node* adr, const TypePtr* atyp, Node* val,
                                        uint vlen) {
   return new StoreVectorNode(ctl, mem, adr, atyp, val);
 }
 
 // Extract a scalar element of vector.
-Node* ExtractNode::make(Compile* C, Node* v, uint position, BasicType bt) {
+Node* ExtractNode::make(Node* v, uint position, BasicType bt) {
   assert((int)position < Matcher::max_vector_size(bt), "pos in range");
-  ConINode* pos = ConINode::make(C, (int)position);
+  ConINode* pos = ConINode::make((int)position);
   switch (bt) {
   case T_BOOLEAN:
     return new ExtractUBNode(v, pos);
--- a/hotspot/src/share/vm/opto/vectornode.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/opto/vectornode.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,9 +52,9 @@
 
   virtual uint ideal_reg() const { return Matcher::vector_ideal_reg(vect_type()->length_in_bytes()); }
 
-  static VectorNode* scalar2vector(Compile* C, Node* s, uint vlen, const Type* opd_t);
-  static VectorNode* shift_count(Compile* C, Node* shift, Node* cnt, uint vlen, BasicType bt);
-  static VectorNode* make(Compile* C, int opc, Node* n1, Node* n2, uint vlen, BasicType bt);
+  static VectorNode* scalar2vector(Node* s, uint vlen, const Type* opd_t);
+  static VectorNode* shift_count(Node* shift, Node* cnt, uint vlen, BasicType bt);
+  static VectorNode* make(int opc, Node* n1, Node* n2, uint vlen, BasicType bt);
 
   static int  opcode(int opc, BasicType bt);
   static bool implemented(int opc, uint vlen, BasicType bt);
@@ -371,7 +371,7 @@
 
   virtual int store_Opcode() const { return Op_StoreVector; }
 
-  static LoadVectorNode* make(Compile* C, int opc, Node* ctl, Node* mem,
+  static LoadVectorNode* make(int opc, Node* ctl, Node* mem,
                               Node* adr, const TypePtr* atyp, uint vlen, BasicType bt);
 };
 
@@ -394,7 +394,7 @@
   virtual BasicType memory_type() const { return T_VOID; }
   virtual int memory_size() const { return vect_type()->length_in_bytes(); }
 
-  static StoreVectorNode* make(Compile* C, int opc, Node* ctl, Node* mem,
+  static StoreVectorNode* make(int opc, Node* ctl, Node* mem,
                                Node* adr, const TypePtr* atyp, Node* val,
                                uint vlen);
 };
@@ -465,9 +465,9 @@
   }
 
   // Create a binary tree form for Packs. [lo, hi) (half-open) range
-  PackNode* binary_tree_pack(Compile* C, int lo, int hi);
+  PackNode* binary_tree_pack(int lo, int hi);
 
-  static PackNode* make(Compile* C, Node* s, uint vlen, BasicType bt);
+  static PackNode* make(Node* s, uint vlen, BasicType bt);
 };
 
 //------------------------------PackBNode--------------------------------------
@@ -552,7 +552,7 @@
   virtual int Opcode() const;
   uint  pos() const { return in(2)->get_int(); }
 
-  static Node* make(Compile* C, Node* v, uint position, BasicType bt);
+  static Node* make(Node* v, uint position, BasicType bt);
 };
 
 //------------------------------ExtractBNode-----------------------------------
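Across these files the factory methods lose their leading Compile* C parameter. Presumably this works because the allocation context is already reachable through the per-thread Compile::current(), so make() no longer needs it passed explicitly; that is an inference from the diff, not something stated in it. A toy illustration of the pattern:

// Sketch of why the Compile* parameter can disappear from the factories: the
// compile context is fetched from a "current" accessor instead of being
// threaded through every call. Names are illustrative.
#include <cstdio>

struct Compile {
  static Compile* current() { return _current; }   // set up by the compiler driver
  static Compile* _current;
};
Compile* Compile::_current = nullptr;

struct VectorNode {
  // old: static VectorNode* make(Compile* C, int opc, ...);
  // new: static VectorNode* make(int opc, ...);
  static VectorNode* make(int /*opc*/) {
    Compile* C = Compile::current();                // fetched, not passed
    printf("allocating in compile %p\n", (void*)C);
    return new VectorNode();
  }
};

int main() {
  Compile c;
  Compile::_current = &c;
  delete VectorNode::make(0);
  return 0;
}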
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -661,7 +661,7 @@
              (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
                                                                     top_frame_expression_stack_adjustment))) ||
             (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
-            (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute) &&
+            (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
              (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
             )) {
         ttyLocker ttyl;
--- a/hotspot/src/share/vm/runtime/sharedRuntimeMath.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntimeMath.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -27,20 +27,51 @@
 
 #include <math.h>
 
-// VM_LITTLE_ENDIAN is #defined appropriately in the Makefiles
-// [jk] this is not 100% correct because the float word order may different
-// from the byte order (e.g. on ARM FPA)
+// Used to access the lower/higher 32 bits of a double
+typedef union {
+    double d;
+    struct {
 #ifdef VM_LITTLE_ENDIAN
-# define __HI(x) *(1+(int*)&x)
-# define __LO(x) *(int*)&x
+      int lo;
+      int hi;
 #else
-# define __HI(x) *(int*)&x
-# define __LO(x) *(1+(int*)&x)
+      int hi;
+      int lo;
 #endif
+    } split;
+} DoubleIntConv;
+
+static inline int high(double d) {
+  DoubleIntConv x;
+  x.d = d;
+  return x.split.hi;
+}
+
+static inline int low(double d) {
+  DoubleIntConv x;
+  x.d = d;
+  return x.split.lo;
+}
+
+static inline void set_high(double* d, int high) {
+  DoubleIntConv conv;
+  conv.d = *d;
+  conv.split.hi = high;
+  *d = conv.d;
+}
+
+static inline void set_low(double* d, int low) {
+  DoubleIntConv conv;
+  conv.d = *d;
+  conv.split.lo = low;
+  *d = conv.d;
+}
 
 static double copysignA(double x, double y) {
-  __HI(x) = (__HI(x)&0x7fffffff)|(__HI(y)&0x80000000);
-  return x;
+  DoubleIntConv convX;
+  convX.d = x;
+  convX.split.hi = (convX.split.hi & 0x7fffffff) | (high(y) & 0x80000000);
+  return convX.d;
 }
 
 /*
@@ -67,30 +98,32 @@
 hugeX  = 1.0e+300,
 tiny   = 1.0e-300;
 
-static double scalbnA (double x, int n) {
+static double scalbnA(double x, int n) {
   int  k,hx,lx;
-  hx = __HI(x);
-  lx = __LO(x);
+  hx = high(x);
+  lx = low(x);
   k = (hx&0x7ff00000)>>20;              /* extract exponent */
   if (k==0) {                           /* 0 or subnormal x */
     if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */
     x *= two54;
-    hx = __HI(x);
+    hx = high(x);
     k = ((hx&0x7ff00000)>>20) - 54;
     if (n< -50000) return tiny*x;       /*underflow*/
   }
   if (k==0x7ff) return x+x;             /* NaN or Inf */
   k = k+n;
-  if (k >  0x7fe) return hugeX*copysignA(hugeX,x); /* overflow  */
-  if (k > 0)                            /* normal result */
-    {__HI(x) = (hx&0x800fffff)|(k<<20); return x;}
+  if (k > 0x7fe) return hugeX*copysignA(hugeX,x); /* overflow  */
+  if (k > 0) {                          /* normal result */
+    set_high(&x, (hx&0x800fffff)|(k<<20));
+    return x;
+  }
   if (k <= -54) {
     if (n > 50000)      /* in case integer overflow in n+k */
       return hugeX*copysignA(hugeX,x);  /*overflow*/
     else return tiny*copysignA(tiny,x); /*underflow*/
   }
   k += 54;                              /* subnormal result */
-  __HI(x) = (hx&0x800fffff)|(k<<20);
+  set_high(&x, (hx&0x800fffff)|(k<<20));
   return x*twom54;
 }
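The old __HI/__LO macros were lvalue-producing pointer casts; the replacement routes all high/low word access through the DoubleIntConv union and small helper functions. A compilable usage sketch (the union and helpers below are renamed stand-ins, with the field order hard-wired for a little-endian host, whereas the real header switches on VM_LITTLE_ENDIAN):

// Usage sketch for the union-based replacement of __HI/__LO.
#include <cstdio>

typedef union {
  double d;
  struct { int lo; int hi; } split;   // little-endian field order assumed
} DoubleWords;

static int high_word(double d) { DoubleWords x; x.d = d; return x.split.hi; }

static void set_high_word(double* d, int hi) {
  DoubleWords c; c.d = *d; c.split.hi = hi; *d = c.d;
}

int main() {
  double x = 2.0;
  // Clearing the sign bit the same way copysignA does for a positive y:
  set_high_word(&x, high_word(x) & 0x7fffffff);
  printf("x = %f, high word = 0x%08x\n", x, (unsigned)high_word(x));
  return 0;
}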
 
--- a/hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -40,6 +40,7 @@
 // generated; can not figure out how to turn down optimization for one
 // file in the IDE on Windows
 #ifdef WIN32
+# pragma warning( disable: 4748 ) // /GS can not protect parameters and local variables from local buffer overrun because optimizations are disabled in function
 # pragma optimize ( "", off )
 #endif
 
@@ -114,8 +115,8 @@
   int k,hx,i,j;
   unsigned lx;
 
-  hx = __HI(x);               /* high word of x */
-  lx = __LO(x);               /* low  word of x */
+  hx = high(x);               /* high word of x */
+  lx = low(x);                /* low  word of x */
 
   k=0;
   if (hx < 0x00100000) {                   /* x < 2**-1022  */
@@ -123,13 +124,13 @@
       return -two54/zero;             /* log(+-0)=-inf */
     if (hx<0) return (x-x)/zero;   /* log(-#) = NaN */
     k -= 54; x *= two54; /* subnormal number, scale up x */
-    hx = __HI(x);             /* high word of x */
+    hx = high(x);             /* high word of x */
   }
   if (hx >= 0x7ff00000) return x+x;
   k += (hx>>20)-1023;
   hx &= 0x000fffff;
   i = (hx+0x95f64)&0x100000;
-  __HI(x) = hx|(i^0x3ff00000);        /* normalize x or x/2 */
+  set_high(&x, hx|(i^0x3ff00000)); /* normalize x or x/2 */
   k += (i>>20);
   f = x-1.0;
   if((0x000fffff&(2+hx))<3) {  /* |f| < 2**-20 */
@@ -208,8 +209,8 @@
   int i,k,hx;
   unsigned lx;
 
-  hx = __HI(x);       /* high word of x */
-  lx = __LO(x);       /* low word of x */
+  hx = high(x);       /* high word of x */
+  lx = low(x);        /* low word of x */
 
   k=0;
   if (hx < 0x00100000) {                  /* x < 2**-1022  */
@@ -217,14 +218,14 @@
       return -two54/zero;             /* log(+-0)=-inf */
     if (hx<0) return (x-x)/zero;        /* log(-#) = NaN */
     k -= 54; x *= two54; /* subnormal number, scale up x */
-    hx = __HI(x);                /* high word of x */
+    hx = high(x);                /* high word of x */
   }
   if (hx >= 0x7ff00000) return x+x;
   k += (hx>>20)-1023;
   i  = ((unsigned)k&0x80000000)>>31;
   hx = (hx&0x000fffff)|((0x3ff-i)<<20);
   y  = (double)(k+i);
-  __HI(x) = hx;
+  set_high(&x, hx);
   z  = y*log10_2lo + ivln10*__ieee754_log(x);
   return  z+y*log10_2hi;
 }
@@ -319,14 +320,14 @@
   int k=0,xsb;
   unsigned hx;
 
-  hx  = __HI(x);        /* high word of x */
+  hx  = high(x);                /* high word of x */
   xsb = (hx>>31)&1;             /* sign bit of x */
   hx &= 0x7fffffff;             /* high word of |x| */
 
   /* filter out non-finite argument */
   if(hx >= 0x40862E42) {                        /* if |x|>=709.78... */
     if(hx>=0x7ff00000) {
-      if(((hx&0xfffff)|__LO(x))!=0)
+      if(((hx&0xfffff)|low(x))!=0)
         return x+x;             /* NaN */
       else return (xsb==0)? x:0.0;      /* exp(+-inf)={inf,0} */
     }
@@ -357,10 +358,10 @@
   if(k==0)      return one-((x*c)/(c-2.0)-x);
   else          y = one-((lo-(x*c)/(2.0-c))-hi);
   if(k >= -1021) {
-    __HI(y) += (k<<20); /* add k to y's exponent */
+    set_high(&y, high(y) + (k<<20)); /* add k to y's exponent */
     return y;
   } else {
-    __HI(y) += ((k+1000)<<20);/* add k to y's exponent */
+    set_high(&y, high(y) + ((k+1000)<<20)); /* add k to y's exponent */
     return y*twom1000;
   }
 }
@@ -447,8 +448,8 @@
   unsigned lx,ly;
 
   i0 = ((*(int*)&one)>>29)^1; i1=1-i0;
-  hx = __HI(x); lx = __LO(x);
-  hy = __HI(y); ly = __LO(y);
+  hx = high(x); lx = low(x);
+  hy = high(y); ly = low(y);
   ix = hx&0x7fffffff;  iy = hy&0x7fffffff;
 
   /* y==zero: x**0 = 1 */
@@ -548,14 +549,14 @@
     u = ivln2_h*t;      /* ivln2_h has 21 sig. bits */
     v = t*ivln2_l-w*ivln2;
     t1 = u+v;
-    __LO(t1) = 0;
+    set_low(&t1, 0);
     t2 = v-(t1-u);
   } else {
     double ss,s2,s_h,s_l,t_h,t_l;
     n = 0;
     /* take care subnormal number */
     if(ix<0x00100000)
-      {ax *= two53; n -= 53; ix = __HI(ax); }
+      {ax *= two53; n -= 53; ix = high(ax); }
     n  += ((ix)>>20)-0x3ff;
     j  = ix&0x000fffff;
     /* determine interval */
@@ -563,17 +564,17 @@
     if(j<=0x3988E) k=0;         /* |x|<sqrt(3/2) */
     else if(j<0xBB67A) k=1;     /* |x|<sqrt(3)   */
     else {k=0;n+=1;ix -= 0x00100000;}
-    __HI(ax) = ix;
+    set_high(&ax, ix);
 
     /* compute ss = s_h+s_l = (x-1)/(x+1) or (x-1.5)/(x+1.5) */
     u = ax-bp[k];               /* bp[0]=1.0, bp[1]=1.5 */
     v = one/(ax+bp[k]);
     ss = u*v;
     s_h = ss;
-    __LO(s_h) = 0;
+    set_low(&s_h, 0);
     /* t_h=ax+bp[k] High */
     t_h = zeroX;
-    __HI(t_h)=((ix>>1)|0x20000000)+0x00080000+(k<<18);
+    set_high(&t_h, ((ix>>1)|0x20000000)+0x00080000+(k<<18));
     t_l = ax - (t_h-bp[k]);
     s_l = v*((u-s_h*t_h)-s_h*t_l);
     /* compute log(ax) */
@@ -582,32 +583,32 @@
     r += s_l*(s_h+ss);
     s2  = s_h*s_h;
     t_h = 3.0+s2+r;
-    __LO(t_h) = 0;
+    set_low(&t_h, 0);
     t_l = r-((t_h-3.0)-s2);
     /* u+v = ss*(1+...) */
     u = s_h*t_h;
     v = s_l*t_h+t_l*ss;
     /* 2/(3log2)*(ss+...) */
     p_h = u+v;
-    __LO(p_h) = 0;
+    set_low(&p_h, 0);
     p_l = v-(p_h-u);
     z_h = cp_h*p_h;             /* cp_h+cp_l = 2/(3*log2) */
     z_l = cp_l*p_h+p_l*cp+dp_l[k];
     /* log2(ax) = (ss+..)*2/(3*log2) = n + dp_h + z_h + z_l */
     t = (double)n;
     t1 = (((z_h+z_l)+dp_h[k])+t);
-    __LO(t1) = 0;
+    set_low(&t1, 0);
     t2 = z_l-(((t1-t)-dp_h[k])-z_h);
   }
 
   /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */
   y1  = y;
-  __LO(y1) = 0;
+  set_low(&y1, 0);
   p_l = (y-y1)*t1+y*t2;
   p_h = y1*t1;
   z = p_l+p_h;
-  j = __HI(z);
-  i = __LO(z);
+  j = high(z);
+  i = low(z);
   if (j>=0x40900000) {                          /* z >= 1024 */
     if(((j-0x40900000)|i)!=0)                   /* if z > 1024 */
       return s*hugeX*hugeX;                     /* overflow */
@@ -631,13 +632,13 @@
     n = j+(0x00100000>>(k+1));
     k = ((n&0x7fffffff)>>20)-0x3ff;     /* new k for n */
     t = zeroX;
-    __HI(t) = (n&~(0x000fffff>>k));
+    set_high(&t, (n&~(0x000fffff>>k)));
     n = ((n&0x000fffff)|0x00100000)>>(20-k);
     if(j<0) n = -n;
     p_h -= t;
   }
   t = p_l+p_h;
-  __LO(t) = 0;
+  set_low(&t, 0);
   u = t*lg2_h;
   v = (p_l-(t-p_h))*lg2+t*lg2_l;
   z = u+v;
@@ -646,10 +647,10 @@
   t1  = z - t*(P1+t*(P2+t*(P3+t*(P4+t*P5))));
   r  = (z*t1)/(t1-two)-(w+z*w);
   z  = one-(r-z);
-  j  = __HI(z);
+  j  = high(z);
   j += (n<<20);
   if((j>>20)<=0) z = scalbnA(z,n);       /* subnormal output */
-  else __HI(z) += (n<<20);
+  else set_high(&z, high(z) + (n<<20));
   return s*z;
 }
 
--- a/hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp	Fri Aug 08 10:35:05 2014 -0700
@@ -519,7 +519,7 @@
 {
         double z,r,v;
         int ix;
-        ix = __HI(x)&0x7fffffff;        /* high word of x */
+        ix = high(x)&0x7fffffff;                /* high word of x */
         if(ix<0x3e400000)                       /* |x| < 2**-27 */
            {if((int)x==0) return x;}            /* generate inexact */
         z       =  x*x;
@@ -574,9 +574,9 @@
 
 static double __kernel_cos(double x, double y)
 {
-  double a,h,z,r,qx;
+  double a,h,z,r,qx=0;
   int ix;
-  ix = __HI(x)&0x7fffffff;      /* ix = |x|'s high word*/
+  ix = high(x)&0x7fffffff;              /* ix = |x|'s high word*/
   if(ix<0x3e400000) {                   /* if x < 2**27 */
     if(((int)x)==0) return one;         /* generate inexact */
   }
@@ -588,8 +588,8 @@
     if(ix > 0x3fe90000) {               /* x > 0.78125 */
       qx = 0.28125;
     } else {
-      __HI(qx) = ix-0x00200000; /* x/4 */
-      __LO(qx) = 0;
+      set_high(&qx, ix-0x00200000); /* x/4 */
+      set_low(&qx, 0);
     }
     h = 0.5*z-qx;
     a = one-qx;
@@ -654,11 +654,11 @@
 {
   double z,r,v,w,s;
   int ix,hx;
-  hx = __HI(x);   /* high word of x */
+  hx = high(x);           /* high word of x */
   ix = hx&0x7fffffff;     /* high word of |x| */
   if(ix<0x3e300000) {                     /* x < 2**-28 */
     if((int)x==0) {                       /* generate inexact */
-      if (((ix | __LO(x)) | (iy + 1)) == 0)
+      if (((ix | low(x)) | (iy + 1)) == 0)
         return one / fabsd(x);
       else {
         if (iy == 1)
@@ -667,10 +667,10 @@
           double a, t;
 
           z = w = x + y;
-          __LO(z) = 0;
+          set_low(&z, 0);
           v = y - (z - x);
           t = a = -one / w;
-          __LO(t) = 0;
+          set_low(&t, 0);
           s = one + t * z;
           return t + a * (s + t * v);
         }
@@ -705,10 +705,10 @@
     /*  compute -1.0/(x+r) accurately */
     double a,t;
     z  = w;
-    __LO(z) = 0;
+    set_low(&z, 0);
     v  = r-(z - x);     /* z+v = r+x */
     t = a  = -1.0/w;    /* a = -1.0/w */
-    __LO(t) = 0;
+    set_low(&t, 0);
     s  = 1.0+t*z;
     return t+a*(s+t*v);
   }
@@ -757,7 +757,7 @@
   int n, ix;
 
   /* High word of x. */
-  ix = __HI(x);
+  ix = high(x);
 
   /* |x| ~< pi/4 */
   ix &= 0x7fffffff;
@@ -815,7 +815,7 @@
   int n, ix;
 
   /* High word of x. */
-  ix = __HI(x);
+  ix = high(x);
 
   /* |x| ~< pi/4 */
   ix &= 0x7fffffff;
@@ -872,7 +872,7 @@
   int n, ix;
 
   /* High word of x. */
-  ix = __HI(x);
+  ix = high(x);
 
   /* |x| ~< pi/4 */
   ix &= 0x7fffffff;
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -558,6 +558,27 @@
   return fabs(value);
 }
 
+//----------------------------------------------------------------------------------------------------
+// Special casts
+// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
+typedef union {
+  jfloat f;
+  jint i;
+} FloatIntConv;
+
+typedef union {
+  jdouble d;
+  jlong l;
+  julong ul;
+} DoubleLongConv;
+
+inline jint    jint_cast    (jfloat  x)  { return ((FloatIntConv*)&x)->i; }
+inline jfloat  jfloat_cast  (jint    x)  { return ((FloatIntConv*)&x)->f; }
+
+inline jlong   jlong_cast   (jdouble x)  { return ((DoubleLongConv*)&x)->l;  }
+inline julong  julong_cast  (jdouble x)  { return ((DoubleLongConv*)&x)->ul; }
+inline jdouble jdouble_cast (jlong   x)  { return ((DoubleLongConv*)&x)->d;  }
+
 inline jint low (jlong value)                    { return jint(value); }
 inline jint high(jlong value)                    { return jint(value >> 32); }
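The per-compiler pointer-cast versions of jint_cast/jfloat_cast/jlong_cast/jdouble_cast are deleted below in favor of these shared union-based ones. A small round-trip check of the idea, written with plain fixed-width types rather than the j* typedefs (and copying through a union value instead of the header's pointer-to-union form):

// Round-trip sketch of a bit-preserving float <-> int cast via a union.
#include <cassert>
#include <cstdint>
#include <cstdio>

typedef union { float f; int32_t i; } FloatBits;

static int32_t int_bits(float x)        { FloatBits c; c.f = x; return c.i; }
static float   float_from_bits(int32_t x) { FloatBits c; c.i = x; return c.f; }

int main() {
  float f = -0.0f;                        // only the sign bit is set
  int32_t bits = int_bits(f);
  assert((uint32_t)bits == 0x80000000u);  // bit pattern preserved, not value-converted
  float back = float_from_bits(bits);
  printf("bits=0x%08x back=%f\n", (unsigned)bits, back);
  return 0;
}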
 
--- a/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -167,17 +167,6 @@
 typedef uint32_t juint;
 typedef uint64_t julong;
 
-//----------------------------------------------------------------------------------------------------
-// Special (possibly not-portable) casts
-// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
-// %%%%%% These seem like standard C++ to me--how about factoring them out? - Ungar
-
-inline jint    jint_cast   (jfloat  x)           { return *(jint*   )&x; }
-inline jlong   jlong_cast  (jdouble x)           { return *(jlong*  )&x; }
-inline julong  julong_cast (jdouble x)           { return *(julong* )&x; }
-
-inline jfloat  jfloat_cast (jint    x)           { return *(jfloat* )&x; }
-inline jdouble jdouble_cast(jlong   x)           { return *(jdouble*)&x; }
 
 //----------------------------------------------------------------------------------------------------
 // Constant for jlong (specifying an long long canstant is C++ compiler specific)
--- a/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -183,15 +183,6 @@
 typedef unsigned int       juint;
 typedef unsigned long long julong;
 
-//----------------------------------------------------------------------------------------------------
-// Special (possibly not-portable) casts
-// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
-
-inline jint    jint_cast   (jfloat  x)           { return *(jint*   )&x; }
-inline jlong   jlong_cast  (jdouble x)           { return *(jlong*  )&x; }
-
-inline jfloat  jfloat_cast (jint    x)           { return *(jfloat* )&x; }
-inline jdouble jdouble_cast(jlong   x)           { return *(jdouble*)&x; }
 
 //----------------------------------------------------------------------------------------------------
 // Constant for jlong (specifying an long long constant is C++ compiler specific)
--- a/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -116,16 +116,6 @@
 typedef unsigned int     juint;
 typedef unsigned __int64 julong;
 
-//----------------------------------------------------------------------------------------------------
-// Special (possibly not-portable) casts
-// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
-
-inline jint    jint_cast   (jfloat  x)           { return *(jint*   )&x; }
-inline jlong   jlong_cast  (jdouble x)           { return *(jlong*  )&x; }
-
-inline jfloat  jfloat_cast (jint    x)           { return *(jfloat* )&x; }
-inline jdouble jdouble_cast(jlong   x)           { return *(jdouble*)&x; }
-
 
 //----------------------------------------------------------------------------------------------------
 // Non-standard stdlib-like stuff:
--- a/hotspot/src/share/vm/utilities/globalDefinitions_xlc.hpp	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_xlc.hpp	Fri Aug 08 10:35:05 2014 -0700
@@ -114,16 +114,6 @@
 typedef uint32_t juint;
 typedef uint64_t julong;
 
-//----------------------------------------------------------------------------------------------------
-// Special (possibly not-portable) casts
-// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
-// %%%%%% These seem like standard C++ to me--how about factoring them out? - Ungar
-
-inline jint    jint_cast   (jfloat  x)           { return *(jint*   )&x; }
-inline jlong   jlong_cast  (jdouble x)           { return *(jlong*  )&x; }
-
-inline jfloat  jfloat_cast (jint    x)           { return *(jfloat* )&x; }
-inline jdouble jdouble_cast(jlong   x)           { return *(jdouble*)&x; }
 
 //----------------------------------------------------------------------------------------------------
 // Constant for jlong (specifying an long long canstant is C++ compiler specific)
--- a/hotspot/test/compiler/5091921/Test7005594.sh	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/test/compiler/5091921/Test7005594.sh	Fri Aug 08 10:35:05 2014 -0700
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
@@ -78,7 +78,7 @@
 
 ${COMPILEJAVA}/bin/javac ${TESTJAVACOPTS} -d . Test7005594.java
 
-${TESTJAVA}/bin/java ${TESTVMOPTS} -Xmx1600m -Xms1600m -XX:+IgnoreUnrecognizedVMOptions -XX:-ZapUnusedHeapArea -Xcomp -XX:CompileOnly=Test7005594.test Test7005594 > test.out 2>&1
+${TESTJAVA}/bin/java ${TESTOPTS} -Xmx1600m -Xms1600m -XX:+IgnoreUnrecognizedVMOptions -XX:-ZapUnusedHeapArea -Xcomp -XX:CompileOnly=Test7005594.test Test7005594 > test.out 2>&1
 
 result=$?
 
--- a/hotspot/test/compiler/6857159/Test6857159.sh	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/test/compiler/6857159/Test6857159.sh	Fri Aug 08 10:35:05 2014 -0700
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,7 @@
 
 ${COMPILEJAVA}/bin/javac ${TESTJAVACOPTS} -d . Test6857159.java
 
-${TESTJAVA}/bin/java  ${TESTVMOPTS} -Xbatch -XX:+PrintCompilation -XX:CompileOnly=Test6857159\$ct.run Test6857159 > test.out 2>&1
+${TESTJAVA}/bin/java  ${TESTOPTS} -Xbatch -XX:+PrintCompilation -XX:CompileOnly=Test6857159\$ct.run Test6857159 > test.out 2>&1
 
 grep "COMPILE SKIPPED" test.out
 
--- a/hotspot/test/compiler/6894807/IsInstanceTest.java	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/test/compiler/6894807/IsInstanceTest.java	Fri Aug 08 10:35:05 2014 -0700
@@ -1,3 +1,26 @@
+/*
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
 /*
  * @test
  * @bug 6894807
--- a/hotspot/test/compiler/6894807/Test6894807.sh	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/test/compiler/6894807/Test6894807.sh	Fri Aug 08 10:35:05 2014 -0700
@@ -1,55 +1,38 @@
 #!/bin/sh
+#
+#  Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+#  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+# 
+#  This code is free software; you can redistribute it and/or modify it
+#  under the terms of the GNU General Public License version 2 only, as
+#  published by the Free Software Foundation.
+# 
+#  This code is distributed in the hope that it will be useful, but WITHOUT
+#  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+#  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+#  version 2 for more details (a copy is included in the LICENSE file that
+#  accompanied this code).
+# 
+#  You should have received a copy of the GNU General Public License version
+#  2 along with this work; if not, write to the Free Software Foundation,
+#  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+#  Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+#  or visit www.oracle.com if you need additional information or have any
+#  questions.
+# 
 
 if [ "${TESTSRC}" = "" ]
-then TESTSRC=.
+then
+  TESTSRC=${PWD}
+  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
 fi
-
-if [ "${TESTJAVA}" = "" ]
-then
-  PARENT=`dirname \`which java\``
-  TESTJAVA=`dirname ${PARENT}`
-  echo "TESTJAVA not set, selecting " ${TESTJAVA}
-  echo "If this is incorrect, try setting the variable manually."
-fi
-
-if [ "${TESTCLASSES}" = "" ]
-then
-  echo "TESTCLASSES not set.  Test cannot execute.  Failed."
-  exit 1
-fi
+echo "TESTSRC=${TESTSRC}"
 
-# set platform-dependent variables
-OS=`uname -s`
-case "$OS" in
-  SunOS | Linux | Darwin )
-    NULL=/dev/null
-    PS=":"
-    FS="/"
-    ;;
-  Windows_* )
-    NULL=NUL
-    PS=";"
-    FS="\\"
-    ;;
-  CYGWIN_* )
-    NULL=/dev/null
-    PS=";"
-    FS="/"
-    ;;
-  * )
-    echo "Unrecognized system!"
-    exit 1;
-    ;;
-esac
+## Adding common setup Variables for running shell tests.
+. ${TESTSRC}/../../test_env.sh
 
-JEMMYPATH=${CPAPPEND}
-CLASSPATH=.${PS}${TESTCLASSES}${PS}${JEMMYPATH} ; export CLASSPATH
-
-THIS_DIR=`pwd`
-
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -version
-
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} IsInstanceTest > test.out 2>&1
+${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} IsInstanceTest > test.out 2>&1
 
 cat test.out
 
--- a/hotspot/test/compiler/6932496/Test6932496.java	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/test/compiler/6932496/Test6932496.java	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,26 +26,162 @@
  * @test
  * @bug 6932496
  * @summary incorrect deopt of jsr subroutine on 64 bit c1
- *
- * @compile -source 1.5 -target 1.5 -XDjsrlimit=0 Test6932496.java
- * @run main/othervm -Xcomp -XX:CompileOnly=Test6932496.m Test6932496
+ * @run main/othervm -Xcomp -XX:CompileOnly=Test.test Test6932496
  */
+import java.lang.reflect.Method;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.io.IOException;
 
-public class Test6932496 {
-    static class A {
-        volatile boolean flag = false;
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import jdk.internal.org.objectweb.asm.FieldVisitor;
+import jdk.internal.org.objectweb.asm.Opcodes;
+import jdk.internal.org.objectweb.asm.Type;
+import jdk.internal.org.objectweb.asm.Label;
+
+public class Test6932496 extends ClassLoader {
+    private static final int CLASS_FILE_VERSION = 49;
+    private static final String CLASS_TEST = "Test";
+    private static final String CLASS_OBJECT = "java/lang/Object";
+    private static final String METHOD_INIT = "<init>";
+    private static final String METHOD_TEST = "test";
+    private static final String DESC_VOID_METHOD = "()V";
+    private static final String FIELD_FLAG = "flag";
+
+    public static void main(String[] args) {
+        Test6932496 test = new Test6932496();
+        test.execute();
     }
 
-    static void m() {
+    private void execute() {
+        byte[] bytecode = Test6932496.generateTestClass();
+
         try {
-        } finally {
-            A a = new A();
-            a.flag = true;
+            Files.write(Paths.get("Test.class.dump"), bytecode);
+        } catch (IOException e) {
+            System.err.println("classfile dump failed : " + e.getMessage());
+            e.printStackTrace();
+        }
+        try {
+            Class aClass = defineClass(CLASS_TEST, bytecode, 0, bytecode.length);
+            Method test = aClass.getDeclaredMethod(METHOD_TEST);
+            test.invoke(null);
+        } catch (ClassFormatError | IllegalArgumentException
+                    | ReflectiveOperationException e) {
+            throw new RuntimeException("TESTBUG : generated class is invalid", e);
         }
     }
 
+    /*
+        public class Test {
+            volatile boolean flag = false;
+            public static void m() {
+                try {
+                } finally {
+                    Test test = new Test();
+                    test.flag = true;
+                }
+            }
+        }
+    */
+    private static byte[] generateTestClass() {
+        ClassWriter cw = new ClassWriter(0);
+        cw.visit(CLASS_FILE_VERSION, Opcodes.ACC_PUBLIC + Opcodes.ACC_SUPER,
+                CLASS_TEST, null, CLASS_OBJECT, null);
+        // volatile boolean flag;
+        {
+            FieldVisitor fv = cw.visitField(Opcodes.ACC_VOLATILE, FIELD_FLAG,
+                    Type.BOOLEAN_TYPE.getDescriptor(),
+                    /* signature = */ null, /* value = */ null);
+        }
 
-    static public void main(String[] args) {
-        m();
+        /*
+            public Test() {
+                flag = false;
+            }
+        */
+        {
+            MethodVisitor mv = cw.visitMethod(Opcodes.ACC_PUBLIC,
+                    METHOD_INIT, DESC_VOID_METHOD,
+                    /* signature = */ null, /* exceptions = */ null);
+
+            mv.visitCode();
+            mv.visitVarInsn(Opcodes.ALOAD, 0);
+            mv.visitMethodInsn(Opcodes.INVOKESPECIAL, CLASS_OBJECT, METHOD_INIT,
+                    DESC_VOID_METHOD, false);
+
+            mv.visitVarInsn(Opcodes.ALOAD, 0);
+            mv.visitInsn(Opcodes.ICONST_0);
+            mv.visitFieldInsn(Opcodes.PUTFIELD, CLASS_TEST, FIELD_FLAG,
+                    Type.BOOLEAN_TYPE.getDescriptor());
+
+            mv.visitInsn(Opcodes.RETURN);
+            mv.visitMaxs(/* stack = */ 2, /* locals = */ 1);
+            mv.visitEnd();
+        }
+
+        /*
+            public static void m() {
+                try {
+                } finally {
+                    Test test = new Test();
+                    test.flag = true;
+                }
+            }
+        */
+        {
+            MethodVisitor mv = cw.visitMethod(
+                    Opcodes.ACC_STATIC + Opcodes.ACC_PUBLIC,
+                    METHOD_TEST, DESC_VOID_METHOD,
+                    /* signature = */ null, /* exceptions = */ null);
+            Label beginLabel = new Label();
+            Label block1EndLabel = new Label();
+            Label handlerLabel = new Label();
+            Label block2EndLabel = new Label();
+            Label label = new Label();
+            Label endLabel = new Label();
+
+            mv.visitCode();
+            mv.visitTryCatchBlock(beginLabel, block1EndLabel, handlerLabel,
+                    /* type = <any> */ null);
+            mv.visitTryCatchBlock(handlerLabel, block2EndLabel, handlerLabel,
+                    /* type = <any> */ null);
+
+            mv.visitLabel(beginLabel);
+            mv.visitJumpInsn(Opcodes.JSR, label);
+            mv.visitLabel(block1EndLabel);
+            mv.visitJumpInsn(Opcodes.GOTO, endLabel);
+
+            mv.visitLabel(handlerLabel);
+            mv.visitVarInsn(Opcodes.ASTORE, 0);
+            mv.visitJumpInsn(Opcodes.JSR, label);
+            mv.visitLabel(block2EndLabel);
+            mv.visitVarInsn(Opcodes.ALOAD, 0);
+            mv.visitInsn(Opcodes.ATHROW);
+
+            mv.visitLabel(label);
+            mv.visitVarInsn(Opcodes.ASTORE, 1);
+            mv.visitTypeInsn(Opcodes.NEW, CLASS_TEST);
+            mv.visitInsn(Opcodes.DUP);
+            mv.visitMethodInsn(Opcodes.INVOKESPECIAL, CLASS_TEST, METHOD_INIT,
+                    DESC_VOID_METHOD, false);
+            mv.visitVarInsn(Opcodes.ASTORE, 2);
+
+            mv.visitVarInsn(Opcodes.ALOAD, 2);
+            mv.visitInsn(Opcodes.ICONST_1);
+            mv.visitFieldInsn(Opcodes.PUTFIELD, CLASS_TEST, FIELD_FLAG,
+                    Type.BOOLEAN_TYPE.getDescriptor());
+
+            mv.visitVarInsn(Opcodes.RET, 1);
+
+            mv.visitLabel(endLabel);
+            mv.visitInsn(Opcodes.RETURN);
+            mv.visitMaxs(/* stack = */ 2, /* locals = */ 3);
+            mv.visitEnd();
+        }
+
+        cw.visitEnd();
+        return cw.toByteArray();
     }
 }
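For readers less familiar with the JSR/RET encoding of try/finally that this generator reproduces, the generated Test.test() lays out roughly as shown below. This is a sketch inferred from the MethodVisitor calls above, not output produced by the test; the label names match the local variables in generateTestClass().

    // Approximate layout of the generated Test.test() bytecode (illustrative sketch):
    // beginLabel:      jsr   label          // inline the finally subroutine
    // block1EndLabel:  goto  endLabel
    // handlerLabel:    astore_0             // catch-any handler: save the pending exception
    //                  jsr   label          // run the finally subroutine, then rethrow
    // block2EndLabel:  aload_0
    //                  athrow
    // label:           astore_1             // subroutine entry: save the return address
    //                  new Test; dup; invokespecial Test.<init>; astore_2
    //                  aload_2; iconst_1; putfield Test.flag
    //                  ret   1              // return to whichever jsr called the subroutine
    // endLabel:        return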
--- a/hotspot/test/compiler/7068051/Test7068051.sh	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/test/compiler/7068051/Test7068051.sh	Fri Aug 08 10:35:05 2014 -0700
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
@@ -41,5 +41,5 @@
 
 ${COMPILEJAVA}/bin/javac ${TESTJAVACOPTS} -d . Test7068051.java
 
-${TESTJAVA}/bin/java ${TESTVMOPTS} -showversion -Xbatch Test7068051 foo.jar
+${TESTJAVA}/bin/java ${TESTOPTS} -showversion -Xbatch Test7068051 foo.jar
 
--- a/hotspot/test/compiler/7070134/Test7070134.sh	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/test/compiler/7070134/Test7070134.sh	Fri Aug 08 10:35:05 2014 -0700
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,7 @@
 
 ${COMPILEJAVA}/bin/javac ${TESTJAVACOPTS} -d . Stemmer.java
 
-${TESTJAVA}/bin/java ${TESTVMOPTS} -Xbatch Stemmer words > test.out 2>&1
+${TESTJAVA}/bin/java ${TESTOPTS} -Xbatch Stemmer words > test.out 2>&1
 
 exit $?
 
--- a/hotspot/test/compiler/7200264/Test7200264.sh	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/test/compiler/7200264/Test7200264.sh	Fri Aug 08 10:35:05 2014 -0700
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
 ## Adding common setup Variables for running shell tests.
 . ${TESTSRC}/../../test_env.sh
 
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -Xinternalversion | sed 's/amd64/x86/' | grep "x86" | grep "Server VM" | grep "debug"
+${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} -Xinternalversion | sed 's/amd64/x86/' | grep "x86" | grep "Server VM" | grep "debug"
 
 # Only test fastdebug Server VM on x86
 if [ $? != 0 ]
@@ -43,7 +43,7 @@
 fi
 
 # grep for support integer multiply vectors (cpu with SSE4.1)
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -XX:+PrintMiscellaneous -XX:+Verbose -version | grep "cores per cpu" | grep "sse4.1"
+${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} -XX:+PrintMiscellaneous -XX:+Verbose -version | grep "cores per cpu" | grep "sse4.1"
 
 if [ $? != 0 ]
 then
@@ -55,7 +55,7 @@
 cp ${TESTSRC}${FS}TestIntVect.java .
 ${COMPILEJAVA}${FS}bin${FS}javac ${TESTJAVACOPTS} -d . TestIntVect.java
 
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -Xbatch -XX:-TieredCompilation -XX:CICompilerCount=1 -XX:+PrintCompilation -XX:+TraceNewVectors TestIntVect > test.out 2>&1
+${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} -Xbatch -XX:-TieredCompilation -XX:CICompilerCount=1 -XX:+PrintCompilation -XX:+TraceNewVectors TestIntVect > test.out 2>&1
 
 COUNT=`grep AddVI test.out | wc -l | awk '{print $1}'`
 if [ $COUNT -lt 4 ]
--- a/hotspot/test/compiler/ciReplay/TestSA.sh	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/test/compiler/ciReplay/TestSA.sh	Fri Aug 08 10:35:05 2014 -0700
@@ -68,7 +68,7 @@
 fi
 
 echo "dumpreplaydata -a > ${replay_data}" | \
-        ${JAVA} ${TESTVMOPTS} \
+        ${JAVA} ${TESTOPTS} \
         -cp ${TESTJAVA}${FS}lib${FS}sa-jdi.jar \
         sun.jvm.hotspot.CLHSDB  ${JAVA} ${core_file}
 
--- a/hotspot/test/compiler/ciReplay/common.sh	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/test/compiler/ciReplay/common.sh	Fri Aug 08 10:35:05 2014 -0700
@@ -22,6 +22,7 @@
 # questions.
 # 
 # 
+set -x
 
 # $1 - error code
 # $2 - test name
@@ -56,7 +57,7 @@
     shift
     name=$1
     shift
-    VMOPTS="${TESTVMOPTS} $@"
+    VMOPTS="${TESTOPTS} $@"
     echo "POSITIVE TEST [$name]"
     start_test ${VMOPTS}
     exit_code=$?
@@ -75,7 +76,7 @@
     shift
     name=$1
     shift
-    VMOPTS="${TESTVMOPTS} $@"
+    VMOPTS="${TESTOPTS} $@"
     echo "NEGATIVE TEST [$name]"
     start_test ${VMOPTS}
     exit_code=$?
@@ -149,7 +150,7 @@
 
 replay_data=test_replay.txt
 
-${JAVA} ${TESTVMOPTS} -Xinternalversion 2>&1 | grep debug
+${JAVA} ${TESTOPTS} -Xinternalversion 2>&1 | grep debug
 
 # Only test fastdebug 
 if [ $? -ne 0 ]
@@ -158,7 +159,7 @@
     exit 0
 fi
 
-is_int=`${JAVA} ${TESTVMOPTS} -version 2>&1 | grep -c "interpreted mode"`
+is_int=`${JAVA} ${TESTOPTS} -version 2>&1 | grep -c "interpreted mode"`
 # Not applicable for Xint
 if [ $is_int -ne 0 ]
 then
@@ -168,14 +169,14 @@
 
 cleanup
 
-client_available=`${JAVA} ${TESTVMOPTS} -client -Xinternalversion 2>&1 | \
+client_available=`${JAVA} ${TESTOPTS} -client -Xinternalversion 2>&1 | \
         grep -c Client`
-server_available=`${JAVA} ${TESTVMOPTS} -server -Xinternalversion 2>&1 | \
+server_available=`${JAVA} ${TESTOPTS} -server -Xinternalversion 2>&1 | \
         grep -c Server`
-tiered_available=`${JAVA} ${TESTVMOPTS} -XX:+TieredCompilation -XX:+PrintFlagsFinal -version | \
+tiered_available=`${JAVA} ${TESTOPTS} -XX:+TieredCompilation -XX:+PrintFlagsFinal -version | \
         grep TieredCompilation | \
         grep -c true`
-is_tiered=`${JAVA} ${TESTVMOPTS} -XX:+PrintFlagsFinal -version | \
+is_tiered=`${JAVA} ${TESTOPTS} -XX:+PrintFlagsFinal -version | \
         grep TieredCompilation | \
         grep -c true`
 # CompLevel_simple -- C1
@@ -207,7 +208,7 @@
         fi
     fi
 
-    cmd="${JAVA} ${TESTVMOPTS} $@ \
+    cmd="${JAVA} ${TESTOPTS} $@ \
             -Xms8m \
             -Xmx32m \
             -XX:MetaspaceSize=4m \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/classUnloading/methodUnloading/TestMethodUnloading.java	Fri Aug 08 10:35:05 2014 -0700
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.hotspot.WhiteBox;
+
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.net.URLClassLoader;
+
+/*
+ * @test MethodUnloadingTest
+ * @bug 8029443
+ * @summary Tests the unloading of methods due to class unloading
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestMethodUnloading
+ * @build WorkerClass
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-BackgroundCompilation -XX:-UseCompressedOops -XX:+UseParallelGC -XX:CompileOnly=TestMethodUnloading::doWork TestMethodUnloading
+ */
+public class TestMethodUnloading {
+    private static final String workerClassName = "WorkerClass";
+    private static int work = -1;
+
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static int COMP_LEVEL_SIMPLE = 1;
+    private static int COMP_LEVEL_FULL_OPTIMIZATION = 4;
+
+    /**
+     * Does some work by either using the workerClass or locally producing values.
+     * @param workerClass Class performing some work (will be unloaded)
+     * @param useWorker If true the workerClass is used
+     */
+    static private void doWork(Class<?> workerClass, boolean useWorker) throws InstantiationException, IllegalAccessException {
+        if (useWorker) {
+            // Create a new instance
+            Object worker = workerClass.newInstance();
+            // We would like to call a method of WorkerClass here but we cannot cast to WorkerClass
+            // because the class was loaded by a different class loader. One solution would be to use
+            // reflection but since we want C2 to implement the call as an optimized IC we call
+            // Object::hashCode() here which actually calls WorkerClass::hashCode().
+            // C2 will then implement this call as an optimized IC that points to a to-interpreter stub
+            // referencing the Method* for WorkerClass::hashCode().
+            work = worker.hashCode();
+            if (work != 42) {
+                new RuntimeException("Work not done");
+            }
+        } else {
+            // Do some important work here
+            work = 1;
+        }
+    }
+
+    /**
+     * Makes sure that the method is compiled, forcing compilation if it is not yet compiled.
+     * @param m Method to be checked
+     */
+    static private void makeSureIsCompiled(Method m) {
+        // Make sure background compilation is disabled
+        if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) {
+            throw new RuntimeException("Background compilation enabled");
+        }
+
+        // Check if already compiled
+        if (!WHITE_BOX.isMethodCompiled(m)) {
+            // If not, try to compile it with C2
+            if(!WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION)) {
+                // C2 compiler not available, try to compile with C1
+                WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_SIMPLE);
+            }
+            // Because background compilation is disabled, method should now be compiled
+            if(!WHITE_BOX.isMethodCompiled(m)) {
+                throw new RuntimeException(m + " not compiled");
+            }
+        }
+    }
+
+    /**
+     * This test creates stale Method* metadata in a to-interpreter stub of an optimized IC.
+     *
+     * The following steps are performed:
+     * (1) A workerClass is loaded by a custom class loader
+     * (2) The method doWork that calls a method of the workerClass is compiled. The call
+ *     is implemented as an optimized IC calling a to-interpreter stub. The to-interpreter
+ *     stub contains a Method* to a workerClass method.
+ * (3) Unloading of the workerClass is forced. The to-interpreter stub now contains a dead Method*.
+     * (4) Depending on the implementation of the IC, the compiled version of doWork should still be
+     *     valid. We call it again without using the workerClass.
+     */
+    static public void main(String[] args) throws Exception {
+        // (1) Create a custom class loader with no parent class loader
+        URL url = TestMethodUnloading.class.getProtectionDomain().getCodeSource().getLocation();
+        URLClassLoader loader = new URLClassLoader(new URL[] {url}, null);
+
+        // Load worker class with custom class loader
+        Class<?> workerClass = Class.forName(workerClassName, true, loader);
+
+        // (2) Make sure all paths of doWork are profiled and compiled
+        for (int i = 0; i < 100000; ++i) {
+            doWork(workerClass, true);
+            doWork(workerClass, false);
+        }
+
+        // Make sure doWork is compiled now
+        Method doWork = TestMethodUnloading.class.getDeclaredMethod("doWork", Class.class, boolean.class);
+        makeSureIsCompiled(doWork);
+
+        // (3) Throw away class loader and reference to workerClass to allow unloading
+        loader.close();
+        loader = null;
+        workerClass = null;
+
+        // Force garbage collection to trigger unloading of workerClass
+        // Dead reference to WorkerClass::hashCode triggers JDK-8029443
+        WHITE_BOX.fullGC();
+
+        // (4) Depending on the implementation of the IC, the compiled version of doWork
+        // may still be valid here. Execute it without a workerClass.
+        doWork(null, false);
+        if (work != 1) {
+            throw new RuntimeException("Work not done");
+        }
+
+        doWork(Object.class, false);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/classUnloading/methodUnloading/WorkerClass.java	Fri Aug 08 10:35:05 2014 -0700
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * Worker class that is dynamically loaded/unloaded by TestMethodUnloading.
+ */
+public class WorkerClass {
+    /**
+     * We override hashCode here to be able to access this implementation
+     * via an Object reference (we cannot cast to WorkerClass).
+     */
+    @Override
+    public int hashCode() {
+        return 42;
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/macronodes/TestEliminateAllocationPhi.java	Fri Aug 08 10:35:05 2014 -0700
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8046698
+ * @summary PhiNode inserted between AllocateNode and Initialization node confuses allocation elimination
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestEliminateAllocationPhi
+ *
+ */
+
+public class TestEliminateAllocationPhi {
+
+    // This will return I when called from m() and, once optimized, will
+    // go away, but it confuses escape analysis in m(): it finds I as
+    // non-escaping but not scalar replaceable. It is kept in its own
+    // method so that we can make the profile of the if() branch look
+    // like it is taken sometimes.
+    static Integer m2(Integer I, int i) {
+        for (; i < 10; i=(i+2)*(i+2)) {
+        }
+        if (i == 121) {
+            return II;
+        }
+        return I;
+    }
+
+    static Integer II = new Integer(42);
+
+    static int m(int[] integers, boolean flag) {
+        int j = 0;
+        while(true) {
+            try {
+                int k = integers[j++];
+                // A branch that will cause loop unswitching
+                if (flag) {
+                    k += 42;
+                }
+                if (k < 1000) {
+                    throw new Exception();
+                }
+                // Because of the try/catch the Allocate node for this
+                // new will be in the loop while the Initialization
+                // node will be outside the loop. When loop
+                // unswitching happens, the Allocate node will be
+                // cloned and the results of both will be inputs to a
+                // Phi that will be between the Allocate nodes and the
+                // Initialization nodes.
+                Integer I = new Integer(k);
+
+                I = m2(I, 0);
+
+                int i = I.intValue();
+                return i;
+            } catch(Exception e) {
+            }
+        }
+    }
+
+    static public void main(String[] args) {
+        for (int i = 0; i < 5000; i++) {
+            m2(null, 1);
+        }
+
+        int[] integers = { 2000 };
+        for (int i = 0; i < 6000; i++) {
+            m(integers, (i%2) == 0);
+        }
+        int[] integers2 = { 1, 2, 3, 4, 5, 2000 };
+        for (int i = 0; i < 10000; i++) {
+            m(integers2, (i%2) == 0);
+        }
+    }
+}
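The comments in m() refer to loop unswitching. As a rough illustration only (not code from this changeset), unswitching hoists the loop-invariant test on flag out of the loop and duplicates the loop body, which is how two cloned Allocate nodes end up feeding a single Phi in front of the Initialization node:

    // Rough effect of loop unswitching on m(), ignoring the try/catch (illustrative sketch):
    if (flag) {
        while (true) { /* int k = integers[j++]; k += 42; ... new Integer(k); ... */ }
    } else {
        while (true) { /* int k = integers[j++];          ... new Integer(k); ... */ }
    }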
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/osr/TestOSRWithNonEmptyStack.java	Fri Aug 08 10:35:05 2014 -0700
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.Label;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import static jdk.internal.org.objectweb.asm.Opcodes.*;
+
+/**
+ * @test
+ * @bug 8051344
+ * @summary Force OSR compilation with non-empty stack at the OSR entry point.
+ * @compile -XDignore.symbol.file TestOSRWithNonEmptyStack.java
+ * @run main/othervm -XX:CompileOnly=TestCase.test TestOSRWithNonEmptyStack
+ */
+public class TestOSRWithNonEmptyStack extends ClassLoader {
+    private static final int CLASS_FILE_VERSION = 52;
+    private static final String CLASS_NAME = "TestCase";
+    private static final String METHOD_NAME = "test";
+    private static final int ITERATIONS = 1_000_000;
+
+    private static byte[] generateTestClass() {
+        ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES);
+
+        cw.visit(TestOSRWithNonEmptyStack.CLASS_FILE_VERSION, ACC_PUBLIC,
+                TestOSRWithNonEmptyStack.CLASS_NAME, null, "java/lang/Object",
+                null);
+
+        TestOSRWithNonEmptyStack.generateConstructor(cw);
+        TestOSRWithNonEmptyStack.generateTestMethod(cw);
+
+        cw.visitEnd();
+        return cw.toByteArray();
+    }
+
+    private static void generateConstructor(ClassWriter classWriter) {
+        MethodVisitor mv = classWriter.visitMethod(ACC_PUBLIC, "<init>", "()V",
+                null, null);
+
+        mv.visitCode();
+
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V",
+                false);
+        mv.visitInsn(RETURN);
+
+        mv.visitMaxs(0, 0);
+        mv.visitEnd();
+    }
+
+    private static void generateTestMethod(ClassWriter classWriter) {
+        MethodVisitor mv = classWriter.visitMethod(ACC_PUBLIC,
+                TestOSRWithNonEmptyStack.METHOD_NAME, "()V", null, null);
+        Label osrEntryPoint = new Label();
+
+        mv.visitCode();
+        // Push 'this' onto the stack before the OSR entry point so the stack is non-empty at the OSR entry
+        mv.visitVarInsn(ALOAD, 0);
+        // Setup loop counter
+        mv.visitInsn(ICONST_0);
+        mv.visitVarInsn(ISTORE, 1);
+        // Begin loop
+        mv.visitLabel(osrEntryPoint);
+        // Increment loop counter
+        mv.visitVarInsn(ILOAD, 1);
+        mv.visitInsn(ICONST_1);
+        mv.visitInsn(IADD);
+        // Duplicate it for loop condition check
+        mv.visitInsn(DUP);
+        mv.visitVarInsn(ISTORE, 1);
+        // Check loop condition
+        mv.visitLdcInsn(TestOSRWithNonEmptyStack.ITERATIONS);
+        mv.visitJumpInsn(IF_ICMPLT, osrEntryPoint);
+        // Pop 'this'.
+        mv.visitInsn(POP);
+        mv.visitInsn(RETURN);
+
+        mv.visitMaxs(0, 0);
+        mv.visitEnd();
+    }
+
+    private void run() {
+        byte[] bytecode = TestOSRWithNonEmptyStack.generateTestClass();
+
+        try {
+            Class klass = defineClass(TestOSRWithNonEmptyStack.CLASS_NAME,
+                    bytecode, 0, bytecode.length);
+
+            Constructor ctor = klass.getConstructor();
+            Method method = klass.getDeclaredMethod(
+                    TestOSRWithNonEmptyStack.METHOD_NAME);
+
+            Object testCase = ctor.newInstance();
+            method.invoke(testCase);
+        } catch (Exception e) {
+            throw new RuntimeException(
+                    "Test bug: generated class should be valid.", e);
+        }
+    }
+
+    public static void main(String args[]) {
+        new TestOSRWithNonEmptyStack().run();
+    }
+}
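The generated TestCase.test() has no exact Java source equivalent, since javac would never leave a reference to this on the operand stack across the loop's back edge; behaviorally, though, it amounts to the sketch below (a sketch only, not part of the test; the bound is ITERATIONS from above):

    // What the generated method computes, minus the extra 'this' kept on the operand stack:
    public void test() {
        int i = 0;
        do {
            i++;
        } while (i < 1_000_000);
    }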
--- a/hotspot/test/runtime/6626217/Test6626217.sh	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/test/runtime/6626217/Test6626217.sh	Fri Aug 08 10:35:05 2014 -0700
@@ -69,7 +69,7 @@
 ${MV} many_loader.impl1 many_loader.class
 ${RM} many_loader.java
 
-${JAVA} ${TESTVMOPTS} -Xverify -Xint -cp . bug_21227 >test.out 2>&1
+${JAVA} ${TESTOPTS} -Xverify -Xint -cp . bug_21227 >test.out 2>&1
 grep "loader constraint" test.out
 exit $?
 
--- a/hotspot/test/runtime/6888954/vmerrors.sh	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/test/runtime/6888954/vmerrors.sh	Fri Aug 08 10:35:05 2014 -0700
@@ -1,4 +1,4 @@
-# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,16 @@
 # export TESTJAVA TESTVMOPTS
 # sh test/runtime/6888954/vmerrors.sh
 
+if [ "${TESTSRC}" = "" ]
+then
+  TESTSRC=${PWD}
+  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
+fi
+echo "TESTSRC=${TESTSRC}"
+
+## Adding common setup Variables for running shell tests.
+. ${TESTSRC}/../../test_env.sh
+
 ulimit -c 0 # no core files
 
 i=1
@@ -84,7 +94,7 @@
     i2=$i
     [ $i -lt 10 ] && i2=0$i
 
-    "$TESTJAVA/bin/java" $TESTVMOPTS -XX:+IgnoreUnrecognizedVMOptions \
+    "$TESTJAVA/bin/java" $TESTOPTS -XX:+IgnoreUnrecognizedVMOptions \
         -XX:-TransmitErrorReport -XX:-CreateMinidumpOnCrash \
         -XX:ErrorHandlerTest=${i} -version > ${i2}.out 2>&1
 
--- a/hotspot/test/runtime/7162488/Test7162488.sh	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/test/runtime/7162488/Test7162488.sh	Fri Aug 08 10:35:05 2014 -0700
@@ -1,5 +1,5 @@
 #
-#  Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+#  Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
 #  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 #  This code is free software; you can redistribute it and/or modify it
@@ -45,14 +45,14 @@
 #
 OPTION=this_is_not_an_option
 
-${JAVA} ${TESTVMOPTS} -showversion -XX:${OPTION} 2>&1 | grep "Unrecognized VM option" 
+${JAVA} -showversion -XX:${OPTION} 2>&1 | grep "Unrecognized VM option" 
 if [ "$?" != "0" ]
 then
   printf "FAILED: option not flagged as unrecognized.\n"
   exit 1
 fi
 
-${JAVA} ${TESTVMOPTS} -showversion -XX:${OPTION} 2>&1 | grep ${OPTION}
+${JAVA} -showversion -XX:${OPTION} 2>&1 | grep ${OPTION}
 if [ "$?" != "0" ]
 then
   printf "FAILED: bad option not named as being bad.\n"
--- a/hotspot/test/test_env.sh	Fri Aug 08 07:38:34 2014 -0700
+++ b/hotspot/test/test_env.sh	Fri Aug 08 10:35:05 2014 -0700
@@ -1,6 +1,6 @@
 #!/bin/sh
 #
-#  Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+#  Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 #  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 #  This code is free software; you can redistribute it and/or modify it
@@ -50,6 +50,9 @@
 fi
 echo "TESTCLASSES=${TESTCLASSES}"
 
+TESTOPTS="${TESTVMOPTS} ${TESTJAVAOPTS}"
+echo "TESTOPTS=${TESTOPTS}"
+
 # set platform-dependent variables
 OS=`uname -s`
 case "$OS" in
@@ -101,14 +104,14 @@
 echo "THIS_DIR=${THIS_DIR}"
 
 # Check to ensure the java defined actually works
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -version
+${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} -version
 if [ $? != 0 ]; then
-  echo "Wrong TESTJAVA or TESTVMOPTS:"
-  echo $TESTJAVA TESTVMOPTS
+  echo "Wrong TESTJAVA or TESTJAVAOPTS or TESTVMOPTS:"
+  echo "$TESTJAVA" "$TESTJAVAOPTS" "$TESTVMOPTS"
   exit 1
 fi
 
-${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -Xinternalversion > vm_version.out 2>&1
+${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} -Xinternalversion > vm_version.out 2>&1
 
 VM_TYPE="unknown"
 grep "Server" vm_version.out > ${NULL}