Merge
author:    kvn
date:      Thu, 08 Dec 2011 10:27:33 -0800
changeset: 11201:43efb25e1710
parent:    11187:8bf3c2a2b5e4 (current diff)
parent:    11200:ccf66f30d4a8 (diff)
child:     11207:07d48ddd8231

hotspot/make/jprt.properties
hotspot/src/share/vm/runtime/globals.hpp
--- a/hotspot/make/jprt.properties	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/make/jprt.properties	Thu Dec 08 10:27:33 2011 -0800
@@ -248,7 +248,7 @@
 
 jprt.my.solaris.sparc.test.targets= \
     ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jvm98, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jvm98_tiered, \
+    ${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jvm98_nontiered, \
     ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-scimark, \
     ${jprt.my.solaris.sparc}-product-{c1|c2}-runThese, \
     ${jprt.my.solaris.sparc}-fastdebug-c1-runThese_Xshare, \
@@ -267,7 +267,7 @@
     ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_G1, \
     ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParOldGC, \
     ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_default, \
-    ${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jbb_default_tiered, \
+    ${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jbb_default_nontiered, \
     ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_SerialGC, \
     ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_ParallelGC, \
     ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_CMS, \
@@ -276,7 +276,7 @@
 
 jprt.my.solaris.sparcv9.test.targets= \
     ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_tiered, \
+    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_nontiered, \
     ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-scimark, \
     ${jprt.my.solaris.sparcv9}-product-c2-runThese, \
     ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_default, \
@@ -294,7 +294,7 @@
     ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_G1, \
     ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParOldGC, \
     ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default_tiered, \
+    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default_nontiered, \
     ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_SerialGC, \
     ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParallelGC, \
     ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_CMS, \
@@ -303,7 +303,7 @@
 
 jprt.my.solaris.x64.test.targets= \
     ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98_tiered, \
+    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
     ${jprt.my.solaris.x64}-{product|fastdebug}-c2-scimark, \
     ${jprt.my.solaris.x64}-product-c2-runThese, \
     ${jprt.my.solaris.x64}-product-c2-runThese_Xcomp, \
@@ -322,7 +322,7 @@
     ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
     ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
     ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
+    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
     ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_SerialGC, \
     ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
     ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \
@@ -331,7 +331,7 @@
 
 jprt.my.solaris.i586.test.targets= \
     ${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
-    ${jprt.my.solaris.i586}-{product|fastdebug}-c2-jvm98_tiered, \
+    ${jprt.my.solaris.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
     ${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-scimark, \
     ${jprt.my.solaris.i586}-product-{c1|c2}-runThese_Xcomp, \
     ${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xcomp, \
@@ -358,7 +358,7 @@
     ${jprt.my.solaris.i586}-product-c1-GCOld_G1, \
     ${jprt.my.solaris.i586}-product-c1-GCOld_ParOldGC, \
     ${jprt.my.solaris.i586}-fastdebug-c2-jbb_default, \
-    ${jprt.my.solaris.i586}-fastdebug-c2-jbb_default_tiered, \
+    ${jprt.my.solaris.i586}-fastdebug-c2-jbb_default_nontiered, \
     ${jprt.my.solaris.i586}-fastdebug-c2-jbb_ParallelGC, \
     ${jprt.my.solaris.i586}-fastdebug-c2-jbb_CMS, \
     ${jprt.my.solaris.i586}-fastdebug-c2-jbb_G1, \
@@ -366,7 +366,7 @@
 
 jprt.my.linux.i586.test.targets = \
     ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c2-jvm98_tiered, \
+    ${jprt.my.linux.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
     ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-scimark, \
     ${jprt.my.linux.i586}-product-c1-runThese_Xcomp, \
     ${jprt.my.linux.i586}-fastdebug-c1-runThese_Xshare, \
@@ -386,7 +386,7 @@
     ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_G1, \
     ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParOldGC, \
     ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_default, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c2-jbb_default_tiered, \
+    ${jprt.my.linux.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \
     ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParallelGC, \
     ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_CMS, \
     ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_G1, \
@@ -394,7 +394,7 @@
 
 jprt.my.linux.x64.test.targets = \
     ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98_tiered, \
+    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
     ${jprt.my.linux.x64}-{product|fastdebug}-c2-scimark, \
     ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_default, \
     ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
@@ -411,14 +411,14 @@
     ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_G1, \
     ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
     ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
+    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
     ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
     ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \
     ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParOldGC
 
 jprt.my.windows.i586.test.targets = \
     ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_tiered, \
+    ${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
     ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-scimark, \
     ${jprt.my.windows.i586}-product-{c1|c2}-runThese, \
     ${jprt.my.windows.i586}-product-{c1|c2}-runThese_Xcomp, \
@@ -438,7 +438,7 @@
     ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_G1, \
     ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParOldGC, \
     ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jbb_default, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-c2-jbb_default_tiered, \
+    ${jprt.my.windows.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \
     ${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParallelGC, \
     ${jprt.my.windows.i586}-product-{c1|c2}-jbb_CMS, \
     ${jprt.my.windows.i586}-product-{c1|c2}-jbb_G1, \
@@ -446,7 +446,7 @@
 
 jprt.my.windows.x64.test.targets = \
     ${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98_tiered, \
+    ${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
     ${jprt.my.windows.x64}-{product|fastdebug}-c2-scimark, \
     ${jprt.my.windows.x64}-product-c2-runThese, \
     ${jprt.my.windows.x64}-product-c2-runThese_Xcomp, \
@@ -465,7 +465,7 @@
     ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_G1, \
     ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
     ${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
+    ${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
     ${jprt.my.windows.x64}-product-c2-jbb_CMS, \
     ${jprt.my.windows.x64}-product-c2-jbb_ParallelGC, \
     ${jprt.my.windows.x64}-product-c2-jbb_G1, \
@@ -473,9 +473,9 @@
 
 # Some basic "smoke" tests for OpenJDK builds
 jprt.test.targets.open = \
-    ${jprt.my.solaris.x64}-{productOpen|debugOpen|fastdebugOpen}-c2-jvm98_tiered, \
-    ${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98_tiered, \
-    ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98_tiered
+    ${jprt.my.solaris.x64}-{productOpen|debugOpen|fastdebugOpen}-c2-jvm98, \
+    ${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98, \
+    ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98
 
 # Testing for actual embedded builds is different to standard
 jprt.my.linux.i586.test.targets.embedded = \
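
The renames above track the TieredCompilation default flip made later in this changeset: the formerly "tiered" configuration becomes the plain jvm98/jbb_default run, and the old behavior gets explicit _nontiered targets. As a rough illustration, assuming JPRT's usual brace expansion, a single entry such as

    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98_nontiered, \

expands to two test runs:

    <linux-x64-platform>-product-c2-jvm98_nontiered
    <linux-x64-platform>-fastdebug-c2-jvm98_nontiered

where <linux-x64-platform> stands in for whatever platform string ${jprt.my.linux.x64} is bound to earlier in the file.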
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -855,12 +855,6 @@
     Lookaside  = 1 << 4
   };
 
-  // test if x is within signed immediate range for nbits
-  static bool is_simm(intptr_t x, int nbits) { return -( intptr_t(1) << nbits-1 )  <= x   &&   x  <  ( intptr_t(1) << nbits-1 ); }
-
-  // test if -4096 <= x <= 4095
-  static bool is_simm13(intptr_t x) { return is_simm(x, 13); }
-
   static bool is_in_wdisp_range(address a, address b, int nbits) {
     intptr_t d = intptr_t(b) - intptr_t(a);
     return is_simm(d, nbits + 2);
@@ -1203,7 +1197,7 @@
     if (!UseCBCond || cbcond_before()) return false;
     intptr_t x = intptr_t(target_distance(L)) - intptr_t(pc());
     assert( (x & 3) == 0, "not word aligned");
-    return is_simm(x, 12);
+    return is_simm12(x);
   }
 
   // Tells assembler you know that next instruction is delayed
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -765,7 +765,7 @@
 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
   add_debug_info_for_null_check_here(op->info());
   __ load_klass(O0, G3_scratch);
-  if (__ is_simm13(op->vtable_offset())) {
+  if (Assembler::is_simm13(op->vtable_offset())) {
     __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
   } else {
     // This will generate 2 instructions
--- a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -42,7 +42,7 @@
 #else
 define_pd_global(bool, ProfileInterpreter,           true);
 #endif // CC_INTERP
-define_pd_global(bool, TieredCompilation,            false);
+define_pd_global(bool, TieredCompilation,            true);
 define_pd_global(intx, CompileThreshold,             10000);
 define_pd_global(intx, BackEdgeThreshold,            140000);
 
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -315,7 +315,7 @@
   __ cmp_and_br_short(O7_temp, T_VOID, Assembler::equal, Assembler::pt, L_ok_4);
   extract_conversion_vminfo(_masm, L5_conversion, O5_temp);
   __ ld_ptr(L4_saved_args_base, __ argument_offset(O5_temp, O5_temp), O7_temp);
-  assert(__ is_simm13(RETURN_VALUE_PLACEHOLDER), "must be simm13");
+  assert(Assembler::is_simm13(RETURN_VALUE_PLACEHOLDER), "must be simm13");
   __ cmp_and_brx_short(O7_temp, (int32_t) RETURN_VALUE_PLACEHOLDER, Assembler::equal, Assembler::pt, L_ok_4);
   __ stop("damaged ricochet frame: RETURN_VALUE_PLACEHOLDER not found");
   __ BIND(L_ok_4);
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -767,7 +767,7 @@
   // In the 64bit build because of wider slots and STACKBIAS we can run
   // out of bits in the displacement to do loads and stores.  Use g3 as
   // temporary displacement.
-  if (! __ is_simm13(extraspace)) {
+  if (!Assembler::is_simm13(extraspace)) {
     __ set(extraspace, G3_scratch);
     __ sub(SP, G3_scratch, SP);
   } else {
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Thu Dec 08 10:27:33 2011 -0800
@@ -566,7 +566,7 @@
     } else {
       klass_load_size = 1*BytesPerInstWord;
     }
-    if( Assembler::is_simm13(v_off) ) {
+    if (Assembler::is_simm13(v_off)) {
       return klass_load_size +
              (2*BytesPerInstWord +           // ld_ptr, ld_ptr
              NativeCall::instruction_size);  // call; delay slot
@@ -1019,8 +1019,21 @@
 
 
 //=============================================================================
-const bool Matcher::constant_table_absolute_addressing = false;
-const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask;
+const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();
+
+int Compile::ConstantTable::calculate_table_base_offset() const {
+  if (UseRDPCForConstantTableBase) {
+    // The table base offset might be less but then it fits into
+    // simm13 anyway and we are good (cf. MachConstantBaseNode::emit).
+    return Assembler::min_simm13();
+  } else {
+    int offset = -(size() / 2);
+    if (!Assembler::is_simm13(offset)) {
+      offset = Assembler::min_simm13();
+    }
+    return offset;
+  }
+}
 
 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
   Compile* C = ra_->C;
@@ -1028,8 +1041,9 @@
   MacroAssembler _masm(&cbuf);
 
   Register r = as_Register(ra_->get_encode(this));
-  CodeSection* cs = __ code()->consts();
-  int consts_size = cs->align_at_start(cs->size());
+  CodeSection* consts_section = __ code()->consts();
+  int consts_size = consts_section->align_at_start(consts_section->size());
+  assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size));
 
   if (UseRDPCForConstantTableBase) {
     // For the following RDPC logic to work correctly the consts
@@ -1037,30 +1051,37 @@
     // assert checks for that.  The layout and the SECT_* constants
     // are defined in src/share/vm/asm/codeBuffer.hpp.
     assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be");
-    int offset = __ offset();
+    int insts_offset = __ offset();
+
+    // Layout:
+    //
+    // |----------- consts section ------------|----------- insts section -----------...
+    // |------ constant table -----|- padding -|------------------x----
+    //                                                            \ current PC (RDPC instruction)
+    // |<------------- consts_size ----------->|<- insts_offset ->|
+    //                                                            \ table base
+    // The table base offset is later added to the load displacement
+    // so it has to be negative.
+    int table_base_offset = -(consts_size + insts_offset);
     int disp;
 
     // If the displacement from the current PC to the constant table
     // base fits into simm13 we set the constant table base to the
     // current PC.
-    if (__ is_simm13(-(consts_size + offset))) {
-      constant_table.set_table_base_offset(-(consts_size + offset));
+    if (Assembler::is_simm13(table_base_offset)) {
+      constant_table.set_table_base_offset(table_base_offset);
       disp = 0;
     } else {
-      // If the offset of the top constant (last entry in the table)
-      // fits into simm13 we set the constant table base to the actual
-      // table base.
-      if (__ is_simm13(constant_table.top_offset())) {
-        constant_table.set_table_base_offset(0);
-        disp = consts_size + offset;
-      } else {
-        // Otherwise we set the constant table base in the middle of the
-        // constant table.
-        int half_consts_size = consts_size / 2;
-        assert(half_consts_size * 2 == consts_size, "sanity");
-        constant_table.set_table_base_offset(-half_consts_size);  // table base offset gets added to the load displacement.
-        disp = half_consts_size + offset;
-      }
+      // Otherwise we set the constant table base offset to the
+      // maximum negative displacement of load instructions to keep
+      // the disp as small as possible:
+      //
+      // |<------------- consts_size ----------->|<- insts_offset ->|
+      // |<--------- min_simm13 --------->|<-------- disp --------->|
+      //                                  \ table base
+      table_base_offset = Assembler::min_simm13();
+      constant_table.set_table_base_offset(table_base_offset);
+      disp = (consts_size + insts_offset) + table_base_offset;
     }
 
     __ rdpc(r);
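
A minimal standalone sketch of the displacement arithmetic in this hunk, assuming min_simm13 == -4096 as defined in the shared Assembler added below; names and types are illustrative, not the HotSpot code itself:

    static const int min_simm13 = -4096;
    static bool is_simm13(long x) { return -4096 <= x && x <= 4095; }

    // Picks a table base offset and returns the remaining distance from the
    // chosen table base up to the current PC (the RDPC result), so that
    // base + (constant offset + table_base_offset) addresses a constant
    // with a simm13 load displacement.
    static int pick_rdpc_disp(int consts_size, int insts_offset,
                              int& table_base_offset) {
      table_base_offset = -(consts_size + insts_offset);
      if (is_simm13(table_base_offset)) {
        return 0;                      // base == current PC, no adjustment
      }
      table_base_offset = min_simm13;  // most negative load displacement
      return (consts_size + insts_offset) + table_base_offset;
    }

For example, consts_size = 8192 with the RDPC at insts offset 0 gives table_base_offset = -4096 and disp = 4096: the base lands 4096 bytes into the table, so both ends of the table stay within simm13 reach of it.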
@@ -1072,8 +1093,7 @@
   }
   else {
     // Materialize the constant table base.
-    assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size));
-    address baseaddr = cs->start() + -(constant_table.table_base_offset());
+    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
     RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
     AddressLiteral base(baseaddr, rspec);
     __ set(base, r);
@@ -1169,6 +1189,13 @@
     __ save(SP, G3, SP);
   }
   C->set_frame_complete( __ offset() );
+
+  if (!UseRDPCForConstantTableBase && C->has_mach_constant_base_node()) {
+    // NOTE: We set the table base offset here because users might be
+    // emitted before MachConstantBaseNode.
+    Compile::ConstantTable& constant_table = C->constant_table();
+    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
+  }
 }
 
 uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
@@ -1843,7 +1870,7 @@
 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
   // The passed offset is relative to address of the branch.
   // Don't need to adjust the offset.
-  return UseCBCond && Assembler::is_simm(offset, 12);
+  return UseCBCond && Assembler::is_simm12(offset);
 }
 
 const bool Matcher::isSimpleConstant64(jlong value) {
@@ -1997,7 +2024,7 @@
 }
 
 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
-  return L7_REGP_mask;
+  return L7_REGP_mask();
 }
 
 %}
@@ -2072,8 +2099,8 @@
   %}
 
   enc_class form3_mem_reg_long_unaligned_marshal( memory mem, iRegL reg ) %{
-    assert( Assembler::is_simm13($mem$$disp  ), "need disp and disp+4" );
-    assert( Assembler::is_simm13($mem$$disp+4), "need disp and disp+4" );
+    assert(Assembler::is_simm13($mem$$disp  ), "need disp and disp+4");
+    assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
     guarantee($mem$$index == R_G0_enc, "double index?");
     emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc );
     emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp,   R_G0_enc, $reg$$reg );
@@ -2082,8 +2109,8 @@
   %}
 
   enc_class form3_mem_reg_double_unaligned( memory mem, RegD_low reg ) %{
-    assert( Assembler::is_simm13($mem$$disp  ), "need disp and disp+4" );
-    assert( Assembler::is_simm13($mem$$disp+4), "need disp and disp+4" );
+    assert(Assembler::is_simm13($mem$$disp  ), "need disp and disp+4");
+    assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
     guarantee($mem$$index == R_G0_enc, "double index?");
     // Load long with 2 instructions
     emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp,   R_G0_enc, $reg$$reg+0 );
@@ -2563,7 +2590,7 @@
       }
       int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
       int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
-      if( __ is_simm13(v_off) ) {
+      if (Assembler::is_simm13(v_off)) {
         __ ld_ptr(G3, v_off, G5_method);
       } else {
         // Generate 2 instructions
@@ -3336,7 +3363,7 @@
 
 // Integer Immediate: 8-bit
 operand immI8() %{
-  predicate(Assembler::is_simm(n->get_int(), 8));
+  predicate(Assembler::is_simm8(n->get_int()));
   match(ConI);
   op_cost(0);
   format %{ %}
@@ -3365,7 +3392,7 @@
 
 // Integer Immediate: 16-bit
 operand immI16() %{
-  predicate(Assembler::is_simm(n->get_int(), 16));
+  predicate(Assembler::is_simm16(n->get_int()));
   match(ConI);
   op_cost(0);
   format %{ %}
@@ -3393,7 +3420,7 @@
 
 // Integer Immediate: 11-bit
 operand immI11() %{
-  predicate(Assembler::is_simm(n->get_int(),11));
+  predicate(Assembler::is_simm11(n->get_int()));
   match(ConI);
   op_cost(0);
   format %{ %}
@@ -3402,7 +3429,7 @@
 
 // Integer Immediate: 5-bit
 operand immI5() %{
-  predicate(Assembler::is_simm(n->get_int(), 5));
+  predicate(Assembler::is_simm5(n->get_int()));
   match(ConI);
   op_cost(0);
   format %{ %}
@@ -3634,7 +3661,7 @@
 
 // Integer Immediate: 5-bit
 operand immL5() %{
-  predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm((int)n->get_long(), 5));
+  predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm5((int)n->get_long()));
   match(ConL);
   op_cost(0);
   format %{ %}
@@ -9251,13 +9278,16 @@
 
   format %{  "ADD    $constanttablebase, $constantoffset, O7\n\t"
              "LD     [O7 + $switch_val], O7\n\t"
-             "JUMP   O7"
-         %}
+             "JUMP   O7" %}
   ins_encode %{
     // Calculate table address into a register.
     Register table_reg;
     Register label_reg = O7;
-    if (constant_offset() == 0) {
+    // If we are calculating the size of this instruction don't trust
+    // zero offsets because they might change when
+    // MachConstantBaseNode decides to optimize the constant table
+    // base.
+    if ((constant_offset() == 0) && !Compile::current()->in_scratch_emit_size()) {
       table_reg = $constanttablebase;
     } else {
       table_reg = O7;
--- a/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -83,7 +83,7 @@
   }
 #endif
   int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
-  if( __ is_simm13(v_off) ) {
+  if (Assembler::is_simm13(v_off)) {
     __ ld_ptr(G3, v_off, G5_method);
   } else {
     __ set(v_off,G5);
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -3535,7 +3535,8 @@
 // addressing.
 bool Assembler::is_polling_page_far() {
   intptr_t addr = (intptr_t)os::get_polling_page();
-  return !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
+  return ForceUnreachable ||
+         !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
          !is_simm32(addr - (intptr_t)CodeCache::high_bound());
 }
 
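A sketch of the reachability test above, with plain integers standing in for addresses; ForceUnreachable is the debugging flag newly folded into the condition, and the assumption is that a RIP-relative access must reach the polling page with a 32-bit signed displacement from anywhere in the code cache:

    #include <cstdint>

    static bool is_simm32(int64_t x) { return x == (int64_t)(int32_t)x; }

    // The polling page is "far" if any code between cc_low and cc_high
    // might fail to reach it with a simm32 displacement, or if
    // force_unreachable pessimistically demands the far path.
    static bool polling_page_far(int64_t page, int64_t cc_low,
                                 int64_t cc_high, bool force_unreachable) {
      return force_unreachable ||
             !is_simm32(page - cc_low) ||
             !is_simm32(page - cc_high);
    }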
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -693,17 +693,6 @@
   static address locate_next_instruction(address inst);
 
   // Utilities
-
-#ifdef _LP64
- static bool is_simm(int64_t x, int nbits) { return -(CONST64(1) << (nbits-1)) <= x &&
-                                                    x < (CONST64(1) << (nbits-1)); }
- static bool is_simm32(int64_t x) { return x == (int64_t)(int32_t)x; }
-#else
- static bool is_simm(int32_t x, int nbits) { return -(1 << (nbits-1)) <= x &&
-                                                    x < (1 << (nbits-1)); }
- static bool is_simm32(int32_t x) { return true; }
-#endif // _LP64
-
   static bool is_polling_page_far() NOT_LP64({ return false;});
 
   // Generic instructions
--- a/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -44,7 +44,7 @@
 #else
 define_pd_global(bool, ProfileInterpreter,           true);
 #endif // CC_INTERP
-define_pd_global(bool, TieredCompilation,            false);
+define_pd_global(bool, TieredCompilation,            true);
 define_pd_global(intx, CompileThreshold,             10000);
 define_pd_global(intx, BackEdgeThreshold,            100000);
 
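Together with the identical change in c2_globals_sparc.hpp above, this makes tiered compilation the default for C2 builds on x86 and SPARC, which is why the JPRT targets gained explicit _nontiered variants. Using standard HotSpot flag syntax, the old non-tiered behavior can still be requested explicitly:

    java -XX:-TieredCompilation MyApp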
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -27,7 +27,7 @@
 
 // Adapters
 enum /* platform_dependent_constants */ {
-  adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 15000)) LP64_ONLY(32000 DEBUG_ONLY(+ 80000))
+  adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 15000)) LP64_ONLY(32000 DEBUG_ONLY(+ 120000))
 };
 
 public:
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -95,6 +95,7 @@
 #define inc_counter_np(counter) (0)
 #else
   void inc_counter_np_(int& counter) {
+    // This can destroy rscratch1 if counter is far from the code cache
     __ incrementl(ExternalAddress((address)&counter));
   }
 #define inc_counter_np(counter) \
@@ -1268,7 +1269,7 @@
            __ subptr(end, start); // number of bytes to copy
 
           intptr_t disp = (intptr_t) ct->byte_map_base;
-          if (__ is_simm32(disp)) {
+          if (Assembler::is_simm32(disp)) {
             Address cardtable(noreg, noreg, Address::no_scale, disp);
             __ lea(scratch, cardtable);
           } else {
@@ -1466,8 +1467,8 @@
     __ movb(Address(end_to, 8), rax);
 
   __ BIND(L_exit);
-    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
     restore_arg_regs();
+    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
     __ xorptr(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
@@ -1555,8 +1556,8 @@
     __ decrement(qword_count);
     __ jcc(Assembler::notZero, L_copy_8_bytes);
 
-    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
     restore_arg_regs();
+    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
     __ xorptr(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
@@ -1564,8 +1565,8 @@
     // Copy in 32-bytes chunks
     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
 
-    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
     restore_arg_regs();
+    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
     __ xorptr(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
@@ -1658,8 +1659,8 @@
     __ movw(Address(end_to, 8), rax);
 
   __ BIND(L_exit);
-    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
     restore_arg_regs();
+    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
     __ xorptr(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
@@ -1759,8 +1760,8 @@
     __ decrement(qword_count);
     __ jcc(Assembler::notZero, L_copy_8_bytes);
 
-    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
     restore_arg_regs();
+    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
     __ xorptr(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
@@ -1768,8 +1769,8 @@
     // Copy in 32-bytes chunks
     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
 
-    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
     restore_arg_regs();
+    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
     __ xorptr(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
@@ -1859,8 +1860,8 @@
       __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
       gen_write_ref_array_post_barrier(saved_to, end_to, rax);
     }
-    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
     restore_arg_regs();
+    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
     __ xorptr(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
@@ -1940,11 +1941,11 @@
     __ decrement(qword_count);
     __ jcc(Assembler::notZero, L_copy_8_bytes);
 
-    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
     if (is_oop) {
       __ jmp(L_exit);
     }
     restore_arg_regs();
+    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
     __ xorptr(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
@@ -1952,7 +1953,6 @@
     // Copy in 32-bytes chunks
     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
 
-   inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
    __ bind(L_exit);
      if (is_oop) {
        Register end_to = rdx;
@@ -1960,6 +1960,7 @@
        gen_write_ref_array_post_barrier(to, end_to, rax);
      }
     restore_arg_regs();
+    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
     __ xorptr(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
@@ -2032,8 +2033,8 @@
     if (is_oop) {
       __ jmp(L_exit);
     } else {
-      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
       restore_arg_regs();
+      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
       __ xorptr(rax, rax); // return 0
       __ leave(); // required for proper stackwalking of RuntimeStub frame
       __ ret(0);
@@ -2045,11 +2046,13 @@
     if (is_oop) {
     __ BIND(L_exit);
       gen_write_ref_array_post_barrier(saved_to, end_to, rax);
-      inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
-    } else {
-      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
     }
     restore_arg_regs();
+    if (is_oop) {
+      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
+    } else {
+      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
+    }
     __ xorptr(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
@@ -2113,8 +2116,8 @@
     if (is_oop) {
       __ jmp(L_exit);
     } else {
-      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
       restore_arg_regs();
+      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
       __ xorptr(rax, rax); // return 0
       __ leave(); // required for proper stackwalking of RuntimeStub frame
       __ ret(0);
@@ -2127,11 +2130,13 @@
     __ BIND(L_exit);
       __ lea(rcx, Address(to, saved_count, Address::times_8, -8));
       gen_write_ref_array_post_barrier(to, rcx, rax);
-      inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
-    } else {
-      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
     }
     restore_arg_regs();
+    if (is_oop) {
+      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
+    } else {
+      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
+    }
     __ xorptr(rax, rax); // return 0
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
@@ -2331,8 +2336,8 @@
     __ BIND(L_done);
     __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
     __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
-    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
     restore_arg_regs();
+    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
 
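All of the reorderings in this file follow the pattern hinted at by the new comment in inc_counter_np_: when the counter lives outside simm32 range of the code cache, incrementl(ExternalAddress(...)) must first materialize the address in a scratch register. A hedged sketch of the two shapes, as illustrative pseudo-assembly rather than actual MacroAssembler output:

    // near counter: reachable with a RIP-relative 32-bit displacement,
    // no scratch register needed
    incl   [rip + disp32]

    // far counter: the address materialization clobbers rscratch1 (r10),
    // so it must not run while restore_arg_regs() still relies on it
    mov    r10, counter_address
    incl   [r10]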
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Thu Dec 08 10:27:33 2011 -0800
@@ -507,9 +507,12 @@
 
 
 //=============================================================================
-const bool Matcher::constant_table_absolute_addressing = true;
 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 
+int Compile::ConstantTable::calculate_table_base_offset() const {
+  return 0;  // absolute addressing, no offset
+}
+
 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
   // Empty encoding
 }
@@ -639,6 +642,12 @@
   }
 #endif
 
+  if (C->has_mach_constant_base_node()) {
+    // NOTE: We set the table base offset here because users might be
+    // emitted before MachConstantBaseNode.
+    Compile::ConstantTable& constant_table = C->constant_table();
+    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
+  }
 }
 
 uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
@@ -1515,12 +1524,12 @@
 
 // Register for DIVI projection of divmodI
 RegMask Matcher::divI_proj_mask() {
-  return EAX_REG_mask;
+  return EAX_REG_mask();
 }
 
 // Register for MODI projection of divmodI
 RegMask Matcher::modI_proj_mask() {
-  return EDX_REG_mask;
+  return EDX_REG_mask();
 }
 
 // Register for DIVL projection of divmodL
@@ -1536,7 +1545,7 @@
 }
 
 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
-  return EBP_REG_mask;
+  return EBP_REG_mask();
 }
 
 // Returns true if the high 32 bits of the value is known to be zero.
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Thu Dec 08 10:27:33 2011 -0800
@@ -843,9 +843,12 @@
 
 
 //=============================================================================
-const bool Matcher::constant_table_absolute_addressing = true;
 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
 
+int Compile::ConstantTable::calculate_table_base_offset() const {
+  return 0;  // absolute addressing, no offset
+}
+
 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
   // Empty encoding
 }
@@ -977,6 +980,13 @@
     masm.bind(L);
   }
 #endif
+
+  if (C->has_mach_constant_base_node()) {
+    // NOTE: We set the table base offset here because users might be
+    // emitted before MachConstantBaseNode.
+    Compile::ConstantTable& constant_table = C->constant_table();
+    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
+  }
 }
 
 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
@@ -2079,26 +2089,26 @@
 
 // Register for DIVI projection of divmodI
 RegMask Matcher::divI_proj_mask() {
-  return INT_RAX_REG_mask;
+  return INT_RAX_REG_mask();
 }
 
 // Register for MODI projection of divmodI
 RegMask Matcher::modI_proj_mask() {
-  return INT_RDX_REG_mask;
+  return INT_RDX_REG_mask();
 }
 
 // Register for DIVL projection of divmodL
 RegMask Matcher::divL_proj_mask() {
-  return LONG_RAX_REG_mask;
+  return LONG_RAX_REG_mask();
 }
 
 // Register for MODL projection of divmodL
 RegMask Matcher::modL_proj_mask() {
-  return LONG_RDX_REG_mask;
+  return LONG_RDX_REG_mask();
 }
 
 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
-  return PTR_RBP_REG_mask;
+  return PTR_RBP_REG_mask();
 }
 
 static Address build_address(int b, int i, int s, int d) {
--- a/hotspot/src/share/vm/adlc/adlparse.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/adlc/adlparse.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -982,27 +982,9 @@
       }
       if (strcmp(token,"interpreter_frame_pointer")==0) {
         interpreter_frame_pointer_parse(frame, false);
-        // Add  reg_class interpreter_frame_pointer_reg
-        if( _AD._register != NULL ) {
-          RegClass *reg_class = _AD._register->addRegClass("interpreter_frame_pointer_reg");
-          char *interpreter_frame_pointer_reg = frame->_interpreter_frame_pointer_reg;
-          if( interpreter_frame_pointer_reg != NULL ) {
-            RegDef *regDef = _AD._register->getRegDef(interpreter_frame_pointer_reg);
-            reg_class->addReg(regDef);     // add regDef to regClass
-          }
-        }
       }
       if (strcmp(token,"inline_cache_reg")==0) {
         inline_cache_parse(frame, false);
-        // Add  reg_class inline_cache_reg
-        if( _AD._register != NULL ) {
-          RegClass *reg_class = _AD._register->addRegClass("inline_cache_reg");
-          char *inline_cache_reg = frame->_inline_cache_reg;
-          if( inline_cache_reg != NULL ) {
-            RegDef *regDef = _AD._register->getRegDef(inline_cache_reg);
-            reg_class->addReg(regDef);     // add regDef to regClass
-          }
-        }
       }
       if (strcmp(token,"compiler_method_oop_reg")==0) {
         parse_err(WARN, "Using obsolete Token, compiler_method_oop_reg");
@@ -1010,15 +992,6 @@
       }
       if (strcmp(token,"interpreter_method_oop_reg")==0) {
         interpreter_method_oop_parse(frame, false);
-        // Add  reg_class interpreter_method_oop_reg
-        if( _AD._register != NULL ) {
-          RegClass *reg_class = _AD._register->addRegClass("interpreter_method_oop_reg");
-          char *method_oop_reg = frame->_interpreter_method_oop_reg;
-          if( method_oop_reg != NULL ) {
-            RegDef *regDef = _AD._register->getRegDef(method_oop_reg);
-            reg_class->addReg(regDef);     // add regDef to regClass
-          }
-        }
       }
       if (strcmp(token,"cisc_spilling_operand_name")==0) {
         cisc_spilling_operand_name_parse(frame, false);
@@ -2363,6 +2336,14 @@
       }
     }
     next_char();                  // Skip closing ')'
+  } else if (_curchar == '%') {
+    char *code = find_cpp_block("reg class");
+    if (code == NULL) {
+      parse_err(SYNERR, "missing code declaration for reg class.\n");
+      return;
+    }
+    reg_class->_user_defined = code;
+    return;
   }
 
   // Check for terminating ';'
@@ -3115,7 +3096,7 @@
   encoding->add_code("    _constant = C->constant_table().add");
 
   // Parse everything in ( ) expression.
-  encoding->add_code("(");
+  encoding->add_code("(this, ");
   next_char();  // Skip '('
   int parens_depth = 1;
 
@@ -3130,7 +3111,8 @@
     }
     else if (_curchar == ')') {
       parens_depth--;
-      encoding->add_code(")");
+      if (parens_depth > 0)
+        encoding->add_code(")");
       next_char();
     }
     else {
@@ -3157,7 +3139,7 @@
   }
 
   // Finish code line.
-  encoding->add_code(";");
+  encoding->add_code(");");
 
   if (_AD._adlocation_debug) {
     encoding->add_code(end_line_marker());
@@ -3817,7 +3799,7 @@
     return;
   }
   // Get list of effect-operand pairs and insert into dictionary
-  else get_effectlist(instr->_effects, instr->_localNames);
+  else get_effectlist(instr->_effects, instr->_localNames, instr->_has_call);
 
   // Debug Stuff
   if (_AD._adl_debug > 1) fprintf(stderr,"Effect description: %s\n", desc);
@@ -4595,7 +4577,7 @@
 // effect, and the second must be the name of an operand defined in the
 // operand list of this instruction.  Stores the names with a pointer to the
 // effect form in a local effects table.
-void ADLParser::get_effectlist(FormDict &effects, FormDict &operands) {
+void ADLParser::get_effectlist(FormDict &effects, FormDict &operands, bool& has_call) {
   OperandForm *opForm;
   Effect      *eForm;
   char        *ident;
@@ -4628,26 +4610,31 @@
       // Debugging Stuff
     if (_AD._adl_debug > 1) fprintf(stderr, "\tEffect Type: %s\t", ident);
     skipws();
-    // Get name of operand and check that it is in the local name table
-    if( (ident = get_unique_ident(effects, "effect")) == NULL) {
-      parse_err(SYNERR, "missing operand identifier in effect list\n");
-      return;
+    if (eForm->is(Component::CALL)) {
+      if (_AD._adl_debug > 1) fprintf(stderr, "\n");
+      has_call = true;
+    } else {
+      // Get name of operand and check that it is in the local name table
+      if( (ident = get_unique_ident(effects, "effect")) == NULL) {
+        parse_err(SYNERR, "missing operand identifier in effect list\n");
+        return;
+      }
+      const Form *form = operands[ident];
+      opForm = form ? form->is_operand() : NULL;
+      if( opForm == NULL ) {
+        if( form && form->is_opclass() ) {
+          const char* cname = form->is_opclass()->_ident;
+          parse_err(SYNERR, "operand classes are illegal in effect lists (found %s %s)\n", cname, ident);
+        } else {
+          parse_err(SYNERR, "undefined operand %s in effect list\n", ident);
+        }
+        return;
+      }
+      // Add the pair to the effects table
+      effects.Insert(ident, eForm);
+      // Debugging Stuff
+      if (_AD._adl_debug > 1) fprintf(stderr, "\tOperand Name: %s\n", ident);
     }
-    const Form *form = operands[ident];
-    opForm = form ? form->is_operand() : NULL;
-    if( opForm == NULL ) {
-      if( form && form->is_opclass() ) {
-        const char* cname = form->is_opclass()->_ident;
-        parse_err(SYNERR, "operand classes are illegal in effect lists (found %s %s)\n", cname, ident);
-      } else {
-        parse_err(SYNERR, "undefined operand %s in effect list\n", ident);
-      }
-      return;
-    }
-    // Add the pair to the effects table
-    effects.Insert(ident, eForm);
-    // Debugging Stuff
-    if (_AD._adl_debug > 1) fprintf(stderr, "\tOperand Name: %s\n", ident);
     skipws();
   } while(_curchar == ',');
 
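The new branch lets an effect list name CALL without a paired operand; it only flags the instruction via has_call. A hypothetical .ad fragment showing the intended use (instruction and operand names are made up for illustration):

    instruct stubCall( ... ) %{
      ...
      effect(USE_KILL arg0, CALL);  // CALL takes no operand: it marks the
                                    // node as containing a runtime call so
                                    // caller-saved registers are treated
                                    // as killed across it
      ...
    %}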
--- a/hotspot/src/share/vm/adlc/adlparse.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/adlc/adlparse.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -232,7 +232,7 @@
   char *get_relation_dup(void);
 
   void  get_oplist(NameList &parameters, FormDict &operands);// Parse type-operand pairs
-  void  get_effectlist(FormDict &effects, FormDict &operands); // Parse effect-operand pairs
+  void  get_effectlist(FormDict &effects, FormDict &operands, bool& has_call); // Parse effect-operand pairs
   // Return the contents of a parenthesized expression.
   // Requires initial '(' and consumes final ')', which is replaced by '\0'.
   char *get_paren_expr(const char *description, bool include_location = false);
--- a/hotspot/src/share/vm/adlc/archDesc.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/adlc/archDesc.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -823,9 +823,9 @@
   } else {
     char       *rc_name = toUpper(reg_class_name);
     const char *mask    = "_mask";
-    int         length  = (int)strlen(rc_name) + (int)strlen(mask) + 3;
+    int         length  = (int)strlen(rc_name) + (int)strlen(mask) + 5;
     char       *regMask = new char[length];
-    sprintf(regMask,"%s%s", rc_name, mask);
+    sprintf(regMask,"%s%s()", rc_name, mask);
     return regMask;
   }
 }
@@ -1018,6 +1018,9 @@
     ident = "TEMP";
     eForm = new Effect(ident);
     _globalNames.Insert(ident, eForm);
+    ident = "CALL";
+    eForm = new Effect(ident);
+    _globalNames.Insert(ident, eForm);
   }
 
   //
--- a/hotspot/src/share/vm/adlc/formsopt.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/adlc/formsopt.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -219,7 +219,9 @@
 
 //------------------------------RegClass---------------------------------------
 // Construct a register class into which registers will be inserted
-RegClass::RegClass(const char *classid) : _stack_or_reg(false), _classid(classid), _regDef(cmpstr,hashstr, Form::arena) {
+RegClass::RegClass(const char *classid) : _stack_or_reg(false), _classid(classid), _regDef(cmpstr,hashstr, Form::arena),
+                                          _user_defined(NULL)
+{
 }
 
 // record a register in this class
--- a/hotspot/src/share/vm/adlc/formsopt.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/adlc/formsopt.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -161,6 +161,7 @@
   NameList    _regDefs;         // List of registers in class
   Dict        _regDef;          // Dictionary of registers in class
   bool        _stack_or_reg;    // Allowed on any stack slot
+  char*       _user_defined;
 
   // Public Methods
   RegClass(const char *classid);// Constructor
--- a/hotspot/src/share/vm/adlc/formssel.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/adlc/formssel.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -31,7 +31,8 @@
   : _ident(id), _ideal_only(ideal_only),
     _localNames(cmpstr, hashstr, Form::arena),
     _effects(cmpstr, hashstr, Form::arena),
-    _is_mach_constant(false)
+    _is_mach_constant(false),
+    _has_call(false)
 {
       _ftype = Form::INS;
 
@@ -62,7 +63,8 @@
   : _ident(id), _ideal_only(false),
     _localNames(instr->_localNames),
     _effects(instr->_effects),
-    _is_mach_constant(false)
+    _is_mach_constant(false),
+    _has_call(false)
 {
       _ftype = Form::INS;
 
@@ -1754,6 +1756,7 @@
   if(!strcmp(name, "USE_KILL")) return Component::USE_KILL;
   if(!strcmp(name, "TEMP")) return Component::TEMP;
   if(!strcmp(name, "INVALID")) return Component::INVALID;
+  if(!strcmp(name, "CALL")) return Component::CALL;
   assert( false,"Invalid effect name specified\n");
   return Component::INVALID;
 }
--- a/hotspot/src/share/vm/adlc/formssel.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/adlc/formssel.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -111,6 +111,8 @@
   ComponentList  _components;      // List of Components matches MachNode's
                                    // operand structure
 
+  bool           _has_call;        // contain a call and caller save registers should be saved?
+
   // Public Methods
   InstructForm(const char *id, bool ideal_only = false);
   InstructForm(const char *id, InstructForm *instr, MatchRule *rule);
@@ -895,7 +897,8 @@
     DEF     = 0x2, USE_DEF   = 0x3,
     KILL    = 0x4, USE_KILL  = 0x5,
     SYNTHETIC = 0x8,
-    TEMP = USE | SYNTHETIC
+    TEMP = USE | SYNTHETIC,
+    CALL = 0x10
   };
 };
 
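CALL takes the next free bit above SYNTHETIC, so it cannot collide with any USE/DEF/KILL combination or with TEMP (USE | SYNTHETIC). A one-line compile-time check, written as a standalone sketch with USE = 0x1 assumed from the enum's pattern:

    enum { USE = 0x1, DEF = 0x2, KILL = 0x4, SYNTHETIC = 0x8, CALL = 0x10 };
    static_assert((CALL & (USE | DEF | KILL | SYNTHETIC)) == 0,
                  "CALL must be a distinct flag bit");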
--- a/hotspot/src/share/vm/adlc/output_c.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/adlc/output_c.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -162,11 +162,17 @@
       RegClass   *reg_class = _register->getRegClass(rc_name);
       assert( reg_class, "Using an undefined register class");
 
-      int len = RegisterForm::RegMask_Size();
-      fprintf(fp_hpp, "extern const RegMask %s%s_mask;\n", prefix, toUpper( rc_name ) );
+      if (reg_class->_user_defined == NULL) {
+        fprintf(fp_hpp, "extern const RegMask _%s%s_mask;\n", prefix, toUpper( rc_name ) );
+        fprintf(fp_hpp, "inline const RegMask &%s%s_mask() { return _%s%s_mask; }\n", prefix, toUpper( rc_name ), prefix, toUpper( rc_name ));
+      } else {
+        fprintf(fp_hpp, "inline const RegMask &%s%s_mask() { %s }\n", prefix, toUpper( rc_name ), reg_class->_user_defined);
+      }
 
       if( reg_class->_stack_or_reg ) {
-        fprintf(fp_hpp, "extern const RegMask %sSTACK_OR_%s_mask;\n", prefix, toUpper( rc_name ) );
+        assert(reg_class->_user_defined == NULL, "no user defined reg class here");
+        fprintf(fp_hpp, "extern const RegMask _%sSTACK_OR_%s_mask;\n", prefix, toUpper( rc_name ) );
+        fprintf(fp_hpp, "inline const RegMask &%sSTACK_OR_%s_mask() { return _%sSTACK_OR_%s_mask; }\n", prefix, toUpper( rc_name ), prefix, toUpper( rc_name ) );
       }
     }
   }
@@ -188,8 +194,10 @@
       RegClass   *reg_class = _register->getRegClass(rc_name);
       assert( reg_class, "Using an undefined register class");
 
+      if (reg_class->_user_defined != NULL) continue;
+
       int len = RegisterForm::RegMask_Size();
-      fprintf(fp_cpp, "const RegMask %s%s_mask(", prefix, toUpper( rc_name ) );
+      fprintf(fp_cpp, "const RegMask _%s%s_mask(", prefix, toUpper( rc_name ) );
       { int i;
         for( i = 0; i < len-1; i++ )
           fprintf(fp_cpp," 0x%x,",reg_class->regs_in_word(i,false));
@@ -198,7 +206,7 @@
 
       if( reg_class->_stack_or_reg ) {
         int i;
-        fprintf(fp_cpp, "const RegMask %sSTACK_OR_%s_mask(", prefix, toUpper( rc_name ) );
+        fprintf(fp_cpp, "const RegMask _%sSTACK_OR_%s_mask(", prefix, toUpper( rc_name ) );
         for( i = 0; i < len-1; i++ )
           fprintf(fp_cpp," 0x%x,",reg_class->regs_in_word(i,true));
         fprintf(fp_cpp," 0x%x );\n",reg_class->regs_in_word(i,true));
@@ -2585,9 +2593,9 @@
   // Output instruction's emit prototype
   fprintf(fp, "void %sNode::eval_constant(Compile* C) {\n", inst._ident);
 
-  // For ideal jump nodes, allocate a jump table.
+  // For ideal jump nodes, add a jump-table entry.
   if (inst.is_ideal_jump()) {
-    fprintf(fp, "  _constant = C->constant_table().allocate_jump_table(this);\n");
+    fprintf(fp, "  _constant = C->constant_table().add_jump_table(this);\n");
   }
 
   // If user did not define an encode section,
@@ -2690,7 +2698,7 @@
       if (strcmp(first_reg_class, "stack_slots") == 0) {
         fprintf(fp,"  return &(Compile::current()->FIRST_STACK_mask());\n");
       } else {
-        fprintf(fp,"  return &%s_mask;\n", toUpper(first_reg_class));
+        fprintf(fp,"  return &%s_mask();\n", toUpper(first_reg_class));
       }
     } else {
       // Build a switch statement to return the desired mask.
@@ -2702,7 +2710,7 @@
         if( !strcmp(reg_class, "stack_slots") ) {
           fprintf(fp, "  case %d: return &(Compile::current()->FIRST_STACK_mask());\n", index);
         } else {
-          fprintf(fp, "  case %d: return &%s_mask;\n", index, toUpper(reg_class));
+          fprintf(fp, "  case %d: return &%s_mask();\n", index, toUpper(reg_class));
         }
       }
       fprintf(fp,"  }\n");
@@ -4080,8 +4088,6 @@
   fprintf(fp_cpp,"OptoReg::Name Matcher::inline_cache_reg() {");
   fprintf(fp_cpp," return OptoReg::Name(%s_num); }\n\n",
           _frame->_inline_cache_reg);
-  fprintf(fp_cpp,"const RegMask &Matcher::inline_cache_reg_mask() {");
-  fprintf(fp_cpp," return INLINE_CACHE_REG_mask; }\n\n");
   fprintf(fp_cpp,"int Matcher::inline_cache_reg_encode() {");
   fprintf(fp_cpp," return _regEncode[inline_cache_reg()]; }\n\n");
 
@@ -4089,8 +4095,6 @@
   fprintf(fp_cpp,"OptoReg::Name Matcher::interpreter_method_oop_reg() {");
   fprintf(fp_cpp," return OptoReg::Name(%s_num); }\n\n",
           _frame->_interpreter_method_oop_reg);
-  fprintf(fp_cpp,"const RegMask &Matcher::interpreter_method_oop_reg_mask() {");
-  fprintf(fp_cpp," return INTERPRETER_METHOD_OOP_REG_mask; }\n\n");
   fprintf(fp_cpp,"int Matcher::interpreter_method_oop_reg_encode() {");
   fprintf(fp_cpp," return _regEncode[interpreter_method_oop_reg()]; }\n\n");
 
@@ -4101,11 +4105,6 @@
   else
     fprintf(fp_cpp," return OptoReg::Name(%s_num); }\n\n",
             _frame->_interpreter_frame_pointer_reg);
-  fprintf(fp_cpp,"const RegMask &Matcher::interpreter_frame_pointer_reg_mask() {");
-  if (_frame->_interpreter_frame_pointer_reg == NULL)
-    fprintf(fp_cpp," static RegMask dummy; return dummy; }\n\n");
-  else
-    fprintf(fp_cpp," return INTERPRETER_FRAME_POINTER_REG_mask; }\n\n");
 
   // Frame Pointer definition
   /* CNC - I can not contemplate having a different frame pointer between
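
Hand-expanded, the accessor shape these fprintf calls now generate looks roughly like the following sketch (RegMask stubbed out, class names illustrative): a fixed register class keeps its statically built mask behind an inline accessor, while a user-defined class — the new %{ ... %} body parsed in adlparse.cpp — supplies the accessor body itself:

    struct RegMask { };                       // stand-in for the real type

    // Fixed reg class: adlc still emits the mask as static data, now with
    // a leading underscore, behind an inline accessor.
    const RegMask _PTR_REG_mask = {};
    inline const RegMask& PTR_REG_mask() { return _PTR_REG_mask; }

    // User-defined reg class: the %{ ... %} body becomes the accessor, so
    // the returned mask may depend on run-time state.
    inline const RegMask& MH_SP_SAVE_REG_mask() {
      return _PTR_REG_mask;                   // illustrative user code
    }

This is why the Matcher functions in the .ad files above changed from referencing mask objects (L7_REGP_mask) to calling accessors (L7_REGP_mask()).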
--- a/hotspot/src/share/vm/adlc/output_h.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/adlc/output_h.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -1720,6 +1720,16 @@
       }
     }
 
+    // flag: if this instruction is implemented with a call
+    if ( instr->_has_call ) {
+      if ( node_flags_set ) {
+        fprintf(fp," | Flag_has_call");
+      } else {
+        fprintf(fp,"init_flags(Flag_has_call");
+        node_flags_set = true;
+      }
+    }
+
     if ( node_flags_set ) {
       fprintf(fp,"); ");
     }
--- a/hotspot/src/share/vm/asm/assembler.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/asm/assembler.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -257,6 +257,29 @@
   // ensure buf contains all code (call this before using/copying the code)
   void flush();
 
+  // min and max values for signed immediate ranges
+  static int min_simm(int nbits) { return -(intptr_t(1) << (nbits - 1))    ; }
+  static int max_simm(int nbits) { return  (intptr_t(1) << (nbits - 1)) - 1; }
+
+  // Define some:
+  static int min_simm10() { return min_simm(10); }
+  static int min_simm13() { return min_simm(13); }
+  static int min_simm16() { return min_simm(16); }
+
+  // Test if x is within signed immediate range for nbits
+  static bool is_simm(intptr_t x, int nbits) { return min_simm(nbits) <= x && x <= max_simm(nbits); }
+
+  // Define some:
+  static bool is_simm5( intptr_t x) { return is_simm(x, 5 ); }
+  static bool is_simm8( intptr_t x) { return is_simm(x, 8 ); }
+  static bool is_simm10(intptr_t x) { return is_simm(x, 10); }
+  static bool is_simm11(intptr_t x) { return is_simm(x, 11); }
+  static bool is_simm12(intptr_t x) { return is_simm(x, 12); }
+  static bool is_simm13(intptr_t x) { return is_simm(x, 13); }
+  static bool is_simm16(intptr_t x) { return is_simm(x, 16); }
+  static bool is_simm26(intptr_t x) { return is_simm(x, 26); }
+  static bool is_simm32(intptr_t x) { return is_simm(x, 32); }
+
   // Accessors
   CodeBuffer*   code() const;          // _code_section->outer()
   CodeSection*  code_section() const   { return _code_section; }
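
These helpers replace the per-platform copies deleted from assembler_sparc.hpp and assembler_x86.hpp above. A quick standalone check of the boundary semantics, mirroring the definitions rather than re-deriving them, and assuming a 64-bit build for the simm32 cases:

    #include <cassert>
    #include <cstdint>

    static intptr_t min_simm(int nbits) { return -(intptr_t(1) << (nbits - 1)); }
    static intptr_t max_simm(int nbits) { return  (intptr_t(1) << (nbits - 1)) - 1; }
    static bool is_simm(intptr_t x, int nbits) {
      return min_simm(nbits) <= x && x <= max_simm(nbits);
    }

    int main() {
      // simm13 is the SPARC 13-bit signed immediate: -4096..4095 inclusive.
      assert( is_simm(-4096, 13) &&  is_simm(4095, 13));
      assert(!is_simm(-4097, 13) && !is_simm(4096, 13));
      // simm32: exactly the values that survive an int32_t round-trip.
      assert( is_simm(INT32_MIN, 32) && is_simm(INT32_MAX, 32));
      assert(!is_simm((intptr_t)INT32_MAX + 1, 32));
      return 0;
    }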
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -3495,9 +3495,6 @@
     if (profile_calls()) {
       profile_call(recv, holder_known ? callee->holder() : NULL);
     }
-    if (profile_inlined_calls()) {
-      profile_invocation(callee, copy_state_before());
-    }
   }
 
   // Introduce a new callee continuation point - if the callee has
@@ -3571,6 +3568,10 @@
     append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args));
   }
 
+  if (profile_inlined_calls()) {
+    profile_invocation(callee, copy_state_before_with_bci(SynchronizationEntryBCI));
+  }
+
   BlockBegin* callee_start_block = block_at(0);
   if (callee_start_block != NULL) {
     assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header");
--- a/hotspot/src/share/vm/c1/c1_Instruction.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/c1/c1_Instruction.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -501,6 +501,7 @@
   virtual RoundFP*          as_RoundFP()         { return NULL; }
   virtual ExceptionObject*  as_ExceptionObject() { return NULL; }
   virtual UnsafeOp*         as_UnsafeOp()        { return NULL; }
+  virtual ProfileInvoke*    as_ProfileInvoke()   { return NULL; }
 
   virtual void visit(InstructionVisitor* v)      = 0;
 
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -429,7 +429,7 @@
         // all locals are dead on exit from the synthetic unlocker
         liveness.clear();
       } else {
-        assert(x->as_MonitorEnter(), "only other case is MonitorEnter");
+        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
       }
     }
     if (!liveness.is_valid()) {
--- a/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -150,11 +150,23 @@
   clear_bits(vars, _arg_local);
 }
 
-void BCEscapeAnalyzer::set_global_escape(ArgumentMap vars) {
+void BCEscapeAnalyzer::set_global_escape(ArgumentMap vars, bool merge) {
   clear_bits(vars, _arg_local);
   clear_bits(vars, _arg_stack);
   if (vars.contains_allocated())
     _allocated_escapes = true;
+
+  if (merge && !vars.is_empty()) {
+    // Merge new state into already processed block.
+    // New state is not taken into account and
+    // it may invalidate set_returned() result.
+    if (vars.contains_unknown() || vars.contains_allocated()) {
+      _return_local = false;
+    }
+    if (vars.contains_unknown() || vars.contains_vars()) {
+      _return_allocated = false;
+    }
+  }
 }
 
 void BCEscapeAnalyzer::set_dirty(ArgumentMap vars) {
@@ -999,7 +1011,7 @@
       t.set_difference(d_state->_stack[i]);
       extra_vars.set_union(t);
     }
-    set_global_escape(extra_vars);
+    set_global_escape(extra_vars, true);
   }
 }
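The new merge flag handles escape state flowing into a block that has already been analyzed: since the block will not be re-interpreted, the conservative response is to invalidate any set_returned() conclusion the new state could contradict. A standalone model of just that invalidation step (ReturnState and the contains_* booleans are hypothetical stand-ins for the analyzer's fields and ArgumentMap queries):

    struct ReturnState {
      bool return_local;      // method returns only its own arguments
      bool return_allocated;  // method returns only freshly-allocated objects
    };

    void merge_into_processed_block(ReturnState& s,
                                    bool contains_unknown,
                                    bool contains_vars,
                                    bool contains_allocated) {
      if (contains_unknown || contains_allocated) s.return_local     = false;
      if (contains_unknown || contains_vars)      s.return_allocated = false;
    }

    int main() {
      ReturnState s = { true, true };
      merge_into_processed_block(s, false, true, false);   // only vars merged in
      return (s.return_local && !s.return_allocated) ? 0 : 1;
    }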
 
--- a/hotspot/src/share/vm/ci/bcEscapeAnalyzer.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/ci/bcEscapeAnalyzer.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -81,7 +81,7 @@
   bool is_arg_stack(ArgumentMap vars);
   void clear_bits(ArgumentMap vars, VectorSet &bs);
   void set_method_escape(ArgumentMap vars);
-  void set_global_escape(ArgumentMap vars);
+  void set_global_escape(ArgumentMap vars, bool merge = false);
   void set_dirty(ArgumentMap vars);
   void set_modified(ArgumentMap vars, int offs, int size);
 
--- a/hotspot/src/share/vm/ci/ciMethod.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -295,12 +295,6 @@
   // Print the name of this method in various incarnations.
   void print_name(outputStream* st = tty);
   void print_short_name(outputStream* st = tty);
-
-  methodOop get_method_handle_target() {
-    KlassHandle receiver_limit; int flags = 0;
-    methodHandle m = MethodHandles::decode_method(get_oop(), receiver_limit, flags);
-    return m();
-  }
 };
 
 #endif // SHARE_VM_CI_CIMETHOD_HPP
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -1748,7 +1748,7 @@
     tty->print("%4d ", compile_id);    // print compilation number
     tty->print("%s ", (is_osr ? "%" : " "));
     int code_size = (task->code() == NULL) ? 0 : task->code()->total_size();
-    tty->print_cr("size: %d time: %d inlined: %d bytes", code_size, time.milliseconds(), task->num_inlined_bytecodes());
+    tty->print_cr("size: %d time: %d inlined: %d bytes", code_size, (int)time.milliseconds(), task->num_inlined_bytecodes());
   }
 
   if (compilable == ciEnv::MethodCompilable_never) {
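The cast added above matters because time.milliseconds() returns a 64-bit jlong while "%d" consumes an int; on LP64 platforms the mismatched varargs read is undefined behavior. A minimal reproduction of the hazard and the fix (plain printf standing in for tty->print_cr):

    #include <cstdio>
    #include <cstdint>

    int main() {
      int64_t ms = 1234;                   // plays the role of jlong milliseconds
      // std::printf("time: %d\n", ms);    // mismatched: %d reads only 32 bits
      std::printf("time: %d\n", (int)ms);  // explicit narrowing matches %d
      return 0;
    }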
--- a/hotspot/src/share/vm/opto/block.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/block.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -898,45 +898,41 @@
 void PhaseCFG::verify( ) const {
 #ifdef ASSERT
   // Verify sane CFG
-  for( uint i = 0; i < _num_blocks; i++ ) {
+  for (uint i = 0; i < _num_blocks; i++) {
     Block *b = _blocks[i];
     uint cnt = b->_nodes.size();
     uint j;
-    for( j = 0; j < cnt; j++ ) {
+    for (j = 0; j < cnt; j++) {
       Node *n = b->_nodes[j];
       assert( _bbs[n->_idx] == b, "" );
-      if( j >= 1 && n->is_Mach() &&
-          n->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
-        assert( j == 1 || b->_nodes[j-1]->is_Phi(),
-                "CreateEx must be first instruction in block" );
+      if (j >= 1 && n->is_Mach() &&
+          n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
+        assert(j == 1 || b->_nodes[j-1]->is_Phi(),
+               "CreateEx must be first instruction in block");
       }
-      for( uint k = 0; k < n->req(); k++ ) {
+      for (uint k = 0; k < n->req(); k++) {
         Node *def = n->in(k);
-        if( def && def != n ) {
-          assert( _bbs[def->_idx] || def->is_Con(),
-                  "must have block; constants for debug info ok" );
+        if (def && def != n) {
+          assert(_bbs[def->_idx] || def->is_Con(),
+                 "must have block; constants for debug info ok");
           // Verify that instructions in the block are in the correct order.
           // Uses must follow their definition if they are in the same block.
           // Mostly done to check that MachSpillCopy nodes are placed correctly
           // when a CreateEx node is moved in build_ifg_physical().
-          if( _bbs[def->_idx] == b &&
+          if (_bbs[def->_idx] == b &&
               !(b->head()->is_Loop() && n->is_Phi()) &&
               // See (+++) comment in reg_split.cpp
-              !(n->jvms() != NULL && n->jvms()->is_monitor_use(k)) ) {
+              !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
             bool is_loop = false;
             if (n->is_Phi()) {
-              for( uint l = 1; l < def->req(); l++ ) {
+              for (uint l = 1; l < def->req(); l++) {
                 if (n == def->in(l)) {
                   is_loop = true;
                   break; // Some kind of loop
                 }
               }
             }
-            assert( is_loop || b->find_node(def) < j, "uses must follow definitions" );
-          }
-          if( def->is_SafePointScalarObject() ) {
-            assert(_bbs[def->_idx] == b, "SafePointScalarObject Node should be at the same block as its SafePoint node");
-            assert(_bbs[def->_idx] == _bbs[def->in(0)->_idx], "SafePointScalarObject Node should be at the same block as its control edge");
+            assert(is_loop || b->find_node(def) < j, "uses must follow definitions");
           }
         }
       }
@@ -946,12 +942,11 @@
     Node *bp = (Node*)b->_nodes[b->_nodes.size()-1]->is_block_proj();
     assert( bp, "last instruction must be a block proj" );
     assert( bp == b->_nodes[j], "wrong number of successors for this block" );
-    if( bp->is_Catch() ) {
-      while( b->_nodes[--j]->is_MachProj() ) ;
-      assert( b->_nodes[j]->is_MachCall(), "CatchProj must follow call" );
-    }
-    else if( bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If ) {
-      assert( b->_num_succs == 2, "Conditional branch must have two targets");
+    if (bp->is_Catch()) {
+      while (b->_nodes[--j]->is_MachProj()) ;
+      assert(b->_nodes[j]->is_MachCall(), "CatchProj must follow call");
+    } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
+      assert(b->_num_succs == 2, "Conditional branch must have two targets");
     }
   }
 #endif
--- a/hotspot/src/share/vm/opto/block.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/block.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -281,6 +281,8 @@
   // Find and remove n from block list
   void find_remove( const Node *n );
 
+  // Helper function that adds caller-save registers to a MachProjNode.
+  void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
   // Schedule a call next in the block
   uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call);
 
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -456,6 +456,12 @@
   product(intx, EliminateAllocationArraySizeLimit, 64,                      \
           "Array size (number of elements) limit for scalar replacement")   \
                                                                             \
+  product(bool, OptimizePtrCompare, true,                                   \
+          "Use escape analysis to optimize pointers compare")               \
+                                                                            \
+  notproduct(bool, PrintOptimizePtrCompare, false,                          \
+          "Print information about optimized pointers compare")             \
+                                                                            \
   product(bool, UseOptoBiasInlining, true,                                  \
           "Generate biased locking code in C2 ideal graph")                 \
                                                                             \
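Both flags are introduced by this change: OptimizePtrCompare is a product flag, on by default, while PrintOptimizePtrCompare is notproduct and therefore only available in debug builds. Typical invocations (the -XX:+Verbose pairing assumes a debug JVM, matching the Verbose check in escape.cpp below):

    java -XX:-OptimizePtrCompare ...                    # disable the new folding
    java -XX:+PrintOptimizePtrCompare -XX:+Verbose ...  # debug build: trace replaced CmpP/CmpN nodes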
--- a/hotspot/src/share/vm/opto/callGenerator.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -318,17 +318,17 @@
   return new DirectCallGenerator(m, separate_io_proj);
 }
 
-CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
-  assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch");
-  return new DynamicCallGenerator(m);
-}
-
 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
   assert(!m->is_static(), "for_virtual_call mismatch");
   assert(!m->is_method_handle_invoke(), "should be a direct call");
   return new VirtualCallGenerator(m, vtable_index);
 }
 
+CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
+  assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch");
+  return new DynamicCallGenerator(m);
+}
+
 // Allow inlining decisions to be delayed
 class LateInlineCallGenerator : public DirectCallGenerator {
   CallGenerator* _inline_cg;
@@ -576,7 +576,9 @@
     kit.set_control(slow_ctl);
     if (!kit.stopped()) {
       slow_jvms = _if_missed->generate(kit.sync_jvms());
-      assert(slow_jvms != NULL, "miss path must not fail to generate");
+      if (kit.failing())
+        return NULL;  // might happen because of NodeCountInliningCutoff
+      assert(slow_jvms != NULL, "must be");
       kit.add_exception_states_from(slow_jvms);
       kit.set_map(slow_jvms->map());
       if (!kit.stopped())
@@ -682,6 +684,15 @@
 }
 
 
+CallGenerator* CallGenerator::for_method_handle_call(Node* method_handle, JVMState* jvms,
+                                                     ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
+  assert(callee->is_method_handle_invoke() || callee->is_method_handle_adapter(), "for_method_handle_call mismatch");
+  CallGenerator* cg = CallGenerator::for_method_handle_inline(method_handle, jvms, caller, callee, profile);
+  if (cg != NULL)
+    return cg;
+  return CallGenerator::for_direct_call(callee);
+}
+
 CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMState* jvms,
                                                        ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
   if (method_handle->Opcode() == Op_ConP) {
@@ -721,8 +732,8 @@
     // Generate a guard so that each can be inlined.  We might want to
     // do more inputs at a later point but this gets the most common
     // case.
-    CallGenerator* cg1 = for_method_handle_inline(method_handle->in(1), jvms, caller, callee, profile.rescale(1.0 - prob));
-    CallGenerator* cg2 = for_method_handle_inline(method_handle->in(2), jvms, caller, callee, profile.rescale(prob));
+    CallGenerator* cg1 = for_method_handle_call(method_handle->in(1), jvms, caller, callee, profile.rescale(1.0 - prob));
+    CallGenerator* cg2 = for_method_handle_call(method_handle->in(2), jvms, caller, callee, profile.rescale(prob));
     if (cg1 != NULL && cg2 != NULL) {
       const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr();
       ciObject* const_oop = oop_ptr->const_oop();
@@ -733,6 +744,17 @@
   return NULL;
 }
 
+CallGenerator* CallGenerator::for_invokedynamic_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
+  assert(callee->is_method_handle_invoke() || callee->is_method_handle_adapter(), "for_invokedynamic_call mismatch");
+  // Get the CallSite object.
+  ciBytecodeStream str(caller);
+  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
+  ciCallSite* call_site = str.get_call_site();
+  CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, callee, profile);
+  if (cg != NULL)
+    return cg;
+  return CallGenerator::for_dynamic_call(callee);
+}
 
 CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms,
                                                        ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
@@ -819,7 +841,9 @@
     kit.set_control(slow_ctl);
     if (!kit.stopped()) {
       slow_jvms = _if_missed->generate(kit.sync_jvms());
-      assert(slow_jvms != NULL, "miss path must not fail to generate");
+      if (kit.failing())
+        return NULL;  // might happen because of NodeCountInliningCutoff
+      assert(slow_jvms != NULL, "must be");
       kit.add_exception_states_from(slow_jvms);
       kit.set_map(slow_jvms->map());
       if (!kit.stopped())
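The two new wrappers, for_method_handle_call and for_invokedynamic_call, package the policy that previously lived inline in doCall.cpp: try the inline-capable generator first and fall back to the plain out-of-line generator when it returns NULL. Reduced to its shape (Generator, try_inline, and for_call are hypothetical stand-ins):

    #include <cstdio>

    struct Generator { const char* kind; };
    static Generator inline_gen   = { "inlined" };
    static Generator fallback_gen = { "out-of-line" };

    // Stand-in for the *_inline() factories above: may fail and return NULL.
    static Generator* try_inline(bool ok) { return ok ? &inline_gen : 0; }

    // Shape of the wrappers: the caller always receives a usable generator.
    static Generator* for_call(bool ok) {
      Generator* g = try_inline(ok);
      return (g != 0) ? g : &fallback_gen;
    }

    int main() {
      std::printf("%s %s\n", for_call(true)->kind, for_call(false)->kind);
      return 0;
    }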
--- a/hotspot/src/share/vm/opto/callGenerator.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/callGenerator.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -108,8 +108,11 @@
 
   // How to generate vanilla out-of-line call sites:
   static CallGenerator* for_direct_call(ciMethod* m, bool separate_io_projs = false);   // static, special
+  static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);  // virtual, interface
   static CallGenerator* for_dynamic_call(ciMethod* m);   // invokedynamic
-  static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);  // virtual, interface
+
+  static CallGenerator* for_method_handle_call(Node* method_handle, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
+  static CallGenerator* for_invokedynamic_call(                     JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
 
   static CallGenerator* for_method_handle_inline(Node* method_handle,   JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
   static CallGenerator* for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
--- a/hotspot/src/share/vm/opto/callnode.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/callnode.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -1071,8 +1071,11 @@
   init_class_id(Class_SafePointScalarObject);
 }
 
-bool SafePointScalarObjectNode::pinned() const { return true; }
-bool SafePointScalarObjectNode::depends_only_on_test() const { return false; }
+// Do not allow value-numbering for SafePointScalarObject node.
+uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
+uint SafePointScalarObjectNode::cmp( const Node &n ) const {
+  return (&n == this); // Always fail except on self
+}
 
 uint SafePointScalarObjectNode::ideal_reg() const {
   return 0; // No matching to machine instruction
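Returning NO_HASH from hash() keeps a node out of the GVN hash table, and a cmp() that matches only the receiver guarantees no other node is ever value-numbered to it. A self-contained model of the idiom (the simplified Node class below is illustrative, not the HotSpot one):

    #include <cassert>

    struct Node {
      static const unsigned NO_HASH = 0;
      virtual unsigned hash() const { return 42; }   // ordinary nodes hash by value
      virtual bool cmp(const Node& n) const { (void)n; return true; }
      virtual ~Node() {}
    };

    struct UniqueNode : Node {
      unsigned hash() const { return NO_HASH; }              // skip the GVN table
      bool cmp(const Node& n) const { return &n == this; }   // fail except on self
    };

    int main() {
      UniqueNode a, b;
      assert(a.cmp(a) && !a.cmp(b));   // never commoned with another node
      return 0;
    }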
@@ -1096,7 +1099,6 @@
   if (cached != NULL) {
     return (SafePointScalarObjectNode*)cached;
   }
-  Compile* C = Compile::current();
   SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
   res->_first_index += jvms_adj;
   sosn_map->Insert((void*)this, (void*)res);
@@ -1142,6 +1144,8 @@
 
 Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
   if (remove_dead_region(phase, can_reshape))  return this;
+  // Don't bother trying to transform a dead node
+  if (in(0) && in(0)->is_top())  return NULL;
 
   const Type* type = phase->type(Ideal_length());
   if (type->isa_int() && type->is_int()->_hi < 0) {
@@ -1522,13 +1526,16 @@
 
   // perform any generic optimizations first (returns 'this' or NULL)
   Node *result = SafePointNode::Ideal(phase, can_reshape);
+  if (result != NULL)  return result;
+  // Don't bother trying to transform a dead node
+  if (in(0) && in(0)->is_top())  return NULL;
 
   // Now see if we can optimize away this lock.  We don't actually
   // remove the locking here, we simply set the _eliminate flag which
   // prevents macro expansion from expanding the lock.  Since we don't
   // modify the graph, the value returned from this function is the
   // one computed above.
-  if (result == NULL && can_reshape && EliminateLocks && !is_eliminated()) {
+  if (can_reshape && EliminateLocks && (!is_eliminated() || is_coarsened())) {
     //
     // If we are locking an unescaped object, the lock/unlock is unnecessary
     //
@@ -1537,8 +1544,16 @@
     if (cgr != NULL)
       es = cgr->escape_state(obj_node());
     if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
-      // Mark it eliminated to update any counters
-      this->set_eliminated();
+      if (!is_eliminated()) {
+        // Mark it eliminated to update any counters
+        this->set_eliminated();
+      } else {
+        assert(is_coarsened(), "sanity");
+        // The lock could have been marked eliminated by the lock coarsening
+        // code during the first IGVN pass, before EA. Clear the coarsened flag
+        // to eliminate all associated locks/unlocks.
+        this->clear_coarsened();
+      }
       return result;
     }
 
@@ -1546,7 +1561,7 @@
     // Try lock coarsening
     //
     PhaseIterGVN* iter = phase->is_IterGVN();
-    if (iter != NULL) {
+    if (iter != NULL && !is_eliminated()) {
 
       GrowableArray<AbstractLockNode*>   lock_ops;
 
@@ -1602,7 +1617,7 @@
           lock->set_eliminated();
           lock->set_coarsened();
         }
-      } else if (result != NULL && ctrl->is_Region() &&
+      } else if (ctrl->is_Region() &&
                  iter->_worklist.member(ctrl)) {
         // We weren't able to find any opportunities but the region this
         // lock is control dependent on hasn't been processed yet so put
@@ -1623,7 +1638,10 @@
 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 
   // perform any generic optimizations first (returns 'this' or NULL)
-  Node * result = SafePointNode::Ideal(phase, can_reshape);
+  Node *result = SafePointNode::Ideal(phase, can_reshape);
+  if (result != NULL)  return result;
+  // Don't bother trying to transform a dead node
+  if (in(0) && in(0)->is_top())  return NULL;
 
   // Now see if we can optimize away this unlock.  We don't actually
   // remove the unlocking here, we simply set the _eliminate flag which
@@ -1631,7 +1649,7 @@
   // modify the graph, the value returned from this function is the
   // one computed above.
   // Escape state is defined after Parse phase.
-  if (result == NULL && can_reshape && EliminateLocks && !is_eliminated()) {
+  if (can_reshape && EliminateLocks && (!is_eliminated() || is_coarsened())) {
     //
     // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
     //
@@ -1640,8 +1658,16 @@
     if (cgr != NULL)
       es = cgr->escape_state(obj_node());
     if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
-      // Mark it eliminated to update any counters
-      this->set_eliminated();
+      if (!is_eliminated()) {
+        // Mark it eliminated to update any counters
+        this->set_eliminated();
+      } else {
+        assert(is_coarsened(), "sanity");
+        // The lock could have been marked eliminated by the lock coarsening
+        // code during the first IGVN pass, before EA. Clear the coarsened flag
+        // to eliminate all associated locks/unlocks.
+        this->clear_coarsened();
+      }
     }
   }
   return result;
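Lock::Ideal and Unlock::Ideal above now share a three-way decision once escape analysis proves the locked object non-escaping. A compact model of that decision (the Action enum is hypothetical; it mirrors the branches in the code, including the sanity assert's implication that an already-eliminated lock here must be coarsened):

    #include <cassert>

    enum Action { MARK_ELIMINATED, CLEAR_COARSENED, NOTHING };

    static Action decide(bool non_escaping, bool eliminated, bool coarsened) {
      if (!non_escaping) return NOTHING;          // escaping objects keep their locks
      if (!eliminated)   return MARK_ELIMINATED;  // mark eliminated, update counters
      // Already eliminated implies lock coarsening set the flag during the first
      // IGVN pass; clearing it re-exposes the whole group for elimination.
      return coarsened ? CLEAR_COARSENED : NOTHING;
    }

    int main() {
      assert(decide(true,  false, false) == MARK_ELIMINATED);
      assert(decide(true,  true,  true ) == CLEAR_COARSENED);
      assert(decide(false, true,  true ) == NOTHING);
      return 0;
    }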
--- a/hotspot/src/share/vm/opto/callnode.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/callnode.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -440,6 +440,10 @@
                      // states of the scalarized object fields are collected.
   uint _n_fields;    // Number of non-static fields of the scalarized object.
   DEBUG_ONLY(AllocateNode* _alloc;)
+
+  virtual uint hash() const ; // { return NO_HASH; }
+  virtual uint cmp( const Node &n ) const;
+
 public:
   SafePointScalarObjectNode(const TypeOopPtr* tp,
 #ifdef ASSERT
@@ -454,15 +458,10 @@
 
   uint first_index() const { return _first_index; }
   uint n_fields()    const { return _n_fields; }
-  DEBUG_ONLY(AllocateNode* alloc() const { return _alloc; })
 
-  // SafePointScalarObject should be always pinned to the control edge
-  // of the SafePoint node for which it was generated.
-  virtual bool pinned() const; // { return true; }
-
-  // SafePointScalarObject depends on the SafePoint node
-  // for which it was generated.
-  virtual bool depends_only_on_test() const; // { return false; }
+#ifdef ASSERT
+  AllocateNode* alloc() const { return _alloc; }
+#endif
 
   virtual uint size_of() const { return sizeof(*this); }
 
@@ -880,6 +879,7 @@
 
   bool is_coarsened()  { return _coarsened; }
   void set_coarsened() { _coarsened = true; }
+  void clear_coarsened() { _coarsened = false; }
 
   // locking does not modify its arguments
   virtual bool        may_modify(const TypePtr *addr_t, PhaseTransform *phase){ return false;}
--- a/hotspot/src/share/vm/opto/cfgnode.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -460,8 +460,11 @@
     // Is it a dead loop?
     // If it is a LoopNode it had 2 (+1 itself) inputs and
     // one of them was cut. The loop is dead if it was EntryControl.
-    assert(!this->is_Loop() || cnt_orig == 3, "Loop node should have 3 inputs");
-    if (this->is_Loop() && del_it == LoopNode::EntryControl ||
+    // A Loop node may have only one input because the entry path
+    // is removed in PhaseIdealLoop::Dominators().
+    assert(!this->is_Loop() || cnt_orig <= 3, "Loop node should have 3 or less inputs");
+    if (this->is_Loop() && (del_it == LoopNode::EntryControl ||
+                            del_it == 0 && is_unreachable_region(phase)) ||
        !this->is_Loop() && has_phis && is_unreachable_region(phase)) {
       // Yes,  the region will be removed during the next step below.
       // Cut the backedge input and remove phis since no data paths left.
@@ -1585,14 +1588,17 @@
     // Only one not-NULL unique input path is left.
     // Determine if this input is backedge of a loop.
     // (Skip new phis which have no uses and dead regions).
-    if( outcnt() > 0 && r->in(0) != NULL ) {
+    if (outcnt() > 0 && r->in(0) != NULL) {
       // First, take the short cut when we know it is a loop and
       // the EntryControl data path is dead.
-      assert(!r->is_Loop() || r->req() == 3, "Loop node should have 3 inputs");
+      // A Loop node may have only one input because the entry path
+      // is removed in PhaseIdealLoop::Dominators().
+      assert(!r->is_Loop() || r->req() <= 3, "Loop node should have 3 or less inputs");
+      bool is_loop = (r->is_Loop() && r->req() == 3);
       // Then, check if there is a data loop when phi references itself directly
       // or through other data nodes.
-      if( r->is_Loop() && !phase->eqv_uncast(uin, in(LoopNode::EntryControl)) ||
-         !r->is_Loop() && is_unsafe_data_reference(uin) ) {
+      if (is_loop && !phase->eqv_uncast(uin, in(LoopNode::EntryControl)) ||
+         !is_loop && is_unsafe_data_reference(uin)) {
         // Break this data loop to avoid creation of a dead loop.
         if (can_reshape) {
           return top;
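Both rewritten conditions in this file lean on C++ precedence: && binds tighter than ||, so a && b || c && d parses as (a && b) || (c && d), and the new del_it == 0 && is_unreachable_region(phase) term only needed parentheses to group it under the Loop case. A compile-time spot check of that parse (nothing HotSpot-specific):

    // If || bound tighter, this would parse as (true || false) && false == false.
    static_assert((true || false && false) == true,
                  "&& binds tighter than ||");

    int main() { return 0; }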
--- a/hotspot/src/share/vm/opto/compile.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/compile.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -1711,11 +1711,22 @@
 
     if (failing())  return;
 
+    // Optimize out field loads from scalar-replaceable allocations.
     igvn.optimize();
     print_method("Iter GVN after EA", 2);
 
     if (failing())  return;
 
+    if (congraph() != NULL && macro_count() > 0) {
+      PhaseMacroExpand mexp(igvn);
+      mexp.eliminate_macro_nodes();
+      igvn.set_delay_transform(false);
+
+      igvn.optimize();
+      print_method("Iter GVN after eliminating allocations and locks", 2);
+
+      if (failing())  return;
+    }
   }
 
   // Loop transforms on the ideal graph.  Range Check Elimination,
@@ -3052,24 +3063,13 @@
   return false;
 }
 
-// Emit constants grouped in the following order:
-static BasicType type_order[] = {
-  T_FLOAT,    // 32-bit
-  T_OBJECT,   // 32 or 64-bit
-  T_ADDRESS,  // 32 or 64-bit
-  T_DOUBLE,   // 64-bit
-  T_LONG,     // 64-bit
-  T_VOID,     // 32 or 64-bit (jump-tables are at the end of the constant table for code emission reasons)
-  T_ILLEGAL
-};
-
 static int type_to_size_in_bytes(BasicType t) {
   switch (t) {
   case T_LONG:    return sizeof(jlong  );
   case T_FLOAT:   return sizeof(jfloat );
   case T_DOUBLE:  return sizeof(jdouble);
     // We use T_VOID as marker for jump-table entries (labels) which
-    // need an interal word relocation.
+    // need an internal word relocation.
   case T_VOID:
   case T_ADDRESS:
   case T_OBJECT:  return sizeof(jobject);
@@ -3079,87 +3079,92 @@
   return -1;
 }
 
+int Compile::ConstantTable::qsort_comparator(Constant* a, Constant* b) {
+  // sort descending
+  if (a->freq() > b->freq())  return -1;
+  if (a->freq() < b->freq())  return  1;
+  return 0;
+}
+
 void Compile::ConstantTable::calculate_offsets_and_size() {
-  int size = 0;
-  for (int t = 0; type_order[t] != T_ILLEGAL; t++) {
-    BasicType type = type_order[t];
-
-    for (int i = 0; i < _constants.length(); i++) {
-      Constant con = _constants.at(i);
-      if (con.type() != type)  continue;  // Skip other types.
-
-      // Align size for type.
-      int typesize = type_to_size_in_bytes(con.type());
-      size = align_size_up(size, typesize);
-
-      // Set offset.
-      con.set_offset(size);
-      _constants.at_put(i, con);
-
-      // Add type size.
-      size = size + typesize;
+  // First, sort the array by frequencies.
+  _constants.sort(qsort_comparator);
+
+#ifdef ASSERT
+  // Make sure all jump-table entries were sorted to the end of the
+  // array (they have a negative frequency).
+  bool found_void = false;
+  for (int i = 0; i < _constants.length(); i++) {
+    Constant con = _constants.at(i);
+    if (con.type() == T_VOID)
+      found_void = true;  // jump-tables
+    else
+      assert(!found_void, "wrong sorting");
+  }
+#endif
+
+  int offset = 0;
+  for (int i = 0; i < _constants.length(); i++) {
+    Constant* con = _constants.adr_at(i);
+
+    // Align offset for type.
+    int typesize = type_to_size_in_bytes(con->type());
+    offset = align_size_up(offset, typesize);
+    con->set_offset(offset);   // set constant's offset
+
+    if (con->type() == T_VOID) {
+      MachConstantNode* n = (MachConstantNode*) con->get_jobject();
+      offset = offset + typesize * n->outcnt();  // expand jump-table
+    } else {
+      offset = offset + typesize;
     }
   }
 
   // Align size up to the next section start (which is insts; see
   // CodeBuffer::align_at_start).
   assert(_size == -1, "already set?");
-  _size = align_size_up(size, CodeEntryAlignment);
-
-  if (Matcher::constant_table_absolute_addressing) {
-    set_table_base_offset(0);  // No table base offset required
-  } else {
-    if (UseRDPCForConstantTableBase) {
-      // table base offset is set in MachConstantBaseNode::emit
-    } else {
-      // When RDPC is not used, the table base is set into the middle of
-      // the constant table.
-      int half_size = _size / 2;
-      assert(half_size * 2 == _size, "sanity");
-      set_table_base_offset(-half_size);
-    }
-  }
+  _size = align_size_up(offset, CodeEntryAlignment);
 }
 
 void Compile::ConstantTable::emit(CodeBuffer& cb) {
   MacroAssembler _masm(&cb);
-  for (int t = 0; type_order[t] != T_ILLEGAL; t++) {
-    BasicType type = type_order[t];
-
-    for (int i = 0; i < _constants.length(); i++) {
-      Constant con = _constants.at(i);
-      if (con.type() != type)  continue;  // Skip other types.
-
-      address constant_addr;
-      switch (con.type()) {
-      case T_LONG:   constant_addr = _masm.long_constant(  con.get_jlong()  ); break;
-      case T_FLOAT:  constant_addr = _masm.float_constant( con.get_jfloat() ); break;
-      case T_DOUBLE: constant_addr = _masm.double_constant(con.get_jdouble()); break;
-      case T_OBJECT: {
-        jobject obj = con.get_jobject();
-        int oop_index = _masm.oop_recorder()->find_index(obj);
-        constant_addr = _masm.address_constant((address) obj, oop_Relocation::spec(oop_index));
-        break;
+  for (int i = 0; i < _constants.length(); i++) {
+    Constant con = _constants.at(i);
+    address constant_addr;
+    switch (con.type()) {
+    case T_LONG:   constant_addr = _masm.long_constant(  con.get_jlong()  ); break;
+    case T_FLOAT:  constant_addr = _masm.float_constant( con.get_jfloat() ); break;
+    case T_DOUBLE: constant_addr = _masm.double_constant(con.get_jdouble()); break;
+    case T_OBJECT: {
+      jobject obj = con.get_jobject();
+      int oop_index = _masm.oop_recorder()->find_index(obj);
+      constant_addr = _masm.address_constant((address) obj, oop_Relocation::spec(oop_index));
+      break;
+    }
+    case T_ADDRESS: {
+      address addr = (address) con.get_jobject();
+      constant_addr = _masm.address_constant(addr);
+      break;
+    }
+    // We use T_VOID as marker for jump-table entries (labels) which
+    // need an internal word relocation.
+    case T_VOID: {
+      MachConstantNode* n = (MachConstantNode*) con.get_jobject();
+      // Fill the jump-table with a dummy word.  The real value is
+      // filled in later in fill_jump_table.
+      address dummy = (address) n;
+      constant_addr = _masm.address_constant(dummy);
+      // Expand jump-table
+      for (uint i = 1; i < n->outcnt(); i++) {
+        address temp_addr = _masm.address_constant(dummy + i);
+        assert(temp_addr, "consts section too small");
       }
-      case T_ADDRESS: {
-        address addr = (address) con.get_jobject();
-        constant_addr = _masm.address_constant(addr);
-        break;
-      }
-      // We use T_VOID as marker for jump-table entries (labels) which
-      // need an interal word relocation.
-      case T_VOID: {
-        // Write a dummy word.  The real value is filled in later
-        // in fill_jump_table_in_constant_table.
-        address addr = (address) con.get_jobject();
-        constant_addr = _masm.address_constant(addr);
-        break;
-      }
-      default: ShouldNotReachHere();
-      }
-      assert(constant_addr != NULL, "consts section too small");
-      assert((constant_addr - _masm.code()->consts()->start()) == con.offset(), err_msg("must be: %d == %d", constant_addr - _masm.code()->consts()->start(), con.offset()));
+      break;
     }
+    default: ShouldNotReachHere();
+    }
+    assert(constant_addr, "consts section too small");
+    assert((constant_addr - _masm.code()->consts()->start()) == con.offset(), err_msg("must be: %d == %d", constant_addr - _masm.code()->consts()->start(), con.offset()));
   }
 }
 
@@ -3175,19 +3180,21 @@
   if (con.can_be_reused()) {
     int idx = _constants.find(con);
     if (idx != -1 && _constants.at(idx).can_be_reused()) {
+      _constants.adr_at(idx)->inc_freq(con.freq());  // increase the frequency by the current value
       return;
     }
   }
   (void) _constants.append(con);
 }
 
-Compile::Constant Compile::ConstantTable::add(BasicType type, jvalue value) {
-  Constant con(type, value);
+Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, BasicType type, jvalue value) {
+  Block* b = Compile::current()->cfg()->_bbs[n->_idx];
+  Constant con(type, value, b->_freq);
   add(con);
   return con;
 }
 
-Compile::Constant Compile::ConstantTable::add(MachOper* oper) {
+Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, MachOper* oper) {
   jvalue value;
   BasicType type = oper->type()->basic_type();
   switch (type) {
@@ -3198,20 +3205,18 @@
   case T_ADDRESS: value.l = (jobject) oper->constant(); break;
   default: ShouldNotReachHere();
   }
-  return add(type, value);
+  return add(n, type, value);
 }
 
-Compile::Constant Compile::ConstantTable::allocate_jump_table(MachConstantNode* n) {
+Compile::Constant Compile::ConstantTable::add_jump_table(MachConstantNode* n) {
   jvalue value;
   // We can use the node pointer here to identify the right jump-table
   // as this method is called from Compile::Fill_buffer right before
   // the MachNodes are emitted and the jump-table is filled (meaning the
   // MachNode pointers do not change anymore).
   value.l = (jobject) n;
-  Constant con(T_VOID, value, false);  // Labels of a jump-table cannot be reused.
-  for (uint i = 0; i < n->outcnt(); i++) {
-    add(con);
-  }
+  Constant con(T_VOID, value, next_jump_table_freq(), false);  // Labels of a jump-table cannot be reused.
+  add(con);
   return con;
 }
 
@@ -3230,9 +3235,9 @@
   MacroAssembler _masm(&cb);
   address* jump_table_base = (address*) (_masm.code()->consts()->start() + offset);
 
-  for (int i = 0; i < labels.length(); i++) {
+  for (uint i = 0; i < n->outcnt(); i++) {
     address* constant_addr = &jump_table_base[i];
-    assert(*constant_addr == (address) n, "all jump-table entries must contain node pointer");
+    assert(*constant_addr == (((address) n) + i), err_msg("all jump-table entries must contain adjusted node pointer: " INTPTR_FORMAT " == " INTPTR_FORMAT, *constant_addr, (((address) n) + i)));
     *constant_addr = cb.consts()->target(*labels.at(i), (address) constant_addr);
     cb.consts()->relocate((address) constant_addr, relocInfo::internal_word_type);
   }
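The rewritten calculate_offsets_and_size() replaces the fixed type-ordered layout with a frequency-ordered one: constants are sorted by descending block frequency so the hottest entries get the smallest offsets from the table base, and jump-tables, carrying negative frequencies, sort to the end. A standalone model of the layout pass (std::sort and the Con struct stand in for GrowableArray::sort and Compile::Constant):

    #include <algorithm>
    #include <vector>

    struct Con { float freq; int size; int offset; };

    // Align 'off' up to a power-of-two 'size', like align_size_up().
    static int align_up(int off, int size) { return (off + size - 1) & -size; }

    static bool freq_descending(const Con& a, const Con& b) { return a.freq > b.freq; }

    static int layout(std::vector<Con>& cons) {
      std::sort(cons.begin(), cons.end(), freq_descending);
      int off = 0;
      for (size_t i = 0; i < cons.size(); i++) {
        off = align_up(off, cons[i].size);
        cons[i].offset = off;      // hottest constants land nearest the base
        off += cons[i].size;
      }
      return off;                  // size before CodeEntryAlignment padding
    }

    int main() {
      std::vector<Con> cons(2);
      cons[0].freq = 0.5f; cons[0].size = 8;
      cons[1].freq = 9.0f; cons[1].size = 4;
      return (layout(cons) == 16 && cons[0].freq == 9.0f) ? 0 : 1;
    }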
--- a/hotspot/src/share/vm/opto/compile.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/compile.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -150,14 +150,16 @@
     BasicType _type;
     jvalue    _value;
     int       _offset;         // offset of this constant (in bytes) relative to the constant table base.
+    float     _freq;
     bool      _can_be_reused;  // true (default) if the value can be shared with other users.
 
   public:
-    Constant() : _type(T_ILLEGAL), _offset(-1), _can_be_reused(true) { _value.l = 0; }
-    Constant(BasicType type, jvalue value, bool can_be_reused = true) :
+    Constant() : _type(T_ILLEGAL), _offset(-1), _freq(0.0f), _can_be_reused(true) { _value.l = 0; }
+    Constant(BasicType type, jvalue value, float freq = 0.0f, bool can_be_reused = true) :
       _type(type),
       _value(value),
       _offset(-1),
+      _freq(freq),
       _can_be_reused(can_be_reused)
     {}
 
@@ -173,6 +175,9 @@
     int         offset()  const    { return _offset; }
     void    set_offset(int offset) {        _offset = offset; }
 
+    float       freq()    const    { return _freq;         }
+    void    inc_freq(float freq)   {        _freq += freq; }
+
     bool    can_be_reused() const  { return _can_be_reused; }
   };
 
@@ -182,41 +187,51 @@
     GrowableArray<Constant> _constants;          // Constants of this table.
     int                     _size;               // Size in bytes the emitted constant table takes (including padding).
     int                     _table_base_offset;  // Offset of the table base that gets added to the constant offsets.
+    int                     _nof_jump_tables;    // Number of jump-tables in this constant table.
+
+    static int qsort_comparator(Constant* a, Constant* b);
+
+    // We use negative frequencies to keep the jump-tables in the
+    // order in which they were added.  Otherwise we get into
+    // trouble with relocation.
+    float next_jump_table_freq() { return -1.0f * (++_nof_jump_tables); }
 
   public:
     ConstantTable() :
       _size(-1),
-      _table_base_offset(-1)  // We can use -1 here since the constant table is always bigger than 2 bytes (-(size / 2), see MachConstantBaseNode::emit).
+      _table_base_offset(-1),  // We can use -1 here since the constant table is always bigger than 2 bytes (-(size / 2), see MachConstantBaseNode::emit).
+      _nof_jump_tables(0)
     {}
 
-    int size() const { assert(_size != -1, "size not yet calculated"); return _size; }
+    int size() const { assert(_size != -1, "not calculated yet"); return _size; }
 
-    void set_table_base_offset(int x)  { assert(_table_base_offset == -1, "set only once");                        _table_base_offset = x; }
-    int      table_base_offset() const { assert(_table_base_offset != -1, "table base offset not yet set"); return _table_base_offset; }
+    int calculate_table_base_offset() const;  // AD specific
+    void set_table_base_offset(int x)  { assert(_table_base_offset == -1 || x == _table_base_offset, "can't change"); _table_base_offset = x; }
+    int      table_base_offset() const { assert(_table_base_offset != -1, "not set yet");                      return _table_base_offset; }
 
     void emit(CodeBuffer& cb);
 
     // Returns the offset of the last entry (the top) of the constant table.
-    int  top_offset() const { assert(_constants.top().offset() != -1, "constant not yet bound"); return _constants.top().offset(); }
+    int  top_offset() const { assert(_constants.top().offset() != -1, "not bound yet"); return _constants.top().offset(); }
 
     void calculate_offsets_and_size();
     int  find_offset(Constant& con) const;
 
     void     add(Constant& con);
-    Constant add(BasicType type, jvalue value);
-    Constant add(MachOper* oper);
-    Constant add(jfloat f) {
+    Constant add(MachConstantNode* n, BasicType type, jvalue value);
+    Constant add(MachConstantNode* n, MachOper* oper);
+    Constant add(MachConstantNode* n, jfloat f) {
       jvalue value; value.f = f;
-      return add(T_FLOAT, value);
+      return add(n, T_FLOAT, value);
     }
-    Constant add(jdouble d) {
+    Constant add(MachConstantNode* n, jdouble d) {
       jvalue value; value.d = d;
-      return add(T_DOUBLE, value);
+      return add(n, T_DOUBLE, value);
     }
 
-    // Jump table
-    Constant allocate_jump_table(MachConstantNode* n);
-    void         fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const;
+    // Jump-table
+    Constant  add_jump_table(MachConstantNode* n);
+    void     fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const;
   };
 
  private:
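next_jump_table_freq() is worth a second look: each jump-table receives a strictly decreasing negative frequency (-1, -2, -3, ...), so a descending sort on frequency reproduces insertion order among jump-tables even though qsort is not stable; that is what keeps the emitted tables aligned with their relocations. A tiny check of the property:

    #include <algorithm>
    #include <cassert>
    #include <functional>
    #include <vector>

    int main() {
      int nof_jump_tables = 0;
      std::vector<float> freqs;
      for (int i = 0; i < 3; i++)
        freqs.push_back(-1.0f * (++nof_jump_tables));   // -1, -2, -3
      std::sort(freqs.begin(), freqs.end(), std::greater<float>());
      assert(freqs[0] == -1.0f && freqs[2] == -3.0f);   // insertion order preserved
      return 0;
    }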
--- a/hotspot/src/share/vm/opto/doCall.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/doCall.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -62,7 +62,6 @@
 CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual,
                                        JVMState* jvms, bool allow_inline,
                                        float prof_factor) {
-  CallGenerator*  cg;
   ciMethod*       caller   = jvms->method();
   int             bci      = jvms->bci();
   Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
@@ -110,7 +109,7 @@
   // We do this before the strict f.p. check below because the
   // intrinsics handle strict f.p. correctly.
   if (allow_inline) {
-    cg = find_intrinsic(call_method, call_is_virtual);
+    CallGenerator* cg = find_intrinsic(call_method, call_is_virtual);
     if (cg != NULL)  return cg;
   }
 
@@ -121,33 +120,16 @@
   if (call_method->is_method_handle_invoke()) {
     if (bytecode != Bytecodes::_invokedynamic) {
       GraphKit kit(jvms);
-      Node* n = kit.argument(0);
-
-      CallGenerator* cg = CallGenerator::for_method_handle_inline(n, jvms, caller, call_method, profile);
-      if (cg != NULL) {
-        return cg;
-      }
-      return CallGenerator::for_direct_call(call_method);
+      Node* method_handle = kit.argument(0);
+      return CallGenerator::for_method_handle_call(method_handle, jvms, caller, call_method, profile);
     }
     else {
-      // Get the CallSite object.
-      ciMethod* caller_method = jvms->method();
-      ciBytecodeStream str(caller_method);
-      str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
-      ciCallSite* call_site = str.get_call_site();
-
-      CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, call_method, profile);
-      if (cg != NULL) {
-        return cg;
-      }
-      // If something failed, generate a normal dynamic call.
-      return CallGenerator::for_dynamic_call(call_method);
+      return CallGenerator::for_invokedynamic_call(jvms, caller, call_method, profile);
     }
   }
 
   // Do not inline strict fp into non-strict code, or the reverse
-  bool caller_method_is_strict = jvms->method()->is_strict();
-  if( caller_method_is_strict ^ call_method->is_strict() ) {
+  if (caller->is_strict() ^ call_method->is_strict()) {
     allow_inline = false;
   }
 
@@ -258,7 +240,7 @@
             }
             if (miss_cg != NULL) {
               NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count));
-              cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
+              CallGenerator* cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
               if (cg != NULL)  return cg;
             }
           }
--- a/hotspot/src/share/vm/opto/escape.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/escape.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -119,6 +119,8 @@
   } else {
     _noop_null = _oop_null; // Should be initialized
   }
+  _pcmp_neq = NULL; // Should be initialized
+  _pcmp_eq  = NULL;
 }
 
 void ConnectionGraph::add_pointsto_edge(uint from_i, uint to_i) {
@@ -128,6 +130,13 @@
   assert(f->node_type() != PointsToNode::UnknownType && t->node_type() != PointsToNode::UnknownType, "node types must be set");
   assert(f->node_type() == PointsToNode::LocalVar || f->node_type() == PointsToNode::Field, "invalid source of PointsTo edge");
   assert(t->node_type() == PointsToNode::JavaObject, "invalid destination of PointsTo edge");
+  if (to_i == _phantom_object) { // Quick test for most common object
+    if (f->has_unknown_ptr()) {
+      return;
+    } else {
+      f->set_has_unknown_ptr();
+    }
+  }
   add_edge(f, to_i, PointsToNode::PointsToEdge);
 }
 
@@ -163,6 +172,9 @@
 }
 
 void ConnectionGraph::add_field_edge(uint from_i, uint to_i, int offset) {
+  // Don't add fields to NULL pointer.
+  if (is_null_ptr(from_i))
+    return;
   PointsToNode *f = ptnode_adr(from_i);
   PointsToNode *t = ptnode_adr(to_i);
 
@@ -177,7 +189,7 @@
 
 void ConnectionGraph::set_escape_state(uint ni, PointsToNode::EscapeState es) {
   // Don't change non-escaping state of NULL pointer.
-  if (ni == _noop_null || ni == _oop_null)
+  if (is_null_ptr(ni))
     return;
   PointsToNode *npt = ptnode_adr(ni);
   PointsToNode::EscapeState old_es = npt->escape_state();
@@ -309,6 +321,9 @@
 
   visited->set(ni);
   PointsToNode *ptn = ptnode_adr(ni);
+  assert(ptn->node_type() == PointsToNode::LocalVar ||
+         ptn->node_type() == PointsToNode::Field, "sanity");
+  assert(ptn->edge_count() != 0, "should have at least phantom_object");
 
   // Mark current edges as visited and move deferred edges to separate array.
   for (uint i = 0; i < ptn->edge_count(); ) {
@@ -329,6 +344,7 @@
     uint t = deferred_edges->at(next);
     PointsToNode *ptt = ptnode_adr(t);
     uint e_cnt = ptt->edge_count();
+    assert(e_cnt != 0, "should have at least phantom_object");
     for (uint e = 0; e < e_cnt; e++) {
       uint etgt = ptt->edge_target(e);
       if (visited->test_set(etgt))
@@ -337,10 +353,6 @@
       PointsToNode::EdgeType et = ptt->edge_type(e);
       if (et == PointsToNode::PointsToEdge) {
         add_pointsto_edge(ni, etgt);
-        if(etgt == _phantom_object) {
-          // Special case - field set outside (globally escaping).
-          set_escape_state(ni, PointsToNode::GlobalEscape);
-        }
       } else if (et == PointsToNode::DeferredEdge) {
         deferred_edges->append(etgt);
       } else {
@@ -348,6 +360,20 @@
       }
     }
   }
+  if (ptn->edge_count() == 0) {
+    // No pointsto edges found after deferred edges are removed.
+    // For example, in the following case, where a call is replaced
+    // with an uncommon trap and, as a result, the array's load references
+    // itself through deferred edges:
+    //
+    // A a = b[i];
+    // if (c!=null) a = c.foo();
+    // b[i] = a;
+    //
+    // Assume the value was set outside this method and
+    // add edge to phantom object.
+    add_pointsto_edge(ni, _phantom_object);
+  }
 }
 
 
@@ -356,13 +382,25 @@
 //  a pointsto edge is added if it is a JavaObject
 
 void ConnectionGraph::add_edge_from_fields(uint adr_i, uint to_i, int offs) {
+  // No fields for NULL pointer.
+  if (is_null_ptr(adr_i)) {
+    return;
+  }
   PointsToNode* an = ptnode_adr(adr_i);
   PointsToNode* to = ptnode_adr(to_i);
   bool deferred = (to->node_type() == PointsToNode::LocalVar);
-
+  bool escaped  = (to_i == _phantom_object) && (offs == Type::OffsetTop);
+  if (escaped) {
+    // Values in fields escaped during the call.
+    assert(an->escape_state() >= PointsToNode::ArgEscape, "sanity");
+    offs = Type::OffsetBot;
+  }
   for (uint fe = 0; fe < an->edge_count(); fe++) {
     assert(an->edge_type(fe) == PointsToNode::FieldEdge, "expecting a field edge");
     int fi = an->edge_target(fe);
+    if (escaped) {
+      set_escape_state(fi, PointsToNode::GlobalEscape);
+    }
     PointsToNode* pf = ptnode_adr(fi);
     int po = pf->offset();
     if (po == offs || po == Type::OffsetBot || offs == Type::OffsetBot) {
@@ -377,6 +415,15 @@
 // Add a deferred  edge from node given by "from_i" to any field of adr_i
 // whose offset matches "offset".
 void ConnectionGraph::add_deferred_edge_to_fields(uint from_i, uint adr_i, int offs) {
+  // No fields for NULL pointer.
+  if (is_null_ptr(adr_i)) {
+    return;
+  }
+  if (adr_i == _phantom_object) {
+    // Add only one edge for unknown object.
+    add_pointsto_edge(from_i, _phantom_object);
+    return;
+  }
   PointsToNode* an = ptnode_adr(adr_i);
   bool is_alloc = an->_node->is_Allocate();
   for (uint fe = 0; fe < an->edge_count(); fe++) {
@@ -392,6 +439,13 @@
       add_deferred_edge(from_i, fi);
     }
   }
+  // Some field references (AddP) may still be missing
+  // until Connection Graph construction is complete.
+  // For example, loads from RAW pointers with offset 0
+  // which don't have AddP.
+  // A reference to phantom_object will be added if
+  // a field reference is still missing after completing
+  // Connection Graph (see remove_deferred()).
 }
 
 // Helper functions
@@ -1540,8 +1594,8 @@
 
   GrowableArray<Node*> alloc_worklist;
   GrowableArray<Node*> addp_worklist;
+  GrowableArray<Node*> ptr_cmp_worklist;
   PhaseGVN* igvn = _igvn;
-  bool has_allocations = false;
 
   // Push all useful nodes onto CG list and set their type.
   for( uint next = 0; next < worklist_init.size(); ++next ) {
@@ -1551,11 +1605,8 @@
     // for an escape status. See process_call_result() below.
     if (n->is_Allocate() || n->is_CallStaticJava() &&
         ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) {
-      has_allocations = true;
-      if (n->is_Allocate())
-        alloc_worklist.append(n);
-    }
-    if(n->is_AddP()) {
+      alloc_worklist.append(n);
+    } else if(n->is_AddP()) {
       // Collect address nodes. Use them during stage 3 below
       // to build initial connection graph field edges.
       addp_worklist.append(n);
@@ -1563,6 +1614,10 @@
       // Collect all MergeMem nodes to add memory slices for
       // scalar replaceable objects in split_unique_types().
       _mergemem_worklist.append(n->as_MergeMem());
+    } else if (OptimizePtrCompare && n->is_Cmp() &&
+               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
+      // Pointer compare nodes
+      ptr_cmp_worklist.append(n);
     }
     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
       Node* m = n->fast_out(i);   // Get user
@@ -1570,7 +1625,7 @@
     }
   }
 
-  if (!has_allocations) {
+  if (alloc_worklist.length() == 0) {
     _collecting = false;
     return false; // Nothing to do.
   }
@@ -1588,7 +1643,7 @@
   for( uint next = 0; next < addp_length; ++next ) {
     Node* n = addp_worklist.at(next);
     Node* base = get_addp_base(n);
-    if (base->is_Proj())
+    if (base->is_Proj() && base->in(0)->is_Call())
       base = base->in(0);
     PointsToNode::NodeType nt = ptnode_adr(base->_idx)->node_type();
     if (nt == PointsToNode::JavaObject) {
@@ -1653,39 +1708,7 @@
   }
 #undef CG_BUILD_ITER_LIMIT
 
-  Arena* arena = Thread::current()->resource_area();
-  VectorSet visited(arena);
-
-  // 5. Find fields initializing values for not escaped allocations
-  uint alloc_length = alloc_worklist.length();
-  for (uint next = 0; next < alloc_length; ++next) {
-    Node* n = alloc_worklist.at(next);
-    if (ptnode_adr(n->_idx)->escape_state() == PointsToNode::NoEscape) {
-      find_init_values(n, &visited, igvn);
-    }
-  }
-
-  worklist.clear();
-
-  // 6. Remove deferred edges from the graph.
-  uint cg_length = cg_worklist.length();
-  for (uint next = 0; next < cg_length; ++next) {
-    int ni = cg_worklist.at(next);
-    PointsToNode* ptn = ptnode_adr(ni);
-    PointsToNode::NodeType nt = ptn->node_type();
-    if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
-      remove_deferred(ni, &worklist, &visited);
-      Node *n = ptn->_node;
-    }
-  }
-
-  // 7. Adjust escape state of nonescaping objects.
-  for (uint next = 0; next < addp_length; ++next) {
-    Node* n = addp_worklist.at(next);
-    adjust_escape_state(n);
-  }
-
-  // 8. Propagate escape states.
+  // 5. Propagate escape states.
   worklist.clear();
 
   // mark all nodes reachable from GlobalEscape nodes
@@ -1694,20 +1717,72 @@
   // mark all nodes reachable from ArgEscape nodes
   bool has_non_escaping_obj = propagate_escape_state(&cg_worklist, &worklist, PointsToNode::ArgEscape);
 
+  Arena* arena = Thread::current()->resource_area();
+  VectorSet visited(arena);
+
+  // 6. Find fields' initializing values for non-escaping allocations
+  uint alloc_length = alloc_worklist.length();
+  for (uint next = 0; next < alloc_length; ++next) {
+    Node* n = alloc_worklist.at(next);
+    if (ptnode_adr(n->_idx)->escape_state() == PointsToNode::NoEscape) {
+      has_non_escaping_obj = true;
+      if (n->is_Allocate()) {
+        find_init_values(n, &visited, igvn);
+      }
+    }
+  }
+
+  uint cg_length = cg_worklist.length();
+
+  // Skip the rest of the code if all objects escaped.
+  if (!has_non_escaping_obj) {
+    cg_length = 0;
+    addp_length = 0;
+  }
+
+  for (uint next = 0; next < cg_length; ++next) {
+    int ni = cg_worklist.at(next);
+    PointsToNode* ptn = ptnode_adr(ni);
+    PointsToNode::NodeType nt = ptn->node_type();
+    if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
+      if (ptn->edge_count() == 0) {
+        // No values were found. Assume the value was set
+        // outside this method - add edge to phantom object.
+        add_pointsto_edge(ni, _phantom_object);
+      }
+    }
+  }
+
+  // 7. Remove deferred edges from the graph.
+  for (uint next = 0; next < cg_length; ++next) {
+    int ni = cg_worklist.at(next);
+    PointsToNode* ptn = ptnode_adr(ni);
+    PointsToNode::NodeType nt = ptn->node_type();
+    if (nt == PointsToNode::LocalVar || nt == PointsToNode::Field) {
+      remove_deferred(ni, &worklist, &visited);
+    }
+  }
+
+  // 8. Adjust escape state of non-escaping objects.
+  for (uint next = 0; next < addp_length; ++next) {
+    Node* n = addp_worklist.at(next);
+    adjust_escape_state(n);
+  }
+
   // push all NoEscape nodes on the worklist
+  worklist.clear();
   for( uint next = 0; next < cg_length; ++next ) {
     int nk = cg_worklist.at(next);
-    if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape)
+    if (ptnode_adr(nk)->escape_state() == PointsToNode::NoEscape &&
+        !is_null_ptr(nk))
       worklist.push(nk);
   }
+
   alloc_worklist.clear();
-  // mark all nodes reachable from NoEscape nodes
+  // Propagate scalar_replaceable value.
   while(worklist.length() > 0) {
     uint nk = worklist.pop();
     PointsToNode* ptn = ptnode_adr(nk);
-    if (ptn->node_type() == PointsToNode::JavaObject &&
-        !(nk == _noop_null || nk == _oop_null))
-      has_non_escaping_obj = true; // Non Escape
     Node* n = ptn->_node;
     bool scalar_replaceable = ptn->scalar_replaceable();
     if (n->is_Allocate() && scalar_replaceable) {
@@ -1719,6 +1794,8 @@
     uint e_cnt = ptn->edge_count();
     for (uint ei = 0; ei < e_cnt; ei++) {
       uint npi = ptn->edge_target(ei);
+      if (is_null_ptr(npi))
+        continue;
       PointsToNode *np = ptnode_adr(npi);
       if (np->escape_state() < PointsToNode::NoEscape) {
         set_escape_state(npi, PointsToNode::NoEscape);
@@ -1727,7 +1804,6 @@
         }
         worklist.push(npi);
       } else if (np->scalar_replaceable() && !scalar_replaceable) {
-        // Propagate scalar_replaceable value.
         np->set_scalar_replaceable(false);
         worklist.push(npi);
       }
@@ -1737,9 +1813,11 @@
   _collecting = false;
   assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
 
-  assert(ptnode_adr(_oop_null)->escape_state() == PointsToNode::NoEscape, "sanity");
+  assert(ptnode_adr(_oop_null)->escape_state() == PointsToNode::NoEscape &&
+         ptnode_adr(_oop_null)->edge_count() == 0, "sanity");
   if (UseCompressedOops) {
-    assert(ptnode_adr(_noop_null)->escape_state() == PointsToNode::NoEscape, "sanity");
+    assert(ptnode_adr(_noop_null)->escape_state() == PointsToNode::NoEscape &&
+           ptnode_adr(_noop_null)->edge_count() == 0, "sanity");
   }
 
   if (EliminateLocks && has_non_escaping_obj) {
@@ -1749,18 +1827,53 @@
       Node *n = C->macro_node(i);
       if (n->is_AbstractLock()) { // Lock and Unlock nodes
         AbstractLockNode* alock = n->as_AbstractLock();
-        if (!alock->is_eliminated()) {
+        if (!alock->is_eliminated() || alock->is_coarsened()) {
           PointsToNode::EscapeState es = escape_state(alock->obj_node());
           assert(es != PointsToNode::UnknownEscape, "should know");
           if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
-            // Mark it eliminated
-            alock->set_eliminated();
+            if (!alock->is_eliminated()) {
+              // Mark it eliminated to update any counters
+              alock->set_eliminated();
+            } else {
+              // The lock could have been marked eliminated by the lock coarsening
+              // code during the first IGVN pass, before EA. Clear the coarsened flag
+              // to eliminate all associated locks/unlocks and relock
+              // during deoptimization.
+              alock->clear_coarsened();
+            }
           }
         }
       }
     }
   }
 
+  if (OptimizePtrCompare && has_non_escaping_obj) {
+    // Add ConI(#CC_GT) and ConI(#CC_EQ).
+    _pcmp_neq = igvn->makecon(TypeInt::CC_GT);
+    _pcmp_eq = igvn->makecon(TypeInt::CC_EQ);
+    // Optimize object compares.
+    while (ptr_cmp_worklist.length() != 0) {
+      Node *n = ptr_cmp_worklist.pop();
+      Node *res = optimize_ptr_compare(n);
+      if (res != NULL) {
+#ifndef PRODUCT
+        if (PrintOptimizePtrCompare) {
+          tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ"));
+          if (Verbose) {
+            n->dump(1);
+          }
+        }
+#endif
+        _igvn->replace_node(n, res);
+      }
+    }
+    // cleanup
+    if (_pcmp_neq->outcnt() == 0)
+      igvn->hash_delete(_pcmp_neq);
+    if (_pcmp_eq->outcnt()  == 0)
+      igvn->hash_delete(_pcmp_eq);
+  }
+
 #ifndef PRODUCT
   if (PrintEscapeAnalysis) {
     dump(); // Dump ConnectionGraph
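The OptimizePtrCompare block feeds CmpP/CmpN nodes to optimize_ptr_compare() (defined further down in this file), which folds a comparison only when the points-to sets prove the answer. The key criterion, modeled standalone: disjoint points-to sets alone are not enough, because alias information for escaped objects is incomplete, but disjointness plus one side holding only non-escaping allocations is decisive. A sketch with std::set standing in for VectorSet (names below are hypothetical):

    #include <set>

    // Each int names a points-to node; only_noescape_allocs asserts every
    // element of pts1 is a non-escaping Allocate/CallStaticJava result.
    static bool definitely_not_equal(const std::set<int>& pts1,
                                     const std::set<int>& pts2,
                                     bool only_noescape_allocs) {
      for (std::set<int>::const_iterator it = pts1.begin(); it != pts1.end(); ++it)
        if (pts2.count(*it)) return false;   // overlapping sets: cannot decide
      return only_noescape_allocs;           // disjoint + fully local => provably !=
    }

    int main() {
      std::set<int> a, b;
      a.insert(1); a.insert(2); b.insert(3);
      return definitely_not_equal(a, b, true) ? 0 : 1;
    }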
@@ -1821,15 +1934,30 @@
   // Connection Graph does not record a default initialization by NULL
   // captured by Initialize node.
   //
+  uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
   uint ae_cnt = pta->edge_count();
+  bool visited_bottom_offset = false;
   for (uint ei = 0; ei < ae_cnt; ei++) {
     uint nidx = pta->edge_target(ei); // Field (AddP)
     PointsToNode* ptn = ptnode_adr(nidx);
     assert(ptn->_node->is_AddP(), "Should be AddP nodes only");
     int offset = ptn->offset();
-    if (offset != Type::OffsetBot &&
-        offset != oopDesc::klass_offset_in_bytes() &&
-        !visited->test_set(offset)) {
+    if (offset == Type::OffsetBot) {
+      if (!visited_bottom_offset) {
+        visited_bottom_offset = true;
+        // Check only oop fields.
+        const Type* adr_type = ptn->_node->as_AddP()->bottom_type();
+        if (!adr_type->isa_aryptr() ||
+            (adr_type->isa_aryptr()->klass() == NULL) ||
+             adr_type->isa_aryptr()->klass()->is_obj_array_klass()) {
+          // OffsetBot is used to reference array's element,
+          // always add reference to NULL since we don't
+          // known which element is referenced.
+          add_edge_from_fields(alloc->_idx, null_idx, offset);
+        }
+      }
+    } else if (offset != oopDesc::klass_offset_in_bytes() &&
+               !visited->test_set(offset)) {
 
       // Check only oop fields.
       const Type* adr_type = ptn->_node->as_AddP()->bottom_type();
@@ -1904,7 +2032,6 @@
         }
         if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
           // A field's initializing value was not recorded. Add NULL.
-          uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
           add_edge_from_fields(alloc->_idx, null_idx, offset);
         }
       }
@@ -1990,13 +2117,21 @@
   }
   // mark all reachable nodes
   while (worklist->length() > 0) {
-    PointsToNode* ptn = ptnode_adr(worklist->pop());
-    if (ptn->node_type() == PointsToNode::JavaObject) {
+    int pt = worklist->pop();
+    PointsToNode* ptn = ptnode_adr(pt);
+    if (ptn->node_type() == PointsToNode::JavaObject &&
+        !is_null_ptr(pt)) {
       has_java_obj = true;
+      if (esc_state > PointsToNode::NoEscape) {
+        // field values are unknown if the object escapes
+        add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
+      }
     }
     uint e_cnt = ptn->edge_count();
     for (uint ei = 0; ei < e_cnt; ei++) {
       uint npi = ptn->edge_target(ei);
+      if (is_null_ptr(npi))
+        continue;
       PointsToNode *np = ptnode_adr(npi);
       if (np->escape_state() < esc_state) {
         set_escape_state(npi, esc_state);
@@ -2008,8 +2143,100 @@
   return has_java_obj && (esc_state < PointsToNode::GlobalEscape);
 }
 
+// Optimize object comparisons.
+Node* ConnectionGraph::optimize_ptr_compare(Node* n) {
+  assert(OptimizePtrCompare, "sanity");
+  // Clone the returned set since PointsTo() returns a pointer
+  // to the same structure, ConnectionGraph::pt_ptset.
+  VectorSet ptset1 = *PointsTo(n->in(1));
+  VectorSet ptset2 = *PointsTo(n->in(2));
+
+  // Check simple cases first.
+  if (ptset1.Size() == 1) {
+    uint pt1 = ptset1.getelem();
+    PointsToNode* ptn1 = ptnode_adr(pt1);
+    if (ptn1->escape_state() == PointsToNode::NoEscape) {
+      if (ptset2.Size() == 1 && ptset2.getelem() == pt1) {
+        // Comparing the same non-escaping object.
+        return _pcmp_eq;
+      }
+      Node* obj = ptn1->_node;
+      // Comparing a non-escaping allocation.
+      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
+          !ptset2.test(pt1)) {
+        return _pcmp_neq; // This includes nullness check.
+      }
+    }
+  } else if (ptset2.Size() == 1) {
+    uint pt2 = ptset2.getelem();
+    PointsToNode* ptn2 = ptnode_adr(pt2);
+    if (ptn2->escape_state() == PointsToNode::NoEscape) {
+      Node* obj = ptn2->_node;
+      // Comparing a non-escaping allocation.
+      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
+          !ptset1.test(pt2)) {
+        return _pcmp_neq; // This includes nullness check.
+      }
+    }
+  }
+
+  if (!ptset1.disjoint(ptset2)) {
+    return NULL; // Sets are not disjoint
+  }
+
+  // Sets are disjoint.
+  bool set1_has_unknown_ptr = ptset1.test(_phantom_object) != 0;
+  bool set2_has_unknown_ptr = ptset2.test(_phantom_object) != 0;
+  bool set1_has_null_ptr   = (ptset1.test(_oop_null) | ptset1.test(_noop_null)) != 0;
+  bool set2_has_null_ptr   = (ptset2.test(_oop_null) | ptset2.test(_noop_null)) != 0;
+
+  if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
+      (set2_has_unknown_ptr && set1_has_null_ptr)) {
+    // Check nullness of unknown object.
+    return NULL;
+  }
+
+  // Disjointness by itself is not sufficient since
+  // alias analysis is not complete for escaped objects.
+  // Disjoint sets are definitely unrelated only when
+  // at least one set contains only non-escaping objects.
+  if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
+    bool has_only_non_escaping_alloc = true;
+    for (VectorSetI i(&ptset1); i.test(); ++i) {
+      uint pt = i.elem;
+      PointsToNode* ptn = ptnode_adr(pt);
+      Node* obj = ptn->_node;
+      if (ptn->escape_state() != PointsToNode::NoEscape ||
+          !(obj->is_Allocate() || obj->is_CallStaticJava())) {
+        has_only_non_escaping_alloc = false;
+        break;
+      }
+    }
+    if (has_only_non_escaping_alloc) {
+      return _pcmp_neq;
+    }
+  }
+  if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
+    bool has_only_non_escaping_alloc = true;
+    for (VectorSetI i(&ptset2); i.test(); ++i) {
+      uint pt = i.elem;
+      PointsToNode* ptn = ptnode_adr(pt);
+      Node* obj = ptn->_node;
+      if (ptn->escape_state() != PointsToNode::NoEscape ||
+          !(obj->is_Allocate() || obj->is_CallStaticJava())) {
+        has_only_non_escaping_alloc = false;
+        break;
+      }
+    }
+    if (has_only_non_escaping_alloc) {
+      return _pcmp_neq;
+    }
+  }
+  return NULL;
+}
+
 void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
-
+    bool is_arraycopy = false;
     switch (call->Opcode()) {
 #ifdef ASSERT
     case Op_Allocate:
@@ -2019,25 +2246,44 @@
       assert(false, "should be done already");
       break;
 #endif
+    case Op_CallLeafNoFP:
+      is_arraycopy = (call->as_CallLeaf()->_name != NULL &&
+                      strstr(call->as_CallLeaf()->_name, "arraycopy") != 0);
+      // fall through
     case Op_CallLeaf:
-    case Op_CallLeafNoFP:
     {
      // Stub calls, objects do not escape but they are not scalar replaceable.
       // Adjust escape state for outgoing arguments.
       const TypeTuple * d = call->tf()->domain();
+      bool src_has_oops = false;
       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
         const Type* at = d->field_at(i);
         Node *arg = call->in(i)->uncast();
         const Type *aat = phase->type(arg);
+        PointsToNode::EscapeState arg_esc = ptnode_adr(arg->_idx)->escape_state();
         if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
-            ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) {
+            (is_arraycopy || arg_esc < PointsToNode::ArgEscape)) {
 
           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting a Ptr");
+          bool arg_has_oops = aat->isa_oopptr() &&
+                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
+                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
+          if (i == TypeFunc::Parms) {
+            src_has_oops = arg_has_oops;
+          }
+          //
+          // src or dst could be j.l.Object when the other is a basic type array:
+          //
+          //   arraycopy(char[],0,Object*,0,size);
+          //   arraycopy(Object*,0,char[],0,size);
+          //
+          // Don't add edges from dst's fields in such cases.
+          //
+          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
+                                       arg_has_oops && (i > TypeFunc::Parms);
 #ifdef ASSERT
-          if (!(call->Opcode() == Op_CallLeafNoFP &&
-                call->as_CallLeaf()->_name != NULL &&
-                (strstr(call->as_CallLeaf()->_name, "arraycopy")  != 0) ||
+          if (!(is_arraycopy ||
                 call->as_CallLeaf()->_name != NULL &&
                 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
@@ -2046,20 +2292,72 @@
             assert(false, "EA: unexpected CallLeaf");
           }
 #endif
+          // Always process arraycopy's destination object since
+          // we need to add all possible edges to references in
+          // the source object.
+          if (arg_esc >= PointsToNode::ArgEscape &&
+              !arg_is_arraycopy_dest) {
+            continue;
+          }
           set_escape_state(arg->_idx, PointsToNode::ArgEscape);
+          Node* arg_base = arg;
           if (arg->is_AddP()) {
             //
             // The inline_native_clone() case when the arraycopy stub is called
             // after the allocation before Initialize and CheckCastPP nodes.
+            // Or normal arraycopy for object arrays case.
             //
             // Set AddP's base (Allocate) as not scalar replaceable since
             // pointer to the base (with offset) is passed as argument.
             //
-            arg = get_addp_base(arg);
+            arg_base = get_addp_base(arg);
           }
-          for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
-            uint pt = j.elem;
-            set_escape_state(pt, PointsToNode::ArgEscape);
+          VectorSet argset = *PointsTo(arg_base); // Clone set
+          for( VectorSetI j(&argset); j.test(); ++j ) {
+            uint pd = j.elem; // Destination object
+            set_escape_state(pd, PointsToNode::ArgEscape);
+
+            if (arg_is_arraycopy_dest) {
+              PointsToNode* ptd = ptnode_adr(pd);
+              // Conservatively reference an unknown object since
+              // not all of the source's fields/elements may be known.
+              add_edge_from_fields(pd, _phantom_object, Type::OffsetBot);
+
+              Node *src = call->in(TypeFunc::Parms)->uncast();
+              Node* src_base = src;
+              if (src->is_AddP()) {
+                src_base  = get_addp_base(src);
+              }
+              // Create edges from the destination's fields to
+              // everything the source's fields are known to point to.
+              for( VectorSetI s(PointsTo(src_base)); s.test(); ++s ) {
+                uint ps = s.elem;
+                bool has_bottom_offset = false;
+                for (uint fd = 0; fd < ptd->edge_count(); fd++) {
+                  assert(ptd->edge_type(fd) == PointsToNode::FieldEdge, "expecting a field edge");
+                  int fdi = ptd->edge_target(fd);
+                  PointsToNode* pfd = ptnode_adr(fdi);
+                  int offset = pfd->offset();
+                  if (offset == Type::OffsetBot)
+                    has_bottom_offset = true;
+                  assert(offset != -1, "offset should be set");
+                  add_deferred_edge_to_fields(fdi, ps, offset);
+                }
+                // The destination object may not have access (no field
+                // edge) to fields which are accessed in the source object.
+                // As a result, no edges will be created to those source
+                // fields, and the escape state of the destination object
+                // will not be propagated to them.
+                //
+                // Mark the source object as global escape except in
+                // the case of a Type::OffsetBot field (the common case
+                // for array element accesses), when edges are created
+                // to all of the source's fields.
+                if (!has_bottom_offset) {
+                  set_escape_state(ps, PointsToNode::GlobalEscape);
+                }
+              }
+            }
           }
         }
       }
@@ -2102,14 +2400,16 @@
             for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
               uint pt = j.elem;
               if (global_escapes) {
-                //The argument global escapes, mark everything it could point to
+                // The argument global escapes, mark everything it could point to
                 set_escape_state(pt, PointsToNode::GlobalEscape);
+                add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
               } else {
+                set_escape_state(pt, PointsToNode::ArgEscape);
                 if (fields_escapes) {
-                  // The argument itself doesn't escape, but any fields might
-                  add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
+                  // The argument itself doesn't escape, but any fields might.
+                  // Use OffsetTop to indicate such a case.
+                  add_edge_from_fields(pt, _phantom_object, Type::OffsetTop);
                 }
-                set_escape_state(pt, PointsToNode::ArgEscape);
               }
             }
           }
@@ -2135,6 +2435,7 @@
           for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
             uint pt = j.elem;
             set_escape_state(pt, PointsToNode::GlobalEscape);
+            add_edge_from_fields(pt, _phantom_object, Type::OffsetBot);
           }
         }
       }
@@ -2235,15 +2536,16 @@
          // its fields will be marked as NoEscape at least.
           set_escape_state(call_idx, PointsToNode::NoEscape);
           ptnode_adr(call_idx)->set_scalar_replaceable(false);
+          // Field values are unknown
+          add_edge_from_fields(call_idx, _phantom_object, Type::OffsetBot);
           add_pointsto_edge(resproj_idx, call_idx);
           copy_dependencies = true;
-        } else if (call_analyzer->is_return_local()) {
+        } else {
           // determine whether any arguments are returned
           set_escape_state(call_idx, PointsToNode::ArgEscape);
           bool ret_arg = false;
           for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
             const Type* at = d->field_at(i);
-
             if (at->isa_oopptr() != NULL) {
               Node *arg = call->in(i)->uncast();
 
@@ -2259,17 +2561,14 @@
               }
             }
           }
-          if (done && !ret_arg) {
-            // Returns unknown object.
-            set_escape_state(call_idx, PointsToNode::GlobalEscape);
-            add_pointsto_edge(resproj_idx, _phantom_object);
-          }
           if (done) {
             copy_dependencies = true;
+            // is_return_local() is true when only arguments are returned.
+            if (!ret_arg || !call_analyzer->is_return_local()) {
+              // Returns unknown object.
+              add_pointsto_edge(resproj_idx, _phantom_object);
+            }
           }
-        } else {
-          set_escape_state(call_idx, PointsToNode::GlobalEscape);
-          add_pointsto_edge(resproj_idx, _phantom_object);
         }
         if (copy_dependencies)
           call_analyzer->copy_dependencies(_compile->dependencies());
@@ -2431,6 +2730,11 @@
       add_node(n, PointsToNode::JavaObject, PointsToNode::GlobalEscape, false);
       break;
     }
+    case Op_PartialSubtypeCheck:
+    { // Produces Null or notNull and is used in CmpP.
+      add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
+      break;
+    }
     case Op_Phi:
     {
       const Type *t = n->as_Phi()->type();
@@ -2589,10 +2893,11 @@
     case Op_AddP:
     {
       Node *base = get_addp_base(n);
+      int offset = address_offset(n, phase);
       // Create a field edge to this node from everything base could point to.
       for( VectorSetI i(PointsTo(base)); i.test(); ++i ) {
         uint pt = i.elem;
-        add_field_edge(pt, n_idx, address_offset(n, phase));
+        add_field_edge(pt, n_idx, offset);
       }
       break;
     }
@@ -2659,6 +2964,10 @@
       int offset = address_offset(adr, phase);
       for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) {
         uint pt = i.elem;
+        if (adr->is_AddP()) {
+          // Add field edge if it is missing.
+          add_field_edge(pt, adr->_idx, offset);
+        }
         add_deferred_edge_to_fields(n_idx, pt, offset);
       }
       break;
@@ -2668,6 +2977,11 @@
       assert(false, "Op_Parm");
       break;
     }
+    case Op_PartialSubtypeCheck:
+    {
+      assert(false, "Op_PartialSubtypeCheck");
+      break;
+    }
     case Op_Phi:
     {
 #ifdef ASSERT
@@ -2745,11 +3059,14 @@
       assert(adr->is_AddP(), "expecting an AddP");
       Node *adr_base = get_addp_base(adr);
       Node *val = n->in(MemNode::ValueIn)->uncast();
+      int offset = address_offset(adr, phase);
       // For everything "adr_base" could point to, create a deferred edge
       // to "val" from each field with the same offset.
       for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) {
         uint pt = i.elem;
-        add_edge_from_fields(pt, val->_idx, address_offset(adr, phase));
+        // Add field edge if it is missing.
+        add_field_edge(pt, adr->_idx, offset);
+        add_edge_from_fields(pt, val->_idx, offset);
       }
       break;
     }
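For illustration, the folding rule that optimize_ptr_compare() implements above
can be sketched outside HotSpot. This is a minimal sketch under stated
assumptions: std::set<int> stands in for the VectorSet of points-to node ids,
the null/phantom special cases are omitted, and every name below is invented
rather than taken from the VM sources.

#include <algorithm>
#include <iostream>
#include <iterator>
#include <set>

enum Fold { FoldEq, FoldNeq, FoldUnknown };

// Sketch of the CmpP/CmpN folding rule: fold to EQ when both pointers
// can only be the same non-escaping object; fold to NE when the
// points-to sets are disjoint and at least one side holds nothing but
// non-escaping allocations (disjointness alone is not enough, since
// alias analysis is incomplete for escaped objects).
Fold optimize_ptr_compare(const std::set<int>& s1, const std::set<int>& s2,
                          const std::set<int>& non_escaping_allocs) {
  if (s1.size() == 1 && s2 == s1 && non_escaping_allocs.count(*s1.begin())) {
    return FoldEq;                      // same non-escaping object
  }
  std::set<int> inter;
  std::set_intersection(s1.begin(), s1.end(), s2.begin(), s2.end(),
                        std::inserter(inter, inter.begin()));
  if (!inter.empty()) {
    return FoldUnknown;                 // sets are not disjoint
  }
  auto all_non_escaping = [&](const std::set<int>& s) {
    for (int id : s) {
      if (non_escaping_allocs.count(id) == 0) return false;
    }
    return true;
  };
  if (all_non_escaping(s1) || all_non_escaping(s2)) {
    return FoldNeq;                     // definitely different objects
  }
  return FoldUnknown;
}

int main() {
  std::set<int> a{1}, b{2}, e1{3}, e2{4}, allocs{1, 2};
  std::cout << (optimize_ptr_compare(a,  a,  allocs) == FoldEq)       // 1
            << (optimize_ptr_compare(a,  b,  allocs) == FoldNeq)      // 1
            << (optimize_ptr_compare(e1, e2, allocs) == FoldUnknown)  // 1
            << "\n";
}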
--- a/hotspot/src/share/vm/opto/escape.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/escape.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -160,6 +160,7 @@
   Node* _node;                 // Ideal node corresponding to this PointsTo node.
   int   _offset;               // Object fields offsets.
  bool  _scalar_replaceable;   // A non-escaping object could be replaced with scalars
+  bool  _has_unknown_ptr;      // Has edge to phantom_object
 
 public:
   PointsToNode():
@@ -168,6 +169,7 @@
     _edges(NULL),
     _node(NULL),
     _offset(-1),
+    _has_unknown_ptr(false),
     _scalar_replaceable(true) {}
 
 
@@ -175,6 +177,7 @@
   NodeType node_type() const { return _type;}
   int offset() { return _offset;}
   bool scalar_replaceable() { return _scalar_replaceable;}
+  bool has_unknown_ptr()    { return _has_unknown_ptr;}
 
   void set_offset(int offs) { _offset = offs;}
   void set_escape_state(EscapeState state) { _escape = state; }
@@ -183,6 +186,7 @@
     _type = ntype;
   }
   void set_scalar_replaceable(bool v) { _scalar_replaceable = v; }
+  void set_has_unknown_ptr()          { _has_unknown_ptr = true; }
 
   // count of outgoing edges
   uint edge_count() const { return (_edges == NULL) ? 0 : _edges->length(); }
@@ -236,6 +240,8 @@
                                        // are assumed to point to.
   uint                      _oop_null; // ConP(#NULL)->_idx
   uint                     _noop_null; // ConN(#NULL)->_idx
+  Node*                     _pcmp_neq; // ConI(#CC_GT)
+  Node*                      _pcmp_eq; // ConI(#CC_EQ)
 
   Compile *                  _compile; // Compile object for current compilation
   PhaseIterGVN *                _igvn; // Value numbering
@@ -248,6 +254,8 @@
   }
   uint nodes_size() const { return _nodes.length(); }
 
+  bool is_null_ptr(uint idx) const { return (idx == _noop_null || idx == _oop_null); }
+
   // Add node to ConnectionGraph.
   void add_node(Node *n, PointsToNode::NodeType nt, PointsToNode::EscapeState es, bool done);
 
@@ -331,10 +339,9 @@
   }
 
   // Notify optimizer that a node has been modified
-  // Node:  This assumes that escape analysis is run before
-  //        PhaseIterGVN creation
   void record_for_optimizer(Node *n) {
     _igvn->_worklist.push(n);
+    _igvn->add_users_to_worklist(n);
   }
 
   // Set the escape state of a node
@@ -351,6 +358,9 @@
                               GrowableArray<uint>* worklist,
                               PointsToNode::EscapeState esc_state);
 
+  // Optimize object comparisons.
+  Node* optimize_ptr_compare(Node* n);
+
   // Compute the escape information
   bool compute_escape();
 
--- a/hotspot/src/share/vm/opto/gcm.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/gcm.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -95,7 +95,7 @@
   assert(in0 != NULL, "Only control-dependent");
   const Node *p = in0->is_block_proj();
   if (p != NULL && p != n) {    // Control from a block projection?
-    assert(!n->pinned() || n->is_MachConstantBase() || n->is_SafePointScalarObject(), "only pinned MachConstantBase or SafePointScalarObject node is expected here");
+    assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
     // Find trailing Region
     Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
     uint j = 0;
--- a/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/idealGraphPrinter.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -447,6 +447,9 @@
     if (flags & Node::Flag_may_be_short_branch) {
       print_prop("may_be_short_branch", "true");
     }
+    if (flags & Node::Flag_has_call) {
+      print_prop("has_call", "true");
+    }
 
     if (C->matcher() != NULL) {
       if (C->matcher()->is_shared(node)) {
--- a/hotspot/src/share/vm/opto/lcm.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -548,6 +548,22 @@
   set_next_call(call, next_call, bbs);
 }
 
+//------------------------------add_call_kills-------------------------------------
+void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
+  // Fill in the kill mask for the call
+  for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
+    if( !regs.Member(r) ) {     // Not already defined by the call
+      // Save-on-call register?
+      if ((save_policy[r] == 'C') ||
+          (save_policy[r] == 'A') ||
+          ((save_policy[r] == 'E') && exclude_soe)) {
+        proj->_rout.Insert(r);
+      }
+    }
+  }
+}
+
+
 //------------------------------sched_call-------------------------------------
 uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
   RegMask regs;
@@ -631,17 +647,7 @@
       proj->_rout.OR(Matcher::method_handle_invoke_SP_save_mask());
   }
 
-  // Fill in the kill mask for the call
-  for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
-    if( !regs.Member(r) ) {     // Not already defined by the call
-      // Save-on-call register?
-      if ((save_policy[r] == 'C') ||
-          (save_policy[r] == 'A') ||
-          ((save_policy[r] == 'E') && exclude_soe)) {
-        proj->_rout.Insert(r);
-      }
-    }
-  }
+  add_call_kills(proj, regs, save_policy, exclude_soe);
 
   return node_cnt;
 }
@@ -776,6 +782,7 @@
     }
 #endif
 
+  uint max_idx = matcher.C->unique();
   // Pull from worklist and schedule
   while( worklist.size() ) {    // Worklist is not ready
 
@@ -815,11 +822,28 @@
       phi_cnt = sched_call(matcher, cfg->_bbs, phi_cnt, worklist, ready_cnt, mcall, next_call);
       continue;
     }
+
+    if (n->is_Mach() && n->as_Mach()->has_call()) {
+      RegMask regs;
+      regs.Insert(matcher.c_frame_pointer());
+      regs.OR(n->out_RegMask());
+
+      MachProjNode *proj = new (matcher.C, 1) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
+      cfg->_bbs.map(proj->_idx,this);
+      _nodes.insert(phi_cnt++, proj);
+
+      add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
+    }
+
     // Children are now all ready
     for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
       Node* m = n->fast_out(i5); // Get user
       if( cfg->_bbs[m->_idx] != this ) continue;
       if( m->is_Phi() ) continue;
+      if (m->_idx > max_idx) { // new node, skip it
+        assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
+        continue;
+      }
       if( !--ready_cnt[m->_idx] )
         worklist.push(m);
     }
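The kill mask filled in by the factored-out add_call_kills() above (now also
reused for has_call() Mach nodes) can be sketched as follows. The register
count, policy string, and bitset are stand-ins for _last_Mach_Reg, the AD
save-policy array, and RegMask; none of them are HotSpot types.

#include <bitset>
#include <cstddef>
#include <iostream>
#include <string>

constexpr std::size_t kNumRegs = 8;      // stand-in for _last_Mach_Reg
using KillMask = std::bitset<kNumRegs>;  // stand-in for proj->_rout

// Every register the call does not already define is killed if its
// save policy is caller-save ('C'), always-save ('A'), or save-on-entry
// ('E') when save-on-entry registers are excluded.
KillMask add_call_kills(const KillMask& defined_by_call,
                        const std::string& save_policy, bool exclude_soe) {
  KillMask kills;
  for (std::size_t r = 0; r < kNumRegs && r < save_policy.size(); ++r) {
    if (defined_by_call.test(r)) continue;  // already defined by the call
    char p = save_policy[r];
    if (p == 'C' || p == 'A' || (p == 'E' && exclude_soe)) {
      kills.set(r);
    }
  }
  return kills;
}

int main() {
  KillMask defs;
  defs.set(0);                          // pretend the call defines r0
  std::string policy = "CCAENNEC";      // hypothetical per-register policy
  std::cout << add_call_kills(defs, policy, false) << "\n";  // 10000110
}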
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -715,7 +715,6 @@
     long limit_con = cl->limit()->get_int();
     julong trip_cnt = cl->trip_count();
     long final_con = init_con + trip_cnt*stride_con;
-    final_con -= stride_con;
     int final_int = (int)final_con;
     // The final value should be in integer range since the loop
     // is counted and the limit was checked for overflow.
@@ -1947,7 +1946,7 @@
   }
 
   // Nothing to do, so get out
-  if( !C->has_loops() && !do_split_ifs && !_verify_me && !_verify_only ) {
+  if( !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only ) {
     _igvn.optimize();           // Cleanup NeverBranches
     return;
   }
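The one-line removal in loopnode.cpp is the fix exercised by Test7110586 at
the end of this change: the final value of a counted loop's induction variable
is init + trip_cnt * stride, and the extra "final_con -= stride_con" left it
one stride short whenever the stride did not divide the trip distance evenly.
A standalone sketch of the corrected arithmetic (positive stride assumed):

#include <iostream>

// Final value of the induction variable of a counted loop
//   for (int i = init; i < limit; i += stride)
// as computed after this fix: init + trip_cnt * stride, with no extra
// stride subtracted.
long final_value(long init, long limit, long stride) {
  long trip_cnt = (limit - init + stride - 1) / stride;  // ceiling division
  return init + trip_cnt * stride;
}

int main() {
  // The same strides Test7110586 exercises against limit 11.
  std::cout << final_value(0, 11, 1)  << "\n";  // 11
  std::cout << final_value(0, 11, 2)  << "\n";  // 12 (the old code gave 10)
  std::cout << final_value(0, 11, 3)  << "\n";  // 12 (the old code gave 9)
  std::cout << final_value(0, 11, 11) << "\n";  // 11 (the old code gave 0)
}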
--- a/hotspot/src/share/vm/opto/machnode.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/machnode.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -480,21 +480,20 @@
 
 //=============================================================================
 int MachConstantNode::constant_offset() {
-  int offset = _constant.offset();
   // Bind the offset lazily.
-  if (offset == -1) {
+  if (_constant.offset() == -1) {
     Compile::ConstantTable& constant_table = Compile::current()->constant_table();
-    // If called from Compile::scratch_emit_size assume the worst-case
-    // for load offsets: half the constant table size.
-    // NOTE: Don't return or calculate the actual offset (which might
-    // be zero) because that leads to problems with e.g. jumpXtnd on
-    // some architectures (cf. add-optimization in SPARC jumpXtnd).
-    if (Compile::current()->in_scratch_emit_size())
-      return constant_table.size() / 2;
-    offset = constant_table.table_base_offset() + constant_table.find_offset(_constant);
-    _constant.set_offset(offset);
+    int offset = constant_table.find_offset(_constant);
+    // If called from Compile::scratch_emit_size, return the
+    // pre-calculated offset.
+    // NOTE: If the AD file does some table base offset optimizations
+    // later, the AD file needs to account for this.
+    if (Compile::current()->in_scratch_emit_size()) {
+      return constant_table.calculate_table_base_offset() + offset;
+    }
+    _constant.set_offset(constant_table.table_base_offset() + offset);
   }
-  return offset;
+  return _constant.offset();
 }
 
 
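A sketch of the reworked lazy binding in MachConstantNode::constant_offset():
during scratch emission (size measurement) a predicted table base plus the
constant's offset is returned without caching, and the real offset is bound
only once actual emission starts. The types and values below are invented for
illustration and are not the VM's.

#include <iostream>

struct ConstantTable {
  int table_base_offset() const { return 64; }            // bound base
  int calculate_table_base_offset() const { return 64; }  // predicted base
  int find_offset(int /*constant*/) const { return 8; }
};

struct MachConstant {
  int _offset = -1;  // -1 means "not bound yet"
};

int constant_offset(MachConstant& c, const ConstantTable& t,
                    bool in_scratch_emit_size) {
  if (c._offset == -1) {
    int offset = t.find_offset(0);
    if (in_scratch_emit_size) {
      return t.calculate_table_base_offset() + offset;  // don't cache
    }
    c._offset = t.table_base_offset() + offset;         // bind lazily
  }
  return c._offset;
}

int main() {
  ConstantTable t;
  MachConstant c;
  std::cout << constant_offset(c, t, true)  << "\n";  // 72, still unbound
  std::cout << (c._offset == -1)            << "\n";  // 1
  std::cout << constant_offset(c, t, false) << "\n";  // 72, now cached
}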
--- a/hotspot/src/share/vm/opto/machnode.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/machnode.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -190,6 +190,9 @@
  // Avoid back-to-back placement of some instructions on some CPUs.
   bool avoid_back_to_back() const { return (flags() & Flag_avoid_back_to_back) != 0; }
 
+  // Instruction is implemented with a call
+  bool has_call() const { return (flags() & Flag_has_call) != 0; }
+
   // First index in _in[] corresponding to operand, or -1 if there is none
   int  operand_index(uint operand) const;
 
--- a/hotspot/src/share/vm/opto/macro.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/macro.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -81,7 +81,7 @@
       uint old_unique = C->unique();
       Node* new_in = old_sosn->clone(jvms_adj, sosn_map);
       if (old_unique != C->unique()) {
-        new_in->set_req(0, newcall->in(0)); // reset control edge
+        new_in->set_req(0, C->root()); // reset control edge
         new_in = transform_later(new_in); // Register new node.
       }
       old_in = new_in;
@@ -565,7 +565,6 @@
   if (res == NULL) {
     // All users were eliminated.
   } else if (!res->is_CheckCastPP()) {
-    alloc->_is_scalar_replaceable = false;  // don't try again
     NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";)
     can_eliminate = false;
   } else {
@@ -719,7 +718,7 @@
                                                  alloc,
 #endif
                                                  first_ind, nfields);
-    sobj->init_req(0, sfpt->in(TypeFunc::Control));
+    sobj->init_req(0, C->root());
     transform_later(sobj);
 
     // Scan object's fields adding an input to the safepoint for each field.
@@ -762,10 +761,10 @@
 
       Node *field_val = value_from_mem(mem, basic_elem_type, field_type, field_addr_type, alloc);
       if (field_val == NULL) {
-        // we weren't able to find a value for this field,
-        // give up on eliminating this allocation
-        alloc->_is_scalar_replaceable = false;  // don't try again
-        // remove any extra entries we added to the safepoint
+        // We weren't able to find a value for this field,
+        // give up on eliminating this allocation.
+
+        // Remove any extra entries we added to the safepoint.
         uint last = sfpt->req() - 1;
         for (int k = 0;  k < j; k++) {
           sfpt->del_req(last--);
@@ -1804,9 +1803,9 @@
   #ifndef PRODUCT
   if (PrintEliminateLocks) {
     if (alock->is_Lock()) {
-      tty->print_cr("++++ Eliminating: %d Lock", alock->_idx);
+      tty->print_cr("++++ Eliminated: %d Lock", alock->_idx);
     } else {
-      tty->print_cr("++++ Eliminating: %d Unlock", alock->_idx);
+      tty->print_cr("++++ Eliminated: %d Unlock", alock->_idx);
     }
   }
   #endif
@@ -2165,11 +2164,12 @@
   _igvn.replace_node(_memproj_fallthrough, mem_phi);
 }
 
-//------------------------------expand_macro_nodes----------------------
-//  Returns true if a failure occurred.
-bool PhaseMacroExpand::expand_macro_nodes() {
+//---------------------------eliminate_macro_nodes----------------------
+// Eliminate scalar replaced allocations and associated locks.
+void PhaseMacroExpand::eliminate_macro_nodes() {
   if (C->macro_count() == 0)
-    return false;
+    return;
+
   // First, attempt to eliminate locks
   int cnt = C->macro_count();
   for (int i=0; i < cnt; i++) {
@@ -2189,14 +2189,6 @@
       debug_only(int old_macro_count = C->macro_count(););
       if (n->is_AbstractLock()) {
         success = eliminate_locking_node(n->as_AbstractLock());
-      } else if (n->Opcode() == Op_LoopLimit) {
-        // Remove it from macro list and put on IGVN worklist to optimize.
-        C->remove_macro_node(n);
-        _igvn._worklist.push(n);
-        success = true;
-      } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
-        _igvn.replace_node(n, n->in(1));
-        success = true;
       }
       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
       progress = progress || success;
@@ -2220,18 +2212,50 @@
         assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
         break;
       default:
-        assert(false, "unknown node type in macro list");
+        assert(n->Opcode() == Op_LoopLimit ||
+               n->Opcode() == Op_Opaque1   ||
+               n->Opcode() == Op_Opaque2, "unknown node type in macro list");
       }
       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
       progress = progress || success;
     }
   }
+}
+
+//------------------------------expand_macro_nodes----------------------
+//  Returns true if a failure occurred.
+bool PhaseMacroExpand::expand_macro_nodes() {
+  // Last attempt to eliminate macro nodes.
+  eliminate_macro_nodes();
+
   // Make sure expansion will not cause node limit to be exceeded.
   // Worst case is a macro node gets expanded into about 50 nodes.
   // Allow 50% more for optimization.
   if (C->check_node_count(C->macro_count() * 75, "out of nodes before macro expansion" ) )
     return true;
 
+  // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations.
+  bool progress = true;
+  while (progress) {
+    progress = false;
+    for (int i = C->macro_count(); i > 0; i--) {
+      Node * n = C->macro_node(i-1);
+      bool success = false;
+      debug_only(int old_macro_count = C->macro_count(););
+      if (n->Opcode() == Op_LoopLimit) {
+        // Remove it from macro list and put on IGVN worklist to optimize.
+        C->remove_macro_node(n);
+        _igvn._worklist.push(n);
+        success = true;
+      } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
+        _igvn.replace_node(n, n->in(1));
+        success = true;
+      }
+      assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
+      progress = progress || success;
+    }
+  }
+
   // expand "macro" nodes
   // nodes are removed from the macro list as they are processed
   while (C->macro_count() > 0) {
@@ -2265,5 +2289,6 @@
 
   _igvn.set_delay_transform(false);
   _igvn.optimize();
+  if (C->failing())  return true;
   return false;
 }
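The restructuring above splits elimination out of expansion:
eliminate_macro_nodes() loops until no progress (each eliminated
lock/allocation can expose another candidate), and expand_macro_nodes() calls
it one last time before expanding whatever remains. A shape-only sketch with
an invented node model, not the real macro list:

#include <iostream>
#include <vector>

struct MacroNode { int id; bool eliminable; };

void eliminate_macro_nodes(std::vector<MacroNode>& macros) {
  bool progress = true;
  while (progress) {                    // repeat until nothing changes
    progress = false;
    for (int i = (int)macros.size(); i > 0; i--) {
      if (macros[i - 1].eliminable) {
        macros.erase(macros.begin() + (i - 1));
        progress = true;                // elimination reduces macro count
      }
    }
  }
}

bool expand_macro_nodes(std::vector<MacroNode>& macros) {
  eliminate_macro_nodes(macros);        // last attempt before expansion
  for (const MacroNode& n : macros) {   // expand whatever survives
    std::cout << "expand " << n.id << "\n";
  }
  macros.clear();
  return false;                         // false means no failure
}

int main() {
  std::vector<MacroNode> macros{{1, true}, {2, false}, {3, true}};
  expand_macro_nodes(macros);           // prints only "expand 2"
}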
--- a/hotspot/src/share/vm/opto/macro.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/macro.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -119,6 +119,7 @@
   PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn) {
     _igvn.set_delay_transform(true);
   }
+  void eliminate_macro_nodes();
   bool expand_macro_nodes();
 
 };
--- a/hotspot/src/share/vm/opto/matcher.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/matcher.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -294,7 +294,6 @@
   RegMask                     _return_value_mask;
   // Inline Cache Register
   static OptoReg::Name  inline_cache_reg();
-  static const RegMask &inline_cache_reg_mask();
   static int            inline_cache_reg_encode();
 
   // Register for DIVI projection of divmodI
@@ -324,7 +323,6 @@
   // and then expanded into the inline_cache_reg and a method_oop register
 
   static OptoReg::Name  interpreter_method_oop_reg();
-  static const RegMask &interpreter_method_oop_reg_mask();
   static int            interpreter_method_oop_reg_encode();
 
   static OptoReg::Name  compiler_method_oop_reg();
@@ -333,7 +331,6 @@
 
   // Interpreter's Frame Pointer Register
   static OptoReg::Name  interpreter_frame_pointer_reg();
-  static const RegMask &interpreter_frame_pointer_reg_mask();
 
   // Java-Native calling convention
   // (what you use when intercalling between Java and C++ code)
@@ -371,10 +368,6 @@
   // registers?  True for Intel but false for most RISCs
   static const bool clone_shift_expressions;
 
-  // Should constant table entries be accessed with loads using
-  // absolute addressing?  True for x86 but false for most RISCs.
-  static const bool constant_table_absolute_addressing;
-
   static bool narrow_oop_use_complex_address();
 
   // Generate implicit null check for narrow oops if it can fold
--- a/hotspot/src/share/vm/opto/memnode.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -265,6 +265,13 @@
   if( phase->type( mem ) == Type::TOP ) return NodeSentinel; // caller will return NULL
   assert( mem != this, "dead loop in MemNode::Ideal" );
 
+  if (can_reshape && igvn != NULL && igvn->_worklist.member(mem)) {
+    // This memory slice may be dead.
+    // Delay this mem node transformation until the memory is processed.
+    phase->is_IterGVN()->_worklist.push(this);
+    return NodeSentinel; // caller will return NULL
+  }
+
   Node *address = in(MemNode::Address);
   const Type *t_adr = phase->type( address );
   if( t_adr == Type::TOP )              return NodeSentinel; // caller will return NULL
@@ -2661,6 +2668,8 @@
 // control copies
 Node *StrIntrinsicNode::Ideal(PhaseGVN *phase, bool can_reshape) {
   if (remove_dead_region(phase, can_reshape)) return this;
+  // Don't bother trying to transform a dead node
+  if (in(0) && in(0)->is_top())  return NULL;
 
   if (can_reshape) {
     Node* mem = phase->transform(in(MemNode::Memory));
@@ -2675,6 +2684,12 @@
   return NULL;
 }
 
+//------------------------------Value------------------------------------------
+const Type *StrIntrinsicNode::Value( PhaseTransform *phase ) const {
+  if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
+  return bottom_type();
+}
+
 //=============================================================================
 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
   : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
@@ -2715,6 +2730,8 @@
 // control copies
 Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
   if (remove_dead_region(phase, can_reshape)) return this;
+  // Don't bother trying to transform a dead node
+  if (in(0) && in(0)->is_top())  return NULL;
 
   // Eliminate volatile MemBars for scalar replaced objects.
   if (can_reshape && req() == (Precedent+1) &&
--- a/hotspot/src/share/vm/opto/memnode.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/memnode.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -800,6 +800,7 @@
   virtual uint match_edge(uint idx) const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual const Type *Value(PhaseTransform *phase) const;
 };
 
 //------------------------------StrComp-------------------------------------
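The new guard in MemNode::Ideal() re-queues the node when its memory input is
still pending on the IGVN worklist, so a possibly-dead memory slice is
processed before anything transforms against it. A sketch of that control
flow with invented types standing in for Node and the IGVN worklist:

#include <deque>
#include <iostream>
#include <set>

struct Node { int id; Node* mem; };

// When the memory input is still pending, push this node back and
// transform it later, after the input has been processed.
bool ideal(Node* n, std::set<Node*>& pending, std::deque<Node*>& worklist) {
  if (n->mem != nullptr && pending.count(n->mem)) {
    worklist.push_back(n);   // delay: memory slice may be dead
    return false;            // no transformation this round
  }
  return true;               // safe to transform now
}

int main() {
  Node mem{1, nullptr}, load{2, &mem};
  std::set<Node*> pending{&mem};
  std::deque<Node*> worklist;
  std::cout << ideal(&load, pending, worklist) << "\n";  // 0: delayed
  pending.erase(&mem);
  std::cout << ideal(&load, pending, worklist) << "\n";  // 1: proceed
}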
--- a/hotspot/src/share/vm/opto/node.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/opto/node.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -641,7 +641,8 @@
     Flag_is_dead_loop_safe   = Flag_is_cisc_alternate << 1,
     Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
     Flag_avoid_back_to_back  = Flag_may_be_short_branch << 1,
-    _max_flags = (Flag_avoid_back_to_back << 1) - 1 // allow flags combination
+    Flag_has_call            = Flag_avoid_back_to_back << 1,
+    _max_flags = (Flag_has_call << 1) - 1 // allow flags combination
   };
 
 private:
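The Flag_has_call bit follows the existing pattern in node.hpp: each flag is
the previous one shifted left, and _max_flags spans them all so combinations
can be masked. A compilable miniature (only a subset of the real flags is
shown, with an arbitrary starting bit):

#include <cstdio>

enum Flags {
  Flag_is_dead_loop_safe   = 1 << 0,
  Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
  Flag_avoid_back_to_back  = Flag_may_be_short_branch << 1,
  Flag_has_call            = Flag_avoid_back_to_back << 1,
  _max_flags = (Flag_has_call << 1) - 1  // allow flags combination
};

int main() {
  unsigned flags = Flag_has_call | Flag_may_be_short_branch;
  std::printf("%d\n", (flags & Flag_has_call) != 0);  // 1
  std::printf("0x%x\n", (unsigned)_max_flags);        // 0xf
}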
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -890,7 +890,7 @@
   diagnostic(bool, TraceNMethodInstalls, false,                             \
              "Trace nmethod intallation")                                   \
                                                                             \
-  diagnostic(intx, ScavengeRootsInCode, 1,                                  \
+  diagnostic(intx, ScavengeRootsInCode, 2,                                  \
              "0: do not allow scavengable oops in the code cache; "         \
              "1: allow scavenging from the code cache; "                    \
              "2: emit as many constants as the compiler can see")           \
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Thu Dec 08 10:27:33 2011 -0800
@@ -30,6 +30,27 @@
 #include "runtime/simpleThresholdPolicy.inline.hpp"
 #include "code/scopeDesc.hpp"
 
+
+void SimpleThresholdPolicy::print_counters(const char* prefix, methodHandle mh) {
+  int invocation_count = mh->invocation_count();
+  int backedge_count = mh->backedge_count();
+  methodDataHandle mdh = mh->method_data();
+  int mdo_invocations = 0, mdo_backedges = 0;
+  int mdo_invocations_start = 0, mdo_backedges_start = 0;
+  if (mdh() != NULL) {
+    mdo_invocations = mdh->invocation_count();
+    mdo_backedges = mdh->backedge_count();
+    mdo_invocations_start = mdh->invocation_count_start();
+    mdo_backedges_start = mdh->backedge_count_start();
+  }
+  tty->print(" %stotal: %d,%d %smdo: %d(%d),%d(%d)", prefix,
+      invocation_count, backedge_count, prefix,
+      mdo_invocations, mdo_invocations_start,
+      mdo_backedges, mdo_backedges_start);
+  tty->print(" %smax levels: %d,%d", prefix,
+      mh->highest_comp_level(), mh->highest_osr_comp_level());
+}
+
 // Print an event.
 void SimpleThresholdPolicy::print_event(EventType type, methodHandle mh, methodHandle imh,
                                         int bci, CompLevel level) {
@@ -38,8 +59,6 @@
   ttyLocker tty_lock;
   tty->print("%lf: [", os::elapsedTime());
 
-  int invocation_count = mh->invocation_count();
-  int backedge_count = mh->backedge_count();
   switch(type) {
   case CALL:
     tty->print("call");
@@ -82,23 +101,9 @@
   print_specific(type, mh, imh, bci, level);
 
   if (type != COMPILE) {
-    methodDataHandle mdh = mh->method_data();
-    int mdo_invocations = 0, mdo_backedges = 0;
-    int mdo_invocations_start = 0, mdo_backedges_start = 0;
-    if (mdh() != NULL) {
-      mdo_invocations = mdh->invocation_count();
-      mdo_backedges = mdh->backedge_count();
-      mdo_invocations_start = mdh->invocation_count_start();
-      mdo_backedges_start = mdh->backedge_count_start();
-    }
-    tty->print(" total: %d,%d mdo: %d(%d),%d(%d)",
-               invocation_count, backedge_count,
-               mdo_invocations, mdo_invocations_start,
-               mdo_backedges, mdo_backedges_start);
-    tty->print(" max levels: %d,%d",
-                mh->highest_comp_level(), mh->highest_osr_comp_level());
+    print_counters("", mh);
     if (inlinee_event) {
-      tty->print(" inlinee max levels: %d,%d", imh->highest_comp_level(), imh->highest_osr_comp_level());
+      print_counters("inlinee ", imh);
     }
     tty->print(" compilable: ");
     bool need_comma = false;
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp	Thu Dec 08 10:27:33 2011 -0800
@@ -55,7 +55,7 @@
   // loop_event checks if a method should be OSR compiled at a different
   // level.
   CompLevel loop_event(methodOop method, CompLevel cur_level);
-
+  void print_counters(const char* prefix, methodHandle mh);
 protected:
   int c1_count() const     { return _c1_count; }
   int c2_count() const     { return _c2_count; }
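The print_counters() change above is a deduplication: the counter-printing
block that existed once for the method and once (partially) for the inlinee is
factored into one helper parameterized by a prefix. A sketch of that shape
with invented counter data:

#include <cstdio>

struct Counters {
  int invocations, backedges, highest_level, highest_osr_level;
};

// One helper serves both the method ("" prefix) and the inlinee
// ("inlinee " prefix), matching the refactor's intent.
void print_counters(const char* prefix, const Counters& c) {
  std::printf(" %stotal: %d,%d", prefix, c.invocations, c.backedges);
  std::printf(" %smax levels: %d,%d", prefix,
              c.highest_level, c.highest_osr_level);
}

int main() {
  Counters method{120, 5000, 3, 0}, inlinee{40, 0, 1, 0};
  print_counters("", method);           // method counters
  print_counters("inlinee ", inlinee);  // same helper, prefixed
  std::printf("\n");
}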
--- a/hotspot/test/compiler/6792161/Test6792161.java	Wed Dec 07 07:27:09 2011 -0800
+++ b/hotspot/test/compiler/6792161/Test6792161.java	Thu Dec 08 10:27:33 2011 -0800
@@ -27,7 +27,7 @@
  * @bug 6792161
  * @summary assert("No dead instructions after post-alloc")
  *
- * @run main/othervm -Xcomp -XX:MaxInlineSize=120 Test6792161
+ * @run main/othervm/timeout=300 -Xcomp -XX:MaxInlineSize=120 Test6792161
  */
 
 import java.lang.reflect.Constructor;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/7110586/Test7110586.java	Thu Dec 08 10:27:33 2011 -0800
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7110586
+ * @summary C2 generates incorrect results
+ *
+ * @run main/othervm -Xbatch Test7110586
+ */
+
+public class Test7110586 {
+  static int test1() {
+    int i = 0;
+    for ( ; i < 11; i+=1) {}
+    return i;
+  }
+  static int test2() {
+    int i = 0;
+    for ( ; i < 11; i+=2) {}
+    return i;
+  }
+  static int test3() {
+    int i = 0;
+    for ( ; i < 11; i+=3) {}
+    return i;
+  }
+  static int test11() {
+    int i = 0;
+    for ( ; i < 11; i+=11) {}
+    return i;
+  }
+
+  static int testm1() {
+    int i = 0;
+    for ( ; i > -11; i-=1) {}
+    return i;
+  }
+  static int testm2() {
+    int i = 0;
+    for ( ; i > -11; i-=2) {}
+    return i;
+  }
+  static int testm3() {
+    int i = 0;
+    for ( ; i > -11; i-=3) {}
+    return i;
+  }
+  static int testm11() {
+    int i = 0;
+    for ( ; i > -11; i-=11) {}
+    return i;
+  }
+
+  public static void main(String args[]) {
+    int x1  = 0;
+    int x2  = 0;
+    int x3  = 0;
+    int x11 = 0;
+    int m1  = 0;
+    int m2  = 0;
+    int m3  = 0;
+    int m11 = 0;
+    for (int i=0; i<10000; i++) {
+      x1  = test1();
+      x2  = test2();
+      x3  = test3();
+      x11 = test11();
+      m1  = testm1();
+      m2  = testm2();
+      m3  = testm3();
+      m11 = testm11();
+    }
+    boolean failed = false;
+    if (x1 != 11) {
+      System.out.println("ERROR (incr = +1): " + x1 + " != 11");
+      failed = true;
+    }
+    if (x2 != 12) {
+      System.out.println("ERROR (incr = +2): " + x2 + " != 12");
+      failed = true;
+    }
+    if (x3 != 12) {
+      System.out.println("ERROR (incr = +3): " + x3 + " != 12");
+      failed = true;
+    }
+    if (x11 != 11) {
+      System.out.println("ERROR (incr = +11): " + x11 + " != 11");
+      failed = true;
+    }
+    if (m1 != -11) {
+      System.out.println("ERROR (incr = -1): " + m1 + " != -11");
+      failed = true;
+    }
+    if (m2 != -12) {
+      System.out.println("ERROR (incr = -2): " + m2 + " != -12");
+      failed = true;
+    }
+    if (m3 != -12) {
+      System.out.println("ERROR (incr = -3): " + m3 + " != -12");
+      failed = true;
+    }
+    if (m11 != -11) {
+      System.out.println("ERROR (incr = -11): " + m11 + " != -11");
+      failed = true;
+    }
+    if (failed) {
+      System.exit(97);
+    }
+  }
+}