Merge
author neliasso
Thu, 27 Mar 2014 14:15:42 +0100
changeset 23500 4d2c3e2dc910
parent 23484 b9e6597aa718 (current diff)
parent 23499 9d5b7480c9f4 (diff)
child 23502 f41b7404d441
Merge
hotspot/src/os/aix/vm/os_aix.cpp
hotspot/src/share/vm/runtime/arguments.cpp
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,6 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
 #include "asm/assembler.inline.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
@@ -37,6 +36,7 @@
 #include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
@@ -384,10 +384,10 @@
       bool load_xa = (xa != 0) || (xb < 0);
       bool return_xd = false;
 
-      if (load_xa) lis(tmp, xa);
-      if (xc) lis(d, xc);
+      if (load_xa) { lis(tmp, xa); }
+      if (xc) { lis(d, xc); }
       if (load_xa) {
-        if (xb) ori(tmp, tmp, xb); // No addi, we support tmp == R0.
+        if (xb) { ori(tmp, tmp, (unsigned short)xb); } // No addi, we support tmp == R0.
       } else {
         li(tmp, xb); // non-negative
       }
@@ -409,18 +409,18 @@
     // opt 4: avoid adding 0
     if (xa) { // Highest 16-bit needed?
       lis(d, xa);
-      if (xb) addi(d, d, xb);
+      if (xb) { addi(d, d, xb); }
     } else {
       li(d, xb);
     }
     sldi(d, d, 32);
-    if (xc) addis(d, d, xc);
+    if (xc) { addis(d, d, xc); }
   }
 
   // opt 5: Return offset to be inserted into following instruction.
   if (return_simm16_rest) return xd;
 
-  if (xd) addi(d, d, xd);
+  if (xd) { addi(d, d, xd); }
   return 0;
 }
 
@@ -696,4 +696,5 @@
   tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", code()->insts_begin(), code()->insts_end());
   code()->decode();
 }
+
 #endif // !PRODUCT
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -139,7 +139,8 @@
 inline void Assembler::cmplw( ConditionRegister crx, Register a, Register b) { Assembler::cmpl( crx, 0, a, b); }
 inline void Assembler::cmpld( ConditionRegister crx, Register a, Register b) { Assembler::cmpl( crx, 1, a, b); }
 
-inline void Assembler::isel(Register d, Register a, Register b, int c) { emit_int32(ISEL_OPCODE    | rt(d)  | ra(a) | rb(b) | bc(c)); }
+inline void Assembler::isel(Register d, Register a, Register b, int c) { guarantee(VM_Version::has_isel(), "opcode not supported on this hardware");
+                                                                         emit_int32(ISEL_OPCODE    | rt(d)  | ra(a) | rb(b) | bc(c)); }
 
 // PPC 1, section 3.3.11, Fixed-Point Logical Instructions
 inline void Assembler::andi_(   Register a, Register s, int ui16)      { emit_int32(ANDI_OPCODE    | rta(a) | rs(s) | uimm(ui16, 16)); }
@@ -531,9 +532,12 @@
 //inline void Assembler::mffgpr( FloatRegister d, Register b)   { emit_int32( MFFGPR_OPCODE | frt(d) | rb(b) | rc(0)); }
 //inline void Assembler::mftgpr( Register d, FloatRegister b)   { emit_int32( MFTGPR_OPCODE | rt(d) | frb(b) | rc(0)); }
 // add cmpb and popcntb to detect ppc power version.
-inline void Assembler::cmpb(   Register a, Register s, Register b) { emit_int32( CMPB_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
-inline void Assembler::popcntb(Register a, Register s)             { emit_int32( POPCNTB_OPCODE | rta(a) | rs(s)); };
-inline void Assembler::popcntw(Register a, Register s)             { emit_int32( POPCNTW_OPCODE | rta(a) | rs(s)); };
+inline void Assembler::cmpb(   Register a, Register s, Register b) { guarantee(VM_Version::has_cmpb(), "opcode not supported on this hardware");
+                                                                     emit_int32( CMPB_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::popcntb(Register a, Register s)             { guarantee(VM_Version::has_popcntb(), "opcode not supported on this hardware");
+                                                                     emit_int32( POPCNTB_OPCODE | rta(a) | rs(s)); };
+inline void Assembler::popcntw(Register a, Register s)             { guarantee(VM_Version::has_popcntw(), "opcode not supported on this hardware");
+                                                                     emit_int32( POPCNTW_OPCODE | rta(a) | rs(s)); };
 inline void Assembler::popcntd(Register a, Register s)             { emit_int32( POPCNTD_OPCODE | rta(a) | rs(s)); };
 
 inline void Assembler::fneg(  FloatRegister d, FloatRegister b) { emit_int32( FNEG_OPCODE  | frt(d) | frb(b) | rc(0)); }
@@ -568,14 +572,17 @@
 inline void Assembler::fctiw( FloatRegister d, FloatRegister b) { emit_int32( FCTIW_OPCODE  | frt(d) | frb(b) | rc(0)); }
 inline void Assembler::fctiwz(FloatRegister d, FloatRegister b) { emit_int32( FCTIWZ_OPCODE | frt(d) | frb(b) | rc(0)); }
 inline void Assembler::fcfid( FloatRegister d, FloatRegister b) { emit_int32( FCFID_OPCODE  | frt(d) | frb(b) | rc(0)); }
-inline void Assembler::fcfids(FloatRegister d, FloatRegister b) { emit_int32( FCFIDS_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fcfids(FloatRegister d, FloatRegister b) { guarantee(VM_Version::has_fcfids(), "opcode not supported on this hardware");
+                                                                  emit_int32( FCFIDS_OPCODE | frt(d) | frb(b) | rc(0)); }
 
 // PPC 1, section 4.6.7 Floating-Point Compare Instructions
 inline void Assembler::fcmpu( ConditionRegister crx, FloatRegister a, FloatRegister b) { emit_int32( FCMPU_OPCODE | bf(crx) | fra(a) | frb(b)); }
 
 // PPC 1, section 5.2.1 Floating-Point Arithmetic Instructions
-inline void Assembler::fsqrt( FloatRegister d, FloatRegister b) { emit_int32( FSQRT_OPCODE  | frt(d) | frb(b) | rc(0)); }
-inline void Assembler::fsqrts(FloatRegister d, FloatRegister b) { emit_int32( FSQRTS_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fsqrt( FloatRegister d, FloatRegister b) { guarantee(VM_Version::has_fsqrt(), "opcode not supported on this hardware");
+                                                                  emit_int32( FSQRT_OPCODE  | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fsqrts(FloatRegister d, FloatRegister b) { guarantee(VM_Version::has_fsqrts(), "opcode not supported on this hardware");
+                                                                  emit_int32( FSQRTS_OPCODE | frt(d) | frb(b) | rc(0)); }
 
 // Vector instructions for >= Power6.
 inline void Assembler::lvebx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEBX_OPCODE  | vrt(d) | ra0mem(s1) | rb(s2)); }
@@ -703,7 +710,8 @@
 inline void Assembler::vcmpgtub_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
 inline void Assembler::vcmpgtuh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
 inline void Assembler::vcmpgtuw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
-inline void Assembler::vand(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAND_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vand(    VectorRegister d, VectorRegister a, VectorRegister b) { guarantee(VM_Version::has_vand(), "opcode not supported on this hardware");
+                                                                                        emit_int32( VAND_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vandc(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VANDC_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vnor(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNOR_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vor(     VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VOR_OPCODE      | vrt(d) | vra(a) | vrb(b)); }
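
Note: the hunks above route optional PPC opcodes through VM_Version feature checks before emission. A minimal sketch of the same guard pattern for a hypothetical opcode (FOO_OPCODE and VM_Version::has_foo() are illustrative stand-ins, not part of this change):

  inline void Assembler::foo(Register a, Register s) { guarantee(VM_Version::has_foo(), "opcode not supported on this hardware"); // refuse to emit on unsupported hardware
                                                       emit_int32(FOO_OPCODE | rta(a) | rs(s)); }                                 // otherwise encode as usual
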
--- a/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -87,7 +87,7 @@
 define_pd_global(uintx, CodeCacheMinBlockLength,     4);
 define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
 
-define_pd_global(bool,  TrapBasedRangeChecks,        false);
+define_pd_global(bool,  TrapBasedRangeChecks,        true);
 
 // Heap related flags
 define_pd_global(uintx,MetaspaceSize,                ScaleForWordSize(16*M));
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,6 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "asm/assembler.inline.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "compiler/disassembler.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
@@ -1120,7 +1118,7 @@
   }
   return _last_calls_return_pc;
 }
-#endif
+#endif // ABI_ELFv2
 
 void MacroAssembler::call_VM_base(Register oop_result,
                                   Register last_java_sp,
@@ -1794,7 +1792,7 @@
   cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
   bne(cr_reg, cas_label);
 
-  load_klass_with_trap_null_check(temp_reg, obj_reg);
+  load_klass(temp_reg, obj_reg);
 
   load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
   ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
@@ -1891,7 +1889,7 @@
   // the bias from one thread to another directly in this situation.
   andi(temp_reg, mark_reg, markOopDesc::age_mask_in_place);
   orr(temp_reg, R16_thread, temp_reg);
-  load_klass_with_trap_null_check(temp2_reg, obj_reg);
+  load_klass(temp2_reg, obj_reg);
   ld(temp2_reg, in_bytes(Klass::prototype_header_offset()), temp2_reg);
   orr(temp_reg, temp_reg, temp2_reg);
 
@@ -1927,7 +1925,7 @@
   // that another thread raced us for the privilege of revoking the
   // bias of this particular object, so it's okay to continue in the
   // normal locking code.
-  load_klass_with_trap_null_check(temp_reg, obj_reg);
+  load_klass(temp_reg, obj_reg);
   ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
   andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
   orr(temp_reg, temp_reg, temp2_reg);
@@ -2213,8 +2211,7 @@
   stbx(R0, Rtmp, Robj);
 }
 
-#ifndef SERIALGC
-
+#if INCLUDE_ALL_GCS
 // General G1 pre-barrier generator.
 // Goal: record the previous value if it is not null.
 void MacroAssembler::g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
@@ -2328,14 +2325,17 @@
 
   // Get the address of the card.
   lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
-
-  assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
-  cmpwi(CCR0, Rtmp3 /* card value */, 0);
+  cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+  beq(CCR0, filtered);
+
+  membar(Assembler::StoreLoad);
+  lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);  // Reload after membar.
+  cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val());
   beq(CCR0, filtered);
 
   // Storing a region crossing, non-NULL oop, card is clean.
   // Dirty card and log.
-  li(Rtmp3, 0); // dirty
+  li(Rtmp3, CardTableModRefBS::dirty_card_val());
   //release(); // G1: oops are allowed to get visible after dirty marking.
   stbx(Rtmp3, Rbase, Rcard_addr);
 
@@ -2362,7 +2362,7 @@
 
   bind(filtered_int);
 }
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
 
 // Values for last_Java_pc, and last_Java_sp must comply to the rules
 // in frame_ppc64.hpp.
@@ -2453,7 +2453,8 @@
 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided.
   if (Universe::narrow_klass_base() != 0) {
-    load_const(R0, Universe::narrow_klass_base(), (dst != current) ? dst : noreg); // Use dst as temp if it is free.
+    // Use dst as temp if it is free.
+    load_const(R0, Universe::narrow_klass_base(), (dst != current && dst != R0) ? dst : noreg);
     sub(dst, current, R0);
     current = dst;
   }
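
Note: the g1_write_barrier_post hunk above replaces the single dirty-card test with G1's young-card filter, a StoreLoad fence, and a reload. A hedged pseudocode rendering of the emitted sequence (control flow only; register allocation omitted):

  jbyte card = *card_addr;
  if (card == G1SATBCardTableModRefBS::g1_young_card_val()) goto filtered; // stores into young regions need no post-barrier
  storeload_membar();                                                      // order the preceding oop store against the reload
  card = *card_addr;                                                       // reload the card value after the fence
  if (card == CardTableModRefBS::dirty_card_val()) goto filtered;          // already dirtied (and logged) by someone else
  *card_addr = CardTableModRefBS::dirty_card_val();                        // dirty the card; the logging code follows
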
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -514,14 +514,14 @@
   void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
   void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);
 
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
   // General G1 pre-barrier generator.
   void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
                             Register Rtmp1, Register Rtmp2, bool needs_frame = false);
   // General G1 post-barrier generator
   void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1,
                              Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL);
-#endif // SERIALGC
+#endif
 
   // Support for managing the JavaThread pointer (i.e.; the reference to
   // thread-local information).
--- a/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -119,6 +119,7 @@
 
 void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
                                             bool for_compiler_entry) {
+  Label L_no_such_method;
   assert(method == R19_method, "interpreter calling convention");
   assert_different_registers(method, target, temp);
 
@@ -131,17 +132,31 @@
     __ lwz(temp, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
     __ cmplwi(CCR0, temp, 0);
     __ beq(CCR0, run_compiled_code);
+    // Null method test is replicated below in compiled case,
+    // it might be able to address across the verify_thread()
+    __ cmplwi(CCR0, R19_method, 0);
+    __ beq(CCR0, L_no_such_method);
     __ ld(target, in_bytes(Method::interpreter_entry_offset()), R19_method);
     __ mtctr(target);
     __ bctr();
     __ BIND(run_compiled_code);
   }
 
+  // Compiled case, either static or fall-through from runtime conditional
+  __ cmplwi(CCR0, R19_method, 0);
+  __ beq(CCR0, L_no_such_method);
+
   const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                      Method::from_interpreted_offset();
   __ ld(target, in_bytes(entry_offset), R19_method);
   __ mtctr(target);
   __ bctr();
+
+  __ bind(L_no_such_method);
+  assert(StubRoutines::throw_AbstractMethodError_entry() != NULL, "not yet generated!");
+  __ load_const_optimized(target, StubRoutines::throw_AbstractMethodError_entry());
+  __ mtctr(target);
+  __ bctr();
 }
 
 
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Thu Mar 27 14:15:42 2014 +0100
@@ -891,6 +891,13 @@
 // This is a block of C++ code which provides values, functions, and
 // definitions necessary in the rest of the architecture description.
 source_hpp %{
+  // Header information of the source block.
+  // Method declarations/definitions which are used outside
+  // the ad-scope can conveniently be defined here.
+  //
+  // To keep related declarations/definitions/uses close together,
+  // we switch between source %{ }% and source_hpp %{ }% freely as needed.
+
   // Returns true if Node n is followed by a MemBar node that 
   // will do an acquire. If so, this node must not do the acquire
   // operation.
@@ -1114,6 +1121,40 @@
 
 //=============================================================================
 
+%} // interrupt source
+
+source_hpp %{ // Header information of the source block.
+
+//--------------------------------------------------------------
+//---<  Used for optimization in Compile::Shorten_branches  >---
+//--------------------------------------------------------------
+
+const uint trampoline_stub_size     =  6 * BytesPerInstWord;
+
+class CallStubImpl {
+
+ public:
+
+  static void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset);
+
+  // Size of call trampoline stub.
+  // This doesn't need to be accurate to the byte, but it
+  // must be larger than or equal to the real size of the stub.
+  static uint size_call_trampoline() {
+    return trampoline_stub_size;
+  }
+
+  // number of relocations needed by a call trampoline stub
+  static uint reloc_call_trampoline() {
+    return 5;
+  }
+
+};
+
+%} // end source_hpp
+
+source %{
+
 // Emit a trampoline stub for a call to a target which is too far away.
 //
 // code sequences:
@@ -1125,9 +1166,7 @@
 //   load the call target from the constant pool
 //   branch via CTR (LR/link still points to the call-site above)
 
-const uint trampoline_stub_size = 6 * BytesPerInstWord;
-
-void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset) {
+void CallStubImpl::emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset) {
   // Start the stub.
   address stub = __ start_a_stub(Compile::MAX_stubs_size/2);
   if (stub == NULL) {
@@ -1170,19 +1209,6 @@
   __ end_a_stub();
 }
 
-// Size of trampoline stub, this doesn't need to be accurate but it must
-// be larger or equal to the real size of the stub.
-// Used for optimization in Compile::Shorten_branches.
-uint size_call_trampoline() {
-  return trampoline_stub_size;
-}
-
-// Number of relocation entries needed by trampoline stub.
-// Used for optimization in Compile::Shorten_branches.
-uint reloc_call_trampoline() {
-  return 5;
-}
-
 //=============================================================================
 
 // Emit an inline branch-and-link call and a related trampoline stub.
@@ -1221,7 +1247,7 @@
     const int     entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
 
     // Emit the trampoline stub which will be related to the branch-and-link below.
-    emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
+    CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
     __ relocate(rtype);
   }
 
@@ -2023,17 +2049,34 @@
 
 //=============================================================================
 
-uint size_exception_handler() {
-  // The exception_handler is a b64_patchable.
-  return MacroAssembler::b64_patchable_size;
-}
-
-uint size_deopt_handler() {
-  // The deopt_handler is a bl64_patchable.
-  return MacroAssembler::bl64_patchable_size;
-}
-
-int emit_exception_handler(CodeBuffer &cbuf) {
+%} // interrupt source
+
+source_hpp %{ // Header information of the source block.
+
+class HandlerImpl {
+
+ public:
+
+  static int emit_exception_handler(CodeBuffer &cbuf);
+  static int emit_deopt_handler(CodeBuffer& cbuf);
+
+  static uint size_exception_handler() {
+    // The exception_handler is a b64_patchable.
+    return MacroAssembler::b64_patchable_size;
+  }
+
+  static uint size_deopt_handler() {
+    // The deopt_handler is a bl64_patchable.
+    return MacroAssembler::bl64_patchable_size;
+  }
+
+};
+
+%} // end source_hpp
+
+source %{
+
+int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
   MacroAssembler _masm(&cbuf);
 
   address base = __ start_a_stub(size_exception_handler());
@@ -2050,7 +2093,7 @@
 
 // The deopt_handler is like the exception handler, but it calls to
 // the deoptimization blob instead of jumping to the exception blob.
-int emit_deopt_handler(CodeBuffer& cbuf) {
+int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
   MacroAssembler _masm(&cbuf);
 
   address base = __ start_a_stub(size_deopt_handler());
@@ -3438,7 +3481,7 @@
         const int     entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
 
         // Emit the trampoline stub which will be related to the branch-and-link below.
-        emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+        CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
         __ relocate(_optimized_virtual ?
                     relocInfo::opt_virtual_call_type : relocInfo::static_call_type);
       }
@@ -3481,7 +3524,7 @@
       const int     entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
 
       // Emit the trampoline stub which will be related to the branch-and-link below.
-      emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+      CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
       assert(_optimized_virtual, "methodHandle call should be a virtual call");
       __ relocate(relocInfo::opt_virtual_call_type);
     }
@@ -3531,7 +3574,7 @@
       const address entry_point = !($meth$$method) ? 0 : (address)$meth$$method;
       const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none);
       const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
-      emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());
+      CallStubImpl::emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());
 
       if (ra_->C->env()->failing())
         return;
@@ -8755,6 +8798,7 @@
 // Single-precision sqrt.
 instruct sqrtF_reg(regF dst, regF src) %{
   match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
+  predicate(VM_Version::has_fsqrts());
   ins_cost(DEFAULT_COST);
 
   format %{ "FSQRTS  $dst, $src" %}
@@ -11550,8 +11594,7 @@
   // effect no longer needs to be mentioned, since r0 is not contained
   // in a reg_class.
 
-  format %{ "LD      R12, addr of polling page\n\t"
-            "LD      R0, #0, R12 \t// Safepoint poll for GC" %}
+  format %{ "LD      R0, #0, R12 \t// Safepoint poll for GC" %}
   ins_encode( enc_poll(0x0, poll) );
   ins_pipe(pipe_class_default);
 %}
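
Note: the stub produced by CallStubImpl::emit_trampoline_stub has the shape described in the comment above it (load the call target from the constant pool, branch via CTR). A hedged MacroAssembler sketch of that sequence (Rtmp and Rtoc are illustrative names, not the exact registers the stub uses):

  // Reached by a nearby bl; LR still points to the instruction after that bl.
  __ ld(Rtmp, destination_toc_offset, Rtoc); // load the 64-bit call target from the TOC
  __ mtctr(Rtmp);                            // move it into the count register
  __ bctr();                                 // branch via CTR without touching LR
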
--- a/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -34,6 +34,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
 #include "vmreg_ppc.inline.hpp"
+#include "adfiles/ad_ppc_64.hpp"
 #ifdef COMPILER1
 #include "c1/c1_Runtime1.hpp"
 #endif
@@ -52,10 +53,6 @@
 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
 
 
-// Used by generate_deopt_blob.  Defined in .ad file.
-extern uint size_deopt_handler();
-
-
 class RegisterSaver {
  // Used for saving volatile registers.
  public:
@@ -2782,7 +2779,7 @@
   // We can't grab a free register here, because all registers may
   // contain live values, so let the RegisterSaver do the adjustment
   // of the return pc.
-  const int return_pc_adjustment_no_exception = -size_deopt_handler();
+  const int return_pc_adjustment_no_exception = -HandlerImpl::size_deopt_handler();
 
   // Push the "unpack frame"
   // Save everything in sight.
--- a/hotspot/src/cpu/ppc/vm/stubRoutines_ppc_64.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/stubRoutines_ppc_64.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,17 +23,6 @@
  *
  */
 
-#include "precompiled.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/stubRoutines.hpp"
-#ifdef TARGET_OS_FAMILY_aix
-# include "thread_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-
 // Implementation of the platform-specific part of StubRoutines - for
 // a description of how to extend it, see the stubRoutines.hpp file.
 
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -402,6 +402,9 @@
   CodeBuffer cb("detect_cpu_features", code_size, 0);
   MacroAssembler* a = new MacroAssembler(&cb);
 
+  // Must be set to true so we can generate the test code.
+  _features = VM_Version::all_features_m;
+
   // Emit code.
   void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->function_entry();
   uint32_t *code = (uint32_t *)a->pc();
@@ -409,14 +412,15 @@
   // Keep R3_ARG1 unmodified, it contains &field (see below).
   // Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
   a->fsqrt(F3, F4);                            // code[0] -> fsqrt_m
-  a->isel(R7, R5, R6, 0);                      // code[1] -> isel_m
-  a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[2] -> lxarx_m
-  a->cmpb(R7, R5, R6);                         // code[3] -> bcmp
-  //a->mftgpr(R7, F3);                         // code[4] -> mftgpr
-  a->popcntb(R7, R5);                          // code[5] -> popcntb
-  a->popcntw(R7, R5);                          // code[6] -> popcntw
-  a->fcfids(F3, F4);                           // code[7] -> fcfids
-  a->vand(VR0, VR0, VR0);                      // code[8] -> vand
+  a->fsqrts(F3, F4);                           // code[1] -> fsqrts_m
+  a->isel(R7, R5, R6, 0);                      // code[2] -> isel_m
+  a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[3] -> lxarx_m
+  a->cmpb(R7, R5, R6);                         // code[4] -> bcmp
+  //a->mftgpr(R7, F3);                         // code[5] -> mftgpr
+  a->popcntb(R7, R5);                          // code[6] -> popcntb
+  a->popcntw(R7, R5);                          // code[7] -> popcntw
+  a->fcfids(F3, F4);                           // code[8] -> fcfids
+  a->vand(VR0, VR0, VR0);                      // code[9] -> vand
   a->blr();
 
   // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
@@ -426,6 +430,7 @@
 
   uint32_t *code_end = (uint32_t *)a->pc();
   a->flush();
+  _features = VM_Version::unknown_m;
 
   // Print the detection code.
   if (PrintAssembly) {
@@ -450,6 +455,7 @@
   // determine which instructions are legal.
   int feature_cntr = 0;
   if (code[feature_cntr++]) features |= fsqrt_m;
+  if (code[feature_cntr++]) features |= fsqrts_m;
   if (code[feature_cntr++]) features |= isel_m;
   if (code[feature_cntr++]) features |= lxarxeh_m;
   if (code[feature_cntr++]) features |= cmpb_m;
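
Note: the readback loop above depends on a convention in the platform SIGILL handlers (os_linux_ppc.cpp, os_aix.cpp): while is_determine_features_test_running() is true, a probe instruction that traps is patched to 0 in place and execution resumes at the next word, so a zero slot means "unsupported". A simplified sketch of the handler side (the real handler also flushes the instruction cache):

  if (sig == SIGILL && VM_Version::is_determine_features_test_running()) {
    *(int*)pc = 0;   // zero the faulting probe so code[i] reads back as 0
    stub = pc + 4;   // resume at the next probe instruction
  }
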
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
 protected:
   enum Feature_Flag {
     fsqrt,
+    fsqrts,
     isel,
     lxarxeh,
     cmpb,
@@ -46,6 +47,7 @@
   enum Feature_Flag_Set {
     unknown_m             = 0,
     fsqrt_m               = (1 << fsqrt  ),
+    fsqrts_m              = (1 << fsqrts ),
     isel_m                = (1 << isel   ),
     lxarxeh_m             = (1 << lxarxeh),
     cmpb_m                = (1 << cmpb   ),
@@ -72,6 +74,7 @@
   static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
   // CPU instruction support
   static bool has_fsqrt()   { return (_features & fsqrt_m) != 0; }
+  static bool has_fsqrts()  { return (_features & fsqrts_m) != 0; }
   static bool has_isel()    { return (_features & isel_m) != 0; }
   static bool has_lxarxeh() { return (_features & lxarxeh_m) !=0; }
   static bool has_cmpb()    { return (_features & cmpb_m) != 0; }
--- a/hotspot/src/cpu/ppc/vm/vtableStubs_ppc_64.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/vtableStubs_ppc_64.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@
   address npe_addr = __ pc(); // npe = null pointer exception
   __ load_klass_with_trap_null_check(rcvr_klass, R3);
 
- // Set methodOop (in case of interpreted method), and destination address.
+ // Set method (in case of interpreted method), and destination address.
   int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
 
 #ifndef PRODUCT
@@ -161,8 +161,6 @@
   address npe_addr = __ pc(); // npe = null pointer exception
   __ load_klass_with_trap_null_check(rcvr_klass, R3_ARG1);
 
-  //__ ld(rcvr_klass, oopDesc::klass_offset_in_bytes(), R3_ARG1);
-
   BLOCK_COMMENT("Load start of itable entries into itable_entry.");
   __ lwz(vtable_len, InstanceKlass::vtable_length_offset() * wordSize, rcvr_klass);
   __ slwi(vtable_len, vtable_len, exact_log2(vtableEntry::size() * wordSize));
@@ -199,7 +197,7 @@
                                    itable_offset_search_inc;
   __ lwz(vtable_offset, vtable_offset_offset, itable_entry_addr);
 
-  // Compute itableMethodEntry and get methodOop and entry point for compiler.
+  // Compute itableMethodEntry and get method and entry point for compiler.
   const int method_offset = (itableMethodEntry::size() * wordSize * vtable_index) +
     itableMethodEntry::method_offset_in_bytes();
 
@@ -211,7 +209,7 @@
     Label ok;
     __ cmpd(CCR0, R19_method, 0);
     __ bne(CCR0, ok);
-    __ stop("methodOop is null", 103);
+    __ stop("method is null", 103);
     __ bind(ok);
   }
 #endif
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -3320,7 +3320,7 @@
 
   // if tmp is invalid, then the function being called doesn't destroy the thread
   if (tmp->is_valid()) {
-    __ save_thread(tmp->as_register());
+    __ save_thread(tmp->as_pointer_register());
   }
   __ call(dest, relocInfo::runtime_call_type);
   __ delayed()->nop();
@@ -3328,7 +3328,7 @@
     add_call_info_here(info);
   }
   if (tmp->is_valid()) {
-    __ restore_thread(tmp->as_register());
+    __ restore_thread(tmp->as_pointer_register());
   }
 
 #ifdef ASSERT
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -69,7 +69,7 @@
 LIR_Opr LIRGenerator::exceptionOopOpr()              { return FrameMap::Oexception_opr;  }
 LIR_Opr LIRGenerator::exceptionPcOpr()               { return FrameMap::Oissuing_pc_opr; }
 LIR_Opr LIRGenerator::syncTempOpr()                  { return new_register(T_OBJECT); }
-LIR_Opr LIRGenerator::getThreadTemp()                { return rlock_callee_saved(T_INT); }
+LIR_Opr LIRGenerator::getThreadTemp()                { return rlock_callee_saved(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); }
 
 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
   LIR_Opr opr;
--- a/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -66,6 +66,4 @@
 define_pd_global(bool, CSEArrayLength,               true );
 define_pd_global(bool, TwoOperandLIRForm,            false);
 
-define_pd_global(intx, SafepointPollOffset,          0    );
-
 #endif // CPU_SPARC_VM_C1_GLOBALS_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Thu Mar 27 14:15:42 2014 +0100
@@ -457,6 +457,13 @@
 // This is a block of C++ code which provides values, functions, and
 // definitions necessary in the rest of the architecture description
 source_hpp %{
+// Header information of the source block.
+// Method declarations/definitions which are used outside
+// the ad-scope can conveniently be defined here.
+//
+// To keep related declarations/definitions/uses close together,
+// we switch between source %{ }% and source_hpp %{ }% freely as needed.
+
 // Must be visible to the DFA in dfa_sparc.cpp
 extern bool can_branch_register( Node *bol, Node *cmp );
 
@@ -468,6 +475,46 @@
 #define LONG_HI_REG(x) (x)
 #define LONG_LO_REG(x) (x)
 
+class CallStubImpl {
+
+  //--------------------------------------------------------------
+  //---<  Used for optimization in Compile::Shorten_branches  >---
+  //--------------------------------------------------------------
+
+ public:
+  // Size of call trampoline stub.
+  static uint size_call_trampoline() {
+    return 0; // no call trampolines on this platform
+  }
+
+  // number of relocations needed by a call trampoline stub
+  static uint reloc_call_trampoline() {
+    return 0; // no call trampolines on this platform
+  }
+};
+
+class HandlerImpl {
+
+ public:
+
+  static int emit_exception_handler(CodeBuffer &cbuf);
+  static int emit_deopt_handler(CodeBuffer& cbuf);
+
+  static uint size_exception_handler() {
+    if (TraceJumps) {
+      return (400); // just a guess
+    }
+    return ( NativeJump::instruction_size ); // sethi;jmp;nop
+  }
+
+  static uint size_deopt_handler() {
+    if (TraceJumps) {
+      return (400); // just a guess
+    }
+    return ( 4+  NativeJump::instruction_size ); // save;sethi;jmp;restore
+  }
+};
+
 %}
 
 source %{
@@ -1710,22 +1757,9 @@
 
 //=============================================================================
 
-uint size_exception_handler() {
-  if (TraceJumps) {
-    return (400); // just a guess
-  }
-  return ( NativeJump::instruction_size ); // sethi;jmp;nop
-}
-
-uint size_deopt_handler() {
-  if (TraceJumps) {
-    return (400); // just a guess
-  }
-  return ( 4+  NativeJump::instruction_size ); // save;sethi;jmp;restore
-}
 
 // Emit exception handler code.
-int emit_exception_handler(CodeBuffer& cbuf) {
+int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
   Register temp_reg = G3;
   AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point());
   MacroAssembler _masm(&cbuf);
@@ -1746,7 +1780,7 @@
   return offset;
 }
 
-int emit_deopt_handler(CodeBuffer& cbuf) {
+int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
   // Can't use any of the current frame's registers as we may have deopted
   // at a poll and everything (including G3) can be live.
   Register temp_reg = L0;
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1112,7 +1112,6 @@
 }
 
 void Assembler::bsrl(Register dst, Register src) {
-  assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
   int encode = prefix_and_encode(dst->encoding(), src->encoding());
   emit_int8(0x0F);
   emit_int8((unsigned char)0xBD);
@@ -2343,6 +2342,11 @@
   emit_int8(imm8);
 }
 
+void Assembler::pause() {
+  emit_int8((unsigned char)0xF3);
+  emit_int8((unsigned char)0x90);
+}
+
 void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
   assert(VM_Version::supports_sse4_2(), "");
   InstructionMark im(this);
@@ -2667,6 +2671,11 @@
   }
 }
 
+void Assembler::rdtsc() {
+  emit_int8((unsigned char)0x0F);
+  emit_int8((unsigned char)0x31);
+}
+
 // copies data from [esi] to [edi] using rcx pointer sized words
 // generic
 void Assembler::rep_mov() {
@@ -2976,6 +2985,11 @@
   emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
 }
 
+void Assembler::xabort(int8_t imm8) {
+  emit_int8((unsigned char)0xC6);
+  emit_int8((unsigned char)0xF8);
+  emit_int8((unsigned char)(imm8 & 0xFF));
+}
 
 void Assembler::xaddl(Address dst, Register src) {
   InstructionMark im(this);
@@ -2985,6 +2999,24 @@
   emit_operand(src, dst);
 }
 
+void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
+  InstructionMark im(this);
+  relocate(rtype);
+  if (abort.is_bound()) {
+    address entry = target(abort);
+    assert(entry != NULL, "abort entry NULL");
+    intptr_t offset = entry - pc();
+    emit_int8((unsigned char)0xC7);
+    emit_int8((unsigned char)0xF8);
+    emit_int32(offset - 6); // 2 opcode + 4 address
+  } else {
+    abort.add_patch_at(code(), locator());
+    emit_int8((unsigned char)0xC7);
+    emit_int8((unsigned char)0xF8);
+    emit_int32(0);
+  }
+}
+
 void Assembler::xchgl(Register dst, Address src) { // xchg
   InstructionMark im(this);
   prefix(src, dst);
@@ -2998,6 +3030,12 @@
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
+void Assembler::xend() {
+  emit_int8((unsigned char)0x0F);
+  emit_int8((unsigned char)0x01);
+  emit_int8((unsigned char)0xD5);
+}
+
 void Assembler::xgetbv() {
   emit_int8(0x0F);
   emit_int8(0x01);
@@ -4938,7 +4976,6 @@
 }
 
 void Assembler::bsrq(Register dst, Register src) {
-  assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
   int encode = prefixq_and_encode(dst->encoding(), src->encoding());
   emit_int8(0x0F);
   emit_int8((unsigned char)0xBD);
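
Note: the xbegin/xend/xabort emitters added above encode Intel's RTM (Restricted Transactional Memory) instructions; the RTM locking code later in this change builds on them. An illustrative fragment showing the control shape they support (not lifted from this change):

  Label on_abort, done;
  __ xbegin(on_abort);  // enter a transaction; any abort resumes here with a status code in EAX
  // ... speculative body, e.g. test that the lock word is unlocked ...
  __ xend();            // commit the transaction
  __ jmp(done);
  __ bind(on_abort);    // abort path: inspect EAX, then retry or fall back to real locking
  __ bind(done);
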
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1451,6 +1451,8 @@
   // Pemutation of 64bit words
   void vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256);
 
+  void pause();
+
   // SSE4.2 string instructions
   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
@@ -1535,6 +1537,8 @@
 
   void rclq(Register dst, int imm8);
 
+  void rdtsc();
+
   void ret(int imm16);
 
   void sahf();
@@ -1632,16 +1636,22 @@
   void ucomiss(XMMRegister dst, Address src);
   void ucomiss(XMMRegister dst, XMMRegister src);
 
+  void xabort(int8_t imm8);
+
   void xaddl(Address dst, Register src);
 
   void xaddq(Address dst, Register src);
 
+  void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none);
+
   void xchgl(Register reg, Address adr);
   void xchgl(Register dst, Register src);
 
   void xchgq(Register reg, Address adr);
   void xchgq(Register dst, Register src);
 
+  void xend();
+
   // Get Value of Extended Control Register
   void xgetbv();
 
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -604,8 +604,7 @@
 
   // Note: we do not need to round double result; float result has the right precision
   // the poll sets the condition code, but no data registers
-  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
-                              relocInfo::poll_return_type);
+  AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
 
   if (Assembler::is_polling_page_far()) {
     __ lea(rscratch1, polling_page);
@@ -619,8 +618,7 @@
 
 
 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
-  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
-                              relocInfo::poll_type);
+  AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type);
   guarantee(info != NULL, "Shouldn't be NULL");
   int offset = __ offset();
   if (Assembler::is_polling_page_far()) {
--- a/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -65,6 +65,4 @@
 define_pd_global(bool, CSEArrayLength,               false);
 define_pd_global(bool, TwoOperandLIRForm,            true );
 
-define_pd_global(intx, SafepointPollOffset,          256  );
-
 #endif // CPU_X86_VM_C1_GLOBALS_X86_HPP
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -129,6 +129,42 @@
   product(bool, UseFastStosb, false,                                        \
           "Use fast-string operation for zeroing: rep stosb")               \
                                                                             \
+  /* Use Restricted Transactional Memory for lock eliding */                \
+  product(bool, UseRTMLocking, false,                                       \
+          "Enable RTM lock eliding for inflated locks in compiled code")    \
+                                                                            \
+  experimental(bool, UseRTMForStackLocks, false,                            \
+          "Enable RTM lock eliding for stack locks in compiled code")       \
+                                                                            \
+  product(bool, UseRTMDeopt, false,                                         \
+          "Perform deopt and recompilation based on RTM abort ratio")       \
+                                                                            \
+  product(uintx, RTMRetryCount, 5,                                          \
+          "Number of RTM retries on lock abort or busy")                    \
+                                                                            \
+  experimental(intx, RTMSpinLoopCount, 100,                                 \
+          "Spin count for lock to become free before RTM retry")            \
+                                                                            \
+  experimental(intx, RTMAbortThreshold, 1000,                               \
+          "Calculate abort ratio after this number of aborts")              \
+                                                                            \
+  experimental(intx, RTMLockingThreshold, 10000,                            \
+          "Lock count at which to do RTM lock eliding without "             \
+          "abort ratio calculation")                                        \
+                                                                            \
+  experimental(intx, RTMAbortRatio, 50,                                     \
+          "Lock abort ratio at which to stop use RTM lock eliding")         \
+                                                                            \
+  experimental(intx, RTMTotalCountIncrRate, 64,                             \
+          "Increment total RTM attempted lock count once every n times")    \
+                                                                            \
+  experimental(intx, RTMLockingCalculationDelay, 0,                         \
+          "Number of milliseconds to wait before start calculating aborts " \
+          "for RTM locking")                                                \
+                                                                            \
+  experimental(bool, UseRTMXendForLockBusy, false,                          \
+          "Use RTM Xend instead of Xabort when lock busy")                  \
+                                                                            \
   /* assembler */                                                           \
   product(bool, Use486InstrsOnly, false,                                    \
           "Use 80486 Compliant instruction subset")                         \
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -301,7 +301,9 @@
   mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
 }
 
-void MacroAssembler::movptr(Register dst, AddressLiteral src) {
+void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
+  // scratch register is not used,
+  // it is defined to match parameters of 64-bit version of this method.
   if (src.is_lval()) {
     mov_literal32(dst, (intptr_t)src.target(), src.rspec());
   } else {
@@ -613,6 +615,15 @@
   /* else */      { subq(dst, value)       ; return; }
 }
 
+void MacroAssembler::incrementq(AddressLiteral dst) {
+  if (reachable(dst)) {
+    incrementq(as_Address(dst));
+  } else {
+    lea(rscratch1, dst);
+    incrementq(Address(rscratch1, 0));
+  }
+}
+
 void MacroAssembler::incrementq(Register reg, int value) {
   if (value == min_jint) { addq(reg, value); return; }
   if (value <  0) { decrementq(reg, -value); return; }
@@ -681,15 +692,15 @@
   movq(dst, rscratch1);
 }
 
-void MacroAssembler::movptr(Register dst, AddressLiteral src) {
+void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
   if (src.is_lval()) {
     mov_literal64(dst, (intptr_t)src.target(), src.rspec());
   } else {
     if (reachable(src)) {
       movq(dst, as_Address(src));
     } else {
-      lea(rscratch1, src);
-      movq(dst, Address(rscratch1,0));
+      lea(scratch, src);
+      movq(dst, Address(scratch, 0));
     }
   }
 }
@@ -988,20 +999,37 @@
   LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
 }
 
-void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
-  pushf();
+void MacroAssembler::atomic_incl(Address counter_addr) {
+  if (os::is_MP())
+    lock();
+  incrementl(counter_addr);
+}
+
+void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register scr) {
   if (reachable(counter_addr)) {
-    if (os::is_MP())
-      lock();
-    incrementl(as_Address(counter_addr));
+    atomic_incl(as_Address(counter_addr));
   } else {
-    lea(rscratch1, counter_addr);
-    if (os::is_MP())
-      lock();
-    incrementl(Address(rscratch1, 0));
-  }
-  popf();
-}
+    lea(scr, counter_addr);
+    atomic_incl(Address(scr, 0));
+  }
+}
+
+#ifdef _LP64
+void MacroAssembler::atomic_incq(Address counter_addr) {
+  if (os::is_MP())
+    lock();
+  incrementq(counter_addr);
+}
+
+void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register scr) {
+  if (reachable(counter_addr)) {
+    atomic_incq(as_Address(counter_addr));
+  } else {
+    lea(scr, counter_addr);
+    atomic_incq(Address(scr, 0));
+  }
+}
+#endif
 
 // Writes to stack successive pages until offset reached to check for
 // stack overflow + shadow pages.  This clobbers tmp.
@@ -1274,6 +1302,325 @@
 }
 
 #ifdef COMPILER2
+
+#if INCLUDE_RTM_OPT
+
+// Update rtm_counters based on abort status
+// input: abort_status
+//        rtm_counters (RTMLockingCounters*)
+// flags are killed
+void MacroAssembler::rtm_counters_update(Register abort_status, Register rtm_counters) {
+
+  atomic_incptr(Address(rtm_counters, RTMLockingCounters::abort_count_offset()));
+  if (PrintPreciseRTMLockingStatistics) {
+    for (int i = 0; i < RTMLockingCounters::ABORT_STATUS_LIMIT; i++) {
+      Label check_abort;
+      testl(abort_status, (1<<i));
+      jccb(Assembler::equal, check_abort);
+      atomic_incptr(Address(rtm_counters, RTMLockingCounters::abortX_count_offset() + (i * sizeof(uintx))));
+      bind(check_abort);
+    }
+  }
+}
+
+// Branch if (random & (count-1) != 0), count is 2^n
+// tmp, scr and flags are killed
+void MacroAssembler::branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel) {
+  assert(tmp == rax, "");
+  assert(scr == rdx, "");
+  rdtsc(); // modifies EDX:EAX
+  andptr(tmp, count-1);
+  jccb(Assembler::notZero, brLabel);
+}
+
+// Perform abort ratio calculation, set no_rtm bit if high ratio
+// input:  rtm_counters_Reg (RTMLockingCounters* address)
+// tmpReg, rtm_counters_Reg and flags are killed
+void MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
+                                                 Register rtm_counters_Reg,
+                                                 RTMLockingCounters* rtm_counters,
+                                                 Metadata* method_data) {
+  Label L_done, L_check_always_rtm1, L_check_always_rtm2;
+
+  if (RTMLockingCalculationDelay > 0) {
+    // Delay calculation
+    movptr(tmpReg, ExternalAddress((address) RTMLockingCounters::rtm_calculation_flag_addr()), tmpReg);
+    testptr(tmpReg, tmpReg);
+    jccb(Assembler::equal, L_done);
+  }
+  // Abort ratio calculation only if abort_count > RTMAbortThreshold
+  //   Aborted transactions = abort_count * 100
+  //   All transactions = total_count *  RTMTotalCountIncrRate
+  //   Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)
+
+  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::abort_count_offset()));
+  cmpptr(tmpReg, RTMAbortThreshold);
+  jccb(Assembler::below, L_check_always_rtm2);
+  imulptr(tmpReg, tmpReg, 100);
+
+  Register scrReg = rtm_counters_Reg;
+  movptr(scrReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
+  imulptr(scrReg, scrReg, RTMTotalCountIncrRate);
+  imulptr(scrReg, scrReg, RTMAbortRatio);
+  cmpptr(tmpReg, scrReg);
+  jccb(Assembler::below, L_check_always_rtm1);
+  if (method_data != NULL) {
+    // set rtm_state to "no rtm" in MDO
+    mov_metadata(tmpReg, method_data);
+    if (os::is_MP()) {
+      lock();
+    }
+    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
+  }
+  jmpb(L_done);
+  bind(L_check_always_rtm1);
+  // Reload RTMLockingCounters* address
+  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
+  bind(L_check_always_rtm2);
+  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
+  cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
+  jccb(Assembler::below, L_done);
+  if (method_data != NULL) {
+    // set rtm_state to "always rtm" in MDO
+    mov_metadata(tmpReg, method_data);
+    if (os::is_MP()) {
+      lock();
+    }
+    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
+  }
+  bind(L_done);
+}
+
+// Update counters and perform abort ratio calculation
+// input:  abort_status_Reg
+// rtm_counters_Reg, flags are killed
+void MacroAssembler::rtm_profiling(Register abort_status_Reg,
+                                   Register rtm_counters_Reg,
+                                   RTMLockingCounters* rtm_counters,
+                                   Metadata* method_data,
+                                   bool profile_rtm) {
+
+  assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
+  // update rtm counters based on rax value at abort
+  // reads abort_status_Reg, updates flags
+  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
+  rtm_counters_update(abort_status_Reg, rtm_counters_Reg);
+  if (profile_rtm) {
+    // Save abort status because abort_status_Reg is used by following code.
+    if (RTMRetryCount > 0) {
+      push(abort_status_Reg);
+    }
+    assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
+    rtm_abort_ratio_calculation(abort_status_Reg, rtm_counters_Reg, rtm_counters, method_data);
+    // restore abort status
+    if (RTMRetryCount > 0) {
+      pop(abort_status_Reg);
+    }
+  }
+}
+
+// Retry on abort if abort's status is 0x6: can retry (0x2) | memory conflict (0x4)
+// inputs: retry_count_Reg
+//       : abort_status_Reg
+// output: retry_count_Reg decremented by 1
+// flags are killed
+void MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg, Register abort_status_Reg, Label& retryLabel) {
+  Label doneRetry;
+  assert(abort_status_Reg == rax, "");
+  // The abort reason bits are in eax (see all states in rtmLocking.hpp)
+  // 0x6 = conflict on which we can retry (0x2) | memory conflict (0x4)
+  // if reason is in 0x6 and retry count != 0 then retry
+  andptr(abort_status_Reg, 0x6);
+  jccb(Assembler::zero, doneRetry);
+  testl(retry_count_Reg, retry_count_Reg);
+  jccb(Assembler::zero, doneRetry);
+  pause();
+  decrementl(retry_count_Reg);
+  jmp(retryLabel);
+  bind(doneRetry);
+}
+
+// Spin and retry if lock is busy,
+// inputs: box_Reg (monitor address)
+//       : retry_count_Reg
+// output: retry_count_Reg decremented by 1
+//       : clear z flag if retry count exceeded
+// tmp_Reg, scr_Reg, flags are killed
+void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register box_Reg,
+                                            Register tmp_Reg, Register scr_Reg, Label& retryLabel) {
+  Label SpinLoop, SpinExit, doneRetry;
+  // Clean monitor_value bit to get valid pointer
+  int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+
+  testl(retry_count_Reg, retry_count_Reg);
+  jccb(Assembler::zero, doneRetry);
+  decrementl(retry_count_Reg);
+  movptr(scr_Reg, RTMSpinLoopCount);
+
+  bind(SpinLoop);
+  pause();
+  decrementl(scr_Reg);
+  jccb(Assembler::lessEqual, SpinExit);
+  movptr(tmp_Reg, Address(box_Reg, owner_offset));
+  testptr(tmp_Reg, tmp_Reg);
+  jccb(Assembler::notZero, SpinLoop);
+
+  bind(SpinExit);
+  jmp(retryLabel);
+  bind(doneRetry);
+  incrementl(retry_count_Reg); // clear z flag
+}
+
+// Use RTM for normal stack locks
+// Input: objReg (object to lock)
+void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Register scrReg,
+                                       Register retry_on_abort_count_Reg,
+                                       RTMLockingCounters* stack_rtm_counters,
+                                       Metadata* method_data, bool profile_rtm,
+                                       Label& DONE_LABEL, Label& IsInflated) {
+  assert(UseRTMForStackLocks, "why call this otherwise?");
+  assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
+  assert(tmpReg == rax, "");
+  assert(scrReg == rdx, "");
+  Label L_rtm_retry, L_decrement_retry, L_on_abort;
+
+  if (RTMRetryCount > 0) {
+    movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
+    bind(L_rtm_retry);
+  }
+  if (!UseRTMXendForLockBusy) {
+    movptr(tmpReg, Address(objReg, 0));
+    testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
+    jcc(Assembler::notZero, IsInflated);
+  }
+  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
+    Label L_noincrement;
+    if (RTMTotalCountIncrRate > 1) {
+      // tmpReg, scrReg and flags are killed
+      branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
+    }
+    assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
+    atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
+    bind(L_noincrement);
+  }
+  xbegin(L_on_abort);
+  movptr(tmpReg, Address(objReg, 0));       // fetch markword
+  andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
+  cmpptr(tmpReg, markOopDesc::unlocked_value);            // bits = 001 unlocked
+  jcc(Assembler::equal, DONE_LABEL);        // all done if unlocked
+
+  Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
+  if (UseRTMXendForLockBusy) {
+    xend();
+    movptr(tmpReg, Address(objReg, 0));
+    testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
+    jcc(Assembler::notZero, IsInflated);
+    movptr(abort_status_Reg, 0x1);                // Set the abort status to 1 (as xabort does)
+    jmp(L_decrement_retry);
+  }
+  else {
+    xabort(0);
+  }
+  bind(L_on_abort);
+  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
+    rtm_profiling(abort_status_Reg, scrReg, stack_rtm_counters, method_data, profile_rtm);
+  }
+  bind(L_decrement_retry);
+  if (RTMRetryCount > 0) {
+    // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
+    rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
+  }
+}
+
+// Use RTM for inflated locks
+// inputs: objReg (object to lock)
+//         boxReg (on-stack box address (displaced header location) - KILLED)
+//         tmpReg (ObjectMonitor address + 2(monitor_value))
+void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
+                                          Register scrReg, Register retry_on_busy_count_Reg,
+                                          Register retry_on_abort_count_Reg,
+                                          RTMLockingCounters* rtm_counters,
+                                          Metadata* method_data, bool profile_rtm,
+                                          Label& DONE_LABEL) {
+  assert(UseRTMLocking, "why call this otherwise?");
+  assert(tmpReg == rax, "");
+  assert(scrReg == rdx, "");
+  Label L_rtm_retry, L_decrement_retry, L_on_abort;
+  // Clean monitor_value bit to get valid pointer
+  int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+
+  // Without cast to int32_t a movptr will destroy r10 which is typically obj
+  movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
+  movptr(boxReg, tmpReg); // Save ObjectMonitor address
+
+  if (RTMRetryCount > 0) {
+    movl(retry_on_busy_count_Reg, RTMRetryCount);  // Retry on lock busy
+    movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
+    bind(L_rtm_retry);
+  }
+  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
+    Label L_noincrement;
+    if (RTMTotalCountIncrRate > 1) {
+      // tmpReg, scrReg and flags are killed
+      branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
+    }
+    assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
+    atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
+    bind(L_noincrement);
+  }
+  xbegin(L_on_abort);
+  movptr(tmpReg, Address(objReg, 0));
+  movptr(tmpReg, Address(tmpReg, owner_offset));
+  testptr(tmpReg, tmpReg);
+  jcc(Assembler::zero, DONE_LABEL);
+  if (UseRTMXendForLockBusy) {
+    xend();
+    jmp(L_decrement_retry);
+  }
+  else {
+    xabort(0);
+  }
+  bind(L_on_abort);
+  Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
+  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
+    rtm_profiling(abort_status_Reg, scrReg, rtm_counters, method_data, profile_rtm);
+  }
+  if (RTMRetryCount > 0) {
+    // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
+    rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
+  }
+
+  movptr(tmpReg, Address(boxReg, owner_offset));
+  testptr(tmpReg, tmpReg);
+  jccb(Assembler::notZero, L_decrement_retry);
+
+  // Appears unlocked - try to swing _owner from null to non-null.
+  // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
+#ifdef _LP64
+  Register threadReg = r15_thread;
+#else
+  get_thread(scrReg);
+  Register threadReg = scrReg;
+#endif
+  if (os::is_MP()) {
+    lock();
+  }
+  cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg
+
+  if (RTMRetryCount > 0) {
+    // success: done, otherwise retry
+    jccb(Assembler::equal, DONE_LABEL);
+    bind(L_decrement_retry);
+    // Spin and retry if lock is busy.
+    rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);
+  }
+  else {
+    bind(L_decrement_retry);
+  }
+}
+
+#endif //  INCLUDE_RTM_OPT
+
 // Fast_Lock and Fast_Unlock used by C2
 
 // Because the transitions from emitted code to the runtime
@@ -1350,17 +1697,26 @@
 // box: on-stack box address (displaced header location) - KILLED
 // rax,: tmp -- KILLED
 // scr: tmp -- KILLED
-void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg, Register scrReg, BiasedLockingCounters* counters) {
+void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg,
+                               Register scrReg, Register cx1Reg, Register cx2Reg,
+                               BiasedLockingCounters* counters,
+                               RTMLockingCounters* rtm_counters,
+                               RTMLockingCounters* stack_rtm_counters,
+                               Metadata* method_data,
+                               bool use_rtm, bool profile_rtm) {
   // Ensure the register assignments are disjoint
-  guarantee (objReg != boxReg, "");
-  guarantee (objReg != tmpReg, "");
-  guarantee (objReg != scrReg, "");
-  guarantee (boxReg != tmpReg, "");
-  guarantee (boxReg != scrReg, "");
-  guarantee (tmpReg == rax, "");
+  assert(tmpReg == rax, "");
+
+  if (use_rtm) {
+    assert_different_registers(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg);
+  } else {
+    assert(cx1Reg == noreg, "");
+    assert(cx2Reg == noreg, "");
+    assert_different_registers(objReg, boxReg, tmpReg, scrReg);
+  }
 
   if (counters != NULL) {
-    atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()));
+    atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg);
   }
   if (EmitSync & 1) {
       // set box->dhw = unused_mark (3)
@@ -1419,12 +1775,20 @@
       biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, counters);
     }
 
+#if INCLUDE_RTM_OPT
+    if (UseRTMForStackLocks && use_rtm) {
+      rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg,
+                        stack_rtm_counters, method_data, profile_rtm,
+                        DONE_LABEL, IsInflated);
+    }
+#endif // INCLUDE_RTM_OPT
+
     movptr(tmpReg, Address(objReg, 0));          // [FETCH]
-    testl (tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
-    jccb  (Assembler::notZero, IsInflated);
+    testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
+    jccb(Assembler::notZero, IsInflated);
 
     // Attempt stack-locking ...
-    orptr (tmpReg, 0x1);
+    orptr (tmpReg, markOopDesc::unlocked_value);
     movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
     if (os::is_MP()) {
       lock();
@@ -1434,19 +1798,32 @@
       cond_inc32(Assembler::equal,
                  ExternalAddress((address)counters->fast_path_entry_count_addr()));
     }
-    jccb(Assembler::equal, DONE_LABEL);
-
-    // Recursive locking
+    jcc(Assembler::equal, DONE_LABEL);           // Success
+
+    // Recursive locking.
+    // The object is stack-locked: markword contains stack pointer to BasicLock.
+    // Locked by current thread if difference with current SP is less than one page.
     subptr(tmpReg, rsp);
+    // The next instruction sets ZFlag == 1 (Success) if the difference is less than one page.
     andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
     movptr(Address(boxReg, 0), tmpReg);
     if (counters != NULL) {
       cond_inc32(Assembler::equal,
                  ExternalAddress((address)counters->fast_path_entry_count_addr()));
     }
-    jmpb(DONE_LABEL);
+    jmp(DONE_LABEL);
 
     bind(IsInflated);
+    // The object is inflated. tmpReg contains pointer to ObjectMonitor* + 2(monitor_value)
+
+#if INCLUDE_RTM_OPT
+    // Use the same RTM locking code in 32- and 64-bit VM.
+    if (use_rtm) {
+      rtm_inflated_locking(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg,
+                           rtm_counters, method_data, profile_rtm, DONE_LABEL);
+    } else {
+#endif // INCLUDE_RTM_OPT
+
 #ifndef _LP64
     // The object is inflated.
     //
@@ -1576,7 +1953,7 @@
     // Without cast to int32_t a movptr will destroy r10 which is typically obj
     movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
 
-    mov    (boxReg, tmpReg);
+    movptr (boxReg, tmpReg);
     movptr (tmpReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
     testptr(tmpReg, tmpReg);
     jccb   (Assembler::notZero, DONE_LABEL);
@@ -1587,9 +1964,11 @@
     }
     cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
     // Intentional fall-through into DONE_LABEL ...
-
+#endif // _LP64
+
+#if INCLUDE_RTM_OPT
+    } // use_rtm
 #endif
-
     // DONE_LABEL is a hot target - we'd really like to place it at the
     // start of cache line by padding with NOPs.
     // See the AMD and Intel software optimization manuals for the
@@ -1631,11 +2010,9 @@
 // should not be unlocked by "normal" java-level locking and vice-versa.  The specification
 // doesn't specify what will occur if a program engages in such mixed-mode locking, however.
 
-void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg) {
-  guarantee (objReg != boxReg, "");
-  guarantee (objReg != tmpReg, "");
-  guarantee (boxReg != tmpReg, "");
-  guarantee (boxReg == rax, "");
+void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, bool use_rtm) {
+  assert(boxReg == rax, "");
+  assert_different_registers(objReg, boxReg, tmpReg);
 
   if (EmitSync & 4) {
     // Disable - inhibit all inlining.  Force control through the slow-path
@@ -1667,14 +2044,41 @@
        biased_locking_exit(objReg, tmpReg, DONE_LABEL);
     }
 
+#if INCLUDE_RTM_OPT
+    if (UseRTMForStackLocks && use_rtm) {
+      assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
+      Label L_regular_unlock;
+      movptr(tmpReg, Address(objReg, 0));           // fetch markword
+      andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
+      cmpptr(tmpReg, markOopDesc::unlocked_value);            // bits = 001 unlocked
+      jccb(Assembler::notEqual, L_regular_unlock);  // if !HLE RegularLock
+      xend();                                       // otherwise end...
+      jmp(DONE_LABEL);                              // ... and we're done
+      bind(L_regular_unlock);
+    }
+#endif
+
     cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
+    jcc   (Assembler::zero, DONE_LABEL);            // 0 indicates recursive stack-lock
     movptr(tmpReg, Address(objReg, 0));             // Examine the object's markword
-    jccb  (Assembler::zero, DONE_LABEL);            // 0 indicates recursive stack-lock
-
-    testptr(tmpReg, 0x02);                          // Inflated?
+    testptr(tmpReg, markOopDesc::monitor_value);    // Inflated?
     jccb  (Assembler::zero, Stacked);
 
     // It's inflated.
+#if INCLUDE_RTM_OPT
+    if (use_rtm) {
+      Label L_regular_inflated_unlock;
+      // Clean monitor_value bit to get valid pointer
+      int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+      movptr(boxReg, Address(tmpReg, owner_offset));
+      testptr(boxReg, boxReg);
+      jccb(Assembler::notZero, L_regular_inflated_unlock);
+      xend();
+      jmpb(DONE_LABEL);
+      bind(L_regular_inflated_unlock);
+    }
+#endif
+
     // Despite our balanced locking property we still check that m->_owner == Self
     // as java routines or native JNI code called by this thread might
     // have released the lock.
@@ -2448,7 +2852,9 @@
   Condition negated_cond = negate_condition(cond);
   Label L;
   jcc(negated_cond, L);
+  pushf(); // Preserve flags
   atomic_incl(counter_addr);
+  popf();
   bind(L);
 }
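
The RTM fast path above is easier to follow once the retry policy is separated from the assembly. A minimal stand-alone C++ sketch of the decision rtm_retry_lock_on_abort encodes, using the abort-status bits cited in its comments (0x2 = can retry, 0x4 = memory conflict); the names and constants below are illustrative, not part of the patch:

#include <cstdio>

// Abort-status bits delivered in EAX after an RTM abort (see rtmLocking.hpp).
const unsigned kMayRetry       = 0x2;
const unsigned kMemoryConflict = 0x4;

// Mirrors rtm_retry_lock_on_abort: retry only for retryable aborts
// and only while the retry budget lasts.
static bool should_retry_on_abort(unsigned abort_status, int& retry_count) {
  if ((abort_status & (kMayRetry | kMemoryConflict)) == 0) return false;
  if (retry_count == 0) return false;
  --retry_count;                       // like decrementl(retry_count_Reg)
  return true;
}

int main() {
  int budget = 5;                      // plays the role of RTMRetryCount
  printf("%d\n", should_retry_on_abort(kMemoryConflict, budget)); // 1: retry
  printf("%d\n", should_retry_on_abort(0x8, budget));             // 0: non-retryable abort
  printf("budget left: %d\n", budget);                            // 4
}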
 
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -27,6 +27,7 @@
 
 #include "asm/assembler.hpp"
 #include "utilities/macros.hpp"
+#include "runtime/rtmLocking.hpp"
 
 
 // MacroAssembler extends Assembler by frequently used macros.
@@ -111,7 +112,8 @@
         op == 0xE9 /* jmp */ ||
         op == 0xEB /* short jmp */ ||
         (op & 0xF0) == 0x70 /* short jcc */ ||
-        op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
+        op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
+        op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
         "Invalid opcode at patch point");
 
     if (op == 0xEB || (op & 0xF0) == 0x70) {
@@ -121,7 +123,7 @@
       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
       *disp = imm8;
     } else {
-      int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
+      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
       int imm32 = target - (address) &disp[1];
       *disp = imm32;
     }
@@ -161,7 +163,6 @@
   void incrementq(Register reg, int value = 1);
   void incrementq(Address dst, int value = 1);
 
-
   // Support optimal SSE move instructions.
   void movflt(XMMRegister dst, XMMRegister src) {
     if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
@@ -187,6 +188,8 @@
   void incrementl(AddressLiteral dst);
   void incrementl(ArrayAddress dst);
 
+  void incrementq(AddressLiteral dst);
+
   // Alignment
   void align(int modulus);
 
@@ -654,8 +657,36 @@
 #ifdef COMPILER2
   // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
   // See full desription in macroAssembler_x86.cpp.
-  void fast_lock(Register obj, Register box, Register tmp, Register scr, BiasedLockingCounters* counters);
-  void fast_unlock(Register obj, Register box, Register tmp);
+  void fast_lock(Register obj, Register box, Register tmp,
+                 Register scr, Register cx1, Register cx2,
+                 BiasedLockingCounters* counters,
+                 RTMLockingCounters* rtm_counters,
+                 RTMLockingCounters* stack_rtm_counters,
+                 Metadata* method_data,
+                 bool use_rtm, bool profile_rtm);
+  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
+#if INCLUDE_RTM_OPT
+  void rtm_counters_update(Register abort_status, Register rtm_counters);
+  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
+  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
+                                   RTMLockingCounters* rtm_counters,
+                                   Metadata* method_data);
+  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
+                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
+  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
+  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
+  void rtm_stack_locking(Register obj, Register tmp, Register scr,
+                         Register retry_on_abort_count,
+                         RTMLockingCounters* stack_rtm_counters,
+                         Metadata* method_data, bool profile_rtm,
+                         Label& DONE_LABEL, Label& IsInflated);
+  void rtm_inflated_locking(Register obj, Register box, Register tmp,
+                            Register scr, Register retry_on_busy_count,
+                            Register retry_on_abort_count,
+                            RTMLockingCounters* rtm_counters,
+                            Metadata* method_data, bool profile_rtm,
+                            Label& DONE_LABEL);
+#endif
 #endif
 
   Condition negate_condition(Condition cond);
@@ -721,6 +752,7 @@
 
 
   void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
+  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
 
 
   void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
@@ -762,7 +794,14 @@
   // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
   void cond_inc32(Condition cond, AddressLiteral counter_addr);
   // Unconditional atomic increment.
-  void atomic_incl(AddressLiteral counter_addr);
+  void atomic_incl(Address counter_addr);
+  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
+#ifdef _LP64
+  void atomic_incq(Address counter_addr);
+  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
+#endif
+  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
+  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
 
   void lea(Register dst, AddressLiteral adr);
   void lea(Address dst, AddressLiteral adr);
@@ -1074,7 +1113,11 @@
 
   void movptr(Register dst, Address src);
 
-  void movptr(Register dst, AddressLiteral src);
+#ifdef _LP64
+  void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
+#else
+  void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
+#endif
 
   void movptr(Register dst, intptr_t src);
   void movptr(Register dst, Register src);
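
The new atomic_incptr and imulptr wrappers above follow the header's LP64_ONLY/NOT_LP64 idiom of selecting a 32- or 64-bit flavor at compile time. A self-contained sketch of that dispatch pattern (the macros below only approximate HotSpot's real ones from the shared headers):

#include <cstdint>
#include <cstdio>

// Stand-ins approximating HotSpot's LP64_ONLY/NOT_LP64 macros.
#ifdef __LP64__
  #define LP64_ONLY(code) code
  #define NOT_LP64(code)
#else
  #define LP64_ONLY(code)
  #define NOT_LP64(code) code
#endif

static void incq(int64_t* p) { ++*p; }  // 64-bit flavor
static void incl(int32_t* p) { ++*p; }  // 32-bit flavor

// Pointer-width increment resolved at compile time, like atomic_incptr.
static void inc_ptr_sized(void* p) {
  LP64_ONLY(incq((int64_t*)p);)
  NOT_LP64(incl((int32_t*)p);)
}

int main() {
  intptr_t counter = 41;
  inc_ptr_sized(&counter);
  printf("%ld\n", (long)counter);       // 42
}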
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/x86/vm/rtmLocking.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/task.hpp"
+#include "runtime/rtmLocking.hpp"
+
+uintx RTMLockingCounters::_calculation_flag = 0;
+
+// One-shot PeriodicTask subclass for enabling RTM locking
+class RTMLockingCalculationTask : public PeriodicTask {
+ public:
+  RTMLockingCalculationTask(size_t interval_time) : PeriodicTask(interval_time){  }
+
+  virtual void task() {
+    RTMLockingCounters::_calculation_flag = 1;
+    // Reclaim our storage and disenroll ourselves
+    delete this;
+  }
+};
+
+void RTMLockingCounters::init() {
+  if (UseRTMLocking && RTMLockingCalculationDelay > 0) {
+    RTMLockingCalculationTask* task = new RTMLockingCalculationTask(RTMLockingCalculationDelay);
+    task->enroll();
+  } else {
+    _calculation_flag = 1;
+  }
+}
+
+//------------------------------print_on-------------------------------
+void RTMLockingCounters::print_on(outputStream* st) {
+  tty->print_cr("# rtm locks total (estimated): " UINTX_FORMAT, _total_count * RTMTotalCountIncrRate);
+  tty->print_cr("# rtm lock aborts  : " UINTX_FORMAT, _abort_count);
+  for (int i = 0; i < ABORT_STATUS_LIMIT; i++) {
+    tty->print_cr("# rtm lock aborts %d: " UINTX_FORMAT, i, _abortX_count[i]);
+  }
+}
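
RTMLockingCalculationTask is a one-shot timer: it fires once after RTMLockingCalculationDelay, sets the flag that enables abort-ratio calculation, and deletes itself. A portable sketch of the same pattern, with std::thread standing in for the WatcherThread-driven PeriodicTask machinery:

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

static std::atomic<unsigned> calculation_flag{0};

// One-shot delayed action, analogous to RTMLockingCalculationTask:
// after delay_ms the flag flips and the helper goes away.
static void arm_calculation_delay(int delay_ms) {
  std::thread([delay_ms] {
    std::this_thread::sleep_for(std::chrono::milliseconds(delay_ms));
    calculation_flag.store(1, std::memory_order_release);
  }).detach();
}

int main() {
  arm_calculation_delay(50);   // like RTMLockingCalculationDelay=50
  while (calculation_flag.load(std::memory_order_acquire) == 0) {
    // abort-ratio calculation stays disabled until the flag is set
  }
  printf("calculation enabled\n");
}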
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1817,6 +1817,13 @@
   // Frame is now completed as far as size and linkage.
   int frame_complete = ((intptr_t)__ pc()) - start;
 
+  if (UseRTMLocking) {
+    // Abort RTM transaction before calling JNI
+    // because critical section will be large and will be
+    // aborted anyway. Also nmethod could be deoptimized.
+    __ xabort(0);
+  }
+
   // Calculate the difference between rsp and rbp,. We need to know it
   // after the native call because on windows Java Natives will pop
   // the arguments and it is painful to do rsp relative addressing
@@ -3170,6 +3177,12 @@
   };
 
   address start = __ pc();
+
+  if (UseRTMLocking) {
+    // Abort RTM transaction before possible nmethod deoptimization.
+    __ xabort(0);
+  }
+
   // Push self-frame.
   __ subptr(rsp, return_off*wordSize);     // Epilog!
 
@@ -3355,6 +3368,14 @@
   address call_pc = NULL;
   bool cause_return = (poll_type == POLL_AT_RETURN);
   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
+
+  if (UseRTMLocking) {
+    // Abort RTM transaction before calling runtime
+    // because critical section will be large and will be
+    // aborted anyway. Also nmethod could be deoptimized.
+    __ xabort(0);
+  }
+
   // If cause_return is true we are at a poll_return and there is
   // the return address on the stack to the caller on the nmethod
   // that is safepoint. We can leave this return on the stack and
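
The xabort(0) planted above is a real instruction, not a marker: executed inside a transaction it forces an immediate rollback, and executed outside one it is a no-op, so it is safe to emit unconditionally. A sketch of the same instruction via compiler intrinsics (assumes an RTM-capable CPU and, with GCC/Clang, the -mrtm flag):

#include <immintrin.h>
#include <cstdio>

// Build with: g++ -mrtm demo.cpp  (runs only on RTM-capable hardware)
int main() {
  unsigned status = _xbegin();          // starts a transaction; aborts land here
  if (status == _XBEGIN_STARTED) {
    _xabort(0);                         // what the stub emits: roll back, code 0
    _xend();                            // never reached
  }
  // After the rollback, status carries _XABORT_EXPLICIT plus the abort code.
  printf("status = 0x%x, explicit = %d\n", status, (status & _XABORT_EXPLICIT) != 0);
}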
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -2012,6 +2012,13 @@
   // Frame is now completed as far as size and linkage.
   int frame_complete = ((intptr_t)__ pc()) - start;
 
+    if (UseRTMLocking) {
+      // Abort RTM transaction before calling JNI
+      // because critical section will be large and will be
+      // aborted anyway. Also nmethod could be deoptimized.
+      __ xabort(0);
+    }
+
 #ifdef ASSERT
     {
       Label L;
@@ -3612,6 +3619,11 @@
 
   address start = __ pc();
 
+  if (UseRTMLocking) {
+    // Abort RTM transaction before possible nmethod deoptimization.
+    __ xabort(0);
+  }
+
   // Push self-frame.  We get here with a return address on the
   // stack, so rsp is 8-byte aligned until we allocate our frame.
   __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog!
@@ -3792,6 +3804,13 @@
   bool cause_return = (poll_type == POLL_AT_RETURN);
   bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
 
+  if (UseRTMLocking) {
+    // Abort RTM transaction before calling runtime
+    // because critical section will be large and will be
+    // aborted anyway. Also nmethod could be deoptimized.
+    __ xabort(0);
+  }
+
   // Make room for return address (or push it again)
   if (!cause_return) {
     __ push(rbx);
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -50,8 +50,13 @@
 const char*           VM_Version::_features_str = "";
 VM_Version::CpuidInfo VM_Version::_cpuid_info   = { 0, };
 
+// Address of instruction which causes SEGV
+address VM_Version::_cpuinfo_segv_addr = 0;
+// Address of instruction after the one which causes SEGV
+address VM_Version::_cpuinfo_cont_addr = 0;
+
 static BufferBlob* stub_blob;
-static const int stub_size = 550;
+static const int stub_size = 600;
 
 extern "C" {
   typedef void (*getPsrInfo_stub_t)(void*);
@@ -234,9 +239,9 @@
     // Check if OS has enabled XGETBV instruction to access XCR0
     // (OSXSAVE feature flag) and CPU supports AVX
     //
-    __ andl(rcx, 0x18000000);
+    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
     __ cmpl(rcx, 0x18000000);
-    __ jccb(Assembler::notEqual, sef_cpuid);
+    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported
 
     //
     // XCR0, XFEATURE_ENABLED_MASK register
@@ -247,6 +252,47 @@
     __ movl(Address(rsi, 0), rax);
     __ movl(Address(rsi, 4), rdx);
 
+    __ andl(rax, 0x6); // xcr0 bits sse | ymm
+    __ cmpl(rax, 0x6);
+    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported
+
+    //
+    // Some OSes have a bug where the upper 128 bits of the YMM
+    // registers are not restored after signal processing.
+    // Generate SEGV here (reference through NULL)
+    // and check upper YMM bits after it.
+    //
+    VM_Version::set_avx_cpuFeatures(); // Enable temporarily to pass asserts
+
+    // load value into all 32 bytes of ymm7 register
+    __ movl(rcx, VM_Version::ymm_test_value());
+
+    __ movdl(xmm0, rcx);
+    __ pshufd(xmm0, xmm0, 0x00);
+    __ vinsertf128h(xmm0, xmm0, xmm0);
+    __ vmovdqu(xmm7, xmm0);
+#ifdef _LP64
+    __ vmovdqu(xmm8,  xmm0);
+    __ vmovdqu(xmm15, xmm0);
+#endif
+
+    __ xorl(rsi, rsi);
+    VM_Version::set_cpuinfo_segv_addr( __ pc() );
+    // Generate SEGV
+    __ movl(rax, Address(rsi, 0));
+
+    VM_Version::set_cpuinfo_cont_addr( __ pc() );
+    // Returns here after signal handling. Save the ymm test registers to check them later.
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
+    __ vmovdqu(Address(rsi,  0), xmm0);
+    __ vmovdqu(Address(rsi, 32), xmm7);
+#ifdef _LP64
+    __ vmovdqu(Address(rsi, 64), xmm8);
+    __ vmovdqu(Address(rsi, 96), xmm15);
+#endif
+
+    VM_Version::clean_cpuFeatures();
+
     //
     // cpuid(0x7) Structured Extended Features
     //
@@ -429,7 +475,7 @@
   }
 
   char buf[256];
-  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
                cores_per_cpu(), threads_per_core(),
                cpu_family(), _model, _stepping,
                (supports_cmov() ? ", cmov" : ""),
@@ -446,8 +492,9 @@
                (supports_avx()    ? ", avx" : ""),
                (supports_avx2()   ? ", avx2" : ""),
                (supports_aes()    ? ", aes" : ""),
-               (supports_clmul()    ? ", clmul" : ""),
+               (supports_clmul()  ? ", clmul" : ""),
                (supports_erms()   ? ", erms" : ""),
+               (supports_rtm()    ? ", rtm" : ""),
                (supports_mmx_ext() ? ", mmxext" : ""),
                (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
                (supports_lzcnt()   ? ", lzcnt": ""),
@@ -488,7 +535,7 @@
     }
   } else if (UseAES) {
     if (!FLAG_IS_DEFAULT(UseAES))
-      warning("AES instructions not available on this CPU");
+      warning("AES instructions are not available on this CPU");
     FLAG_SET_DEFAULT(UseAES, false);
   }
 
@@ -521,10 +568,57 @@
     }
   } else if (UseAESIntrinsics) {
     if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
-      warning("AES intrinsics not available on this CPU");
+      warning("AES intrinsics are not available on this CPU");
     FLAG_SET_DEFAULT(UseAESIntrinsics, false);
   }
 
+  // Adjust RTM (Restricted Transactional Memory) flags
+  if (!supports_rtm() && UseRTMLocking) {
+    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
+    // setting during arguments processing. See use_biased_locking().
+    // VM_Version_init() is executed after UseBiasedLocking is used
+    // in Thread::allocate().
+    vm_exit_during_initialization("RTM instructions are not available on this CPU");
+  }
+
+#if INCLUDE_RTM_OPT
+  if (UseRTMLocking) {
+    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
+      // RTM locking should be used only for applications with
+      // high lock contention. For now we do not use it by default.
+      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
+    }
+    if (!is_power_of_2(RTMTotalCountIncrRate)) {
+      warning("RTMTotalCountIncrRate must be a power of 2, resetting it to 64");
+      FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
+    }
+    if (RTMAbortRatio < 0 || RTMAbortRatio > 100) {
+      warning("RTMAbortRatio must be in the range 0 to 100, resetting it to 50");
+      FLAG_SET_DEFAULT(RTMAbortRatio, 50);
+    }
+  } else { // !UseRTMLocking
+    if (UseRTMForStackLocks) {
+      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
+        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
+      }
+      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
+    }
+    if (UseRTMDeopt) {
+      FLAG_SET_DEFAULT(UseRTMDeopt, false);
+    }
+    if (PrintPreciseRTMLockingStatistics) {
+      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
+    }
+  }
+#else
+  if (UseRTMLocking) {
+    // Only C2 does RTM locking optimization.
+    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
+    // setting during arguments processing. See use_biased_locking().
+    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
+  }
+#endif
+
 #ifdef COMPILER2
   if (UseFPUForSpilling) {
     if (UseSSE < 2) {
@@ -540,14 +634,28 @@
     if (MaxVectorSize > 32) {
       FLAG_SET_DEFAULT(MaxVectorSize, 32);
     }
-    if (MaxVectorSize > 16 && UseAVX == 0) {
-      // Only supported with AVX+
+    if (MaxVectorSize > 16 && (UseAVX == 0 || !os_supports_avx_vectors())) {
+      // 32 bytes vectors (in YMM) are only supported with AVX+
       FLAG_SET_DEFAULT(MaxVectorSize, 16);
     }
     if (UseSSE < 2) {
-      // Only supported with SSE2+
+      // Vectors (in XMM) are only supported with SSE2+
       FLAG_SET_DEFAULT(MaxVectorSize, 0);
     }
+#ifdef ASSERT
+    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
+      tty->print_cr("State of YMM registers after signal handle:");
+      int nreg = 2 LP64_ONLY(+2);
+      const char* ymm_name[4] = {"0", "7", "8", "15"};
+      for (int i = 0; i < nreg; i++) {
+        tty->print("YMM%s:", ymm_name[i]);
+        for (int j = 7; j >=0; j--) {
+          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
+        }
+        tty->cr();
+      }
+    }
+#endif
   }
 #endif
 
@@ -678,14 +786,6 @@
       }
     }
   }
-#if defined(COMPILER2) && defined(_ALLBSD_SOURCE)
-    if (MaxVectorSize > 16) {
-      // Limit vectors size to 16 bytes on BSD until it fixes
-      // restoring upper 128bit of YMM registers on return
-      // from signal handler.
-      FLAG_SET_DEFAULT(MaxVectorSize, 16);
-    }
-#endif // COMPILER2
 
   // Use count leading zeros count instruction if available.
   if (supports_lzcnt()) {
@@ -814,6 +914,11 @@
     if (UseAES) {
       tty->print("  UseAES=1");
     }
+#ifdef COMPILER2
+    if (MaxVectorSize > 0) {
+      tty->print("  MaxVectorSize=%d", MaxVectorSize);
+    }
+#endif
     tty->cr();
     tty->print("Allocation");
     if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) {
@@ -856,6 +961,27 @@
 #endif // !PRODUCT
 }
 
+bool VM_Version::use_biased_locking() {
+#if INCLUDE_RTM_OPT
+  // RTM locking is most useful when there is high lock contention and
+  // low data contention.  With high lock contention the lock is usually
+  // inflated and biased locking is not suitable for that case.
+  // RTM locking code requires that biased locking is off.
+  // Note: we can't switch off UseBiasedLocking in get_processor_features()
+  // because it is used by Thread::allocate() which is called before
+  // VM_Version::initialize().
+  if (UseRTMLocking && UseBiasedLocking) {
+    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
+      FLAG_SET_DEFAULT(UseBiasedLocking, false);
+    } else {
+      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." );
+      UseBiasedLocking = false;
+    }
+  }
+#endif
+  return UseBiasedLocking;
+}
+
 void VM_Version::initialize() {
   ResourceMark rm;
   // Making this stub must be FIRST use of assembler
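
The RTM flag checks above reject an increment rate that is not a power of two and clamp the abort ratio to a percentage. Condensed into plain C++ (warning() and FLAG_SET_DEFAULT are replaced by illustrative stand-ins):

#include <cstdio>

static bool is_power_of_2(long x) { return x > 0 && (x & (x - 1)) == 0; }

// Mirrors the RTM flag sanitization in get_processor_features().
static void sanitize_rtm_flags(long& incr_rate, long& abort_ratio) {
  if (!is_power_of_2(incr_rate)) {
    printf("warning: RTMTotalCountIncrRate must be a power of 2, resetting it to 64\n");
    incr_rate = 64;
  }
  if (abort_ratio < 0 || abort_ratio > 100) {
    printf("warning: RTMAbortRatio must be in the range 0 to 100, resetting it to 50\n");
    abort_ratio = 50;
  }
}

int main() {
  long rate = 100, ratio = 150;                // both invalid
  sanitize_rtm_flags(rate, ratio);
  printf("rate=%ld ratio=%ld\n", rate, ratio); // rate=64 ratio=50
}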
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -207,7 +207,9 @@
                         : 2,
                    bmi2 : 1,
                    erms : 1,
-                        : 22;
+                        : 1,
+                   rtm  : 1,
+                        : 20;
     } bits;
   };
 
@@ -229,6 +231,9 @@
                                // 0 if this instruction is not available
   static const char* _features_str;
 
+  static address   _cpuinfo_segv_addr; // address of instruction which causes SEGV
+  static address   _cpuinfo_cont_addr; // address of instruction after the one which causes SEGV
+
   enum {
     CPU_CX8    = (1 << 0), // next bits are from cpuid 1 (EDX)
     CPU_CMOV   = (1 << 1),
@@ -254,7 +259,8 @@
     CPU_ERMS   = (1 << 20), // enhanced 'rep movsb/stosb' instructions
     CPU_CLMUL  = (1 << 21), // carryless multiply for CRC
     CPU_BMI1   = (1 << 22),
-    CPU_BMI2   = (1 << 23)
+    CPU_BMI2   = (1 << 23),
+    CPU_RTM    = (1 << 24)  // Restricted Transactional Memory instructions
   } cpuFeatureFlags;
 
   enum {
@@ -361,6 +367,9 @@
     // extended control register XCR0 (the XFEATURE_ENABLED_MASK register)
     XemXcr0Eax   xem_xcr0_eax;
     uint32_t     xem_xcr0_edx; // reserved
+
+    // Space to save the ymm registers after signal handling
+    int          ymm_save[8*4]; // Save ymm0, ymm7, ymm8, ymm15
   };
 
   // The actual cpuid info block
@@ -438,6 +447,8 @@
       result |= CPU_ERMS;
     if (_cpuid_info.std_cpuid1_ecx.bits.clmul != 0)
       result |= CPU_CLMUL;
+    if (_cpuid_info.sef_cpuid7_ebx.bits.rtm != 0)
+      result |= CPU_RTM;
 
     // AMD features.
     if (is_amd()) {
@@ -460,6 +471,21 @@
     return result;
   }
 
+  static bool os_supports_avx_vectors() {
+    if (!supports_avx()) {
+      return false;
+    }
+    // Verify that the OS saves/restores all bits of the AVX registers
+    // during signal processing.
+    int nreg = 2 LP64_ONLY(+2);
+    for (int i = 0; i < 8 * nreg; i++) { // 32 bytes per ymm register
+      if (_cpuid_info.ymm_save[i] != ymm_test_value()) {
+        return false;
+      }
+    }
+    return true;
+  }
+
   static void get_processor_features();
 
 public:
@@ -476,10 +502,26 @@
   static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
   static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
   static ByteSize xem_xcr0_offset() { return byte_offset_of(CpuidInfo, xem_xcr0_eax); }
+  static ByteSize ymm_save_offset() { return byte_offset_of(CpuidInfo, ymm_save); }
+
+  // The value used to check the ymm registers after signal handling
+  static int ymm_test_value()    { return 0xCAFEBABE; }
+
+  static void set_cpuinfo_segv_addr(address pc) { _cpuinfo_segv_addr = pc; }
+  static bool  is_cpuinfo_segv_addr(address pc) { return _cpuinfo_segv_addr == pc; }
+  static void set_cpuinfo_cont_addr(address pc) { _cpuinfo_cont_addr = pc; }
+  static address  cpuinfo_cont_addr()           { return _cpuinfo_cont_addr; }
+
+  static void clean_cpuFeatures()   { _cpuFeatures = 0; }
+  static void set_avx_cpuFeatures() { _cpuFeatures = (CPU_SSE | CPU_SSE2 | CPU_AVX); }
+
 
   // Initialization
   static void initialize();
 
+  // Override Abstract_VM_Version implementation
+  static bool use_biased_locking();
+
   // Asserts
   static void assert_is_initialized() {
     assert(_cpuid_info.std_cpuid1_eax.bits.family != 0, "VM_Version not initialized");
@@ -572,6 +614,7 @@
   static bool supports_aes()      { return (_cpuFeatures & CPU_AES) != 0; }
   static bool supports_erms()     { return (_cpuFeatures & CPU_ERMS) != 0; }
   static bool supports_clmul()    { return (_cpuFeatures & CPU_CLMUL) != 0; }
+  static bool supports_rtm()      { return (_cpuFeatures & CPU_RTM) != 0; }
   static bool supports_bmi1()     { return (_cpuFeatures & CPU_BMI1) != 0; }
   static bool supports_bmi2()     { return (_cpuFeatures & CPU_BMI2) != 0; }
   // Intel features
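
The new rtm bit in sef_cpuid7_ebx corresponds to CPUID leaf 7, sub-leaf 0, EBX bit 11, which is what supports_rtm() ultimately reflects. A user-space probe for the same bit using GCC/Clang's cpuid.h (a sketch; MSVC would use __cpuidex instead):

#include <cpuid.h>
#include <cstdio>

// True if the CPU advertises RTM: CPUID.(EAX=7,ECX=0):EBX bit 11.
static bool cpu_supports_rtm() {
  if (__get_cpuid_max(0, nullptr) < 7) return false;  // leaf 7 not implemented
  unsigned eax, ebx, ecx, edx;
  __cpuid_count(7, 0, eax, ebx, ecx, edx);
  return (ebx >> 11) & 1;
}

int main() {
  printf("rtm: %s\n", cpu_supports_rtm() ? "yes" : "no");
}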
--- a/hotspot/src/cpu/x86/vm/x86.ad	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/x86.ad	Thu Mar 27 14:15:42 2014 +0100
@@ -474,7 +474,125 @@
 
 %}
 
+
+//----------SOURCE BLOCK-------------------------------------------------------
+// This is a block of C++ code which provides values, functions, and
+// definitions necessary in the rest of the architecture description
+
+source_hpp %{
+// Header information of the source block.
+// Method declarations/definitions which are used outside
+// the ad-scope can conveniently be defined here.
+//
+// To keep related declarations/definitions/uses close together,
+// we switch between source %{ }% and source_hpp %{ }% freely as needed.
+
+class CallStubImpl {
+ 
+  //--------------------------------------------------------------
+  //---<  Used for optimization in Compile::shorten_branches  >---
+  //--------------------------------------------------------------
+
+ public:
+  // Size of call trampoline stub.
+  static uint size_call_trampoline() {
+    return 0; // no call trampolines on this platform
+  }
+  
+  // Number of relocations needed by a call trampoline stub.
+  static uint reloc_call_trampoline() { 
+    return 0; // no call trampolines on this platform
+  }
+};
+
+class HandlerImpl {
+
+ public:
+
+  static int emit_exception_handler(CodeBuffer &cbuf);
+  static int emit_deopt_handler(CodeBuffer& cbuf);
+
+  static uint size_exception_handler() {
+    // NativeCall instruction size is the same as NativeJump.
+    // exception handler starts out as jump and can be patched to
+    // a call by deoptimization.  (4932387)
+    // Note that this value is also credited (in output.cpp) to
+    // the size of the code section.
+    return NativeJump::instruction_size;
+  }
+
+#ifdef _LP64
+  static uint size_deopt_handler() {
+    // three 5 byte instructions
+    return 15;
+  }
+#else
+  static uint size_deopt_handler() {
+    // NativeCall instruction size is the same as NativeJump.
+    // exception handler starts out as jump and can be patched to
+    // a call by deoptimization.  (4932387)
+    // Note that this value is also credited (in output.cpp) to
+    // the size of the code section.
+    return 5 + NativeJump::instruction_size; // pushl(); jmp;
+  }
+#endif
+};
+
+%} // end source_hpp
+
 source %{
+
+// Emit exception handler code.
+// Stuff framesize into a register and call a VM stub routine.
+int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
+
+  // Note that the code buffer's insts_mark is always relative to insts.
+  // That's why we must use the macroassembler to generate a handler.
+  MacroAssembler _masm(&cbuf);
+  address base = __ start_a_stub(size_exception_handler());
+  if (base == NULL)  return 0;  // CodeBuffer::expand failed
+  int offset = __ offset();
+  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
+  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
+  __ end_a_stub();
+  return offset;
+}
+
+// Emit deopt handler code.
+int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
+
+  // Note that the code buffer's insts_mark is always relative to insts.
+  // That's why we must use the macroassembler to generate a handler.
+  MacroAssembler _masm(&cbuf);
+  address base = __ start_a_stub(size_deopt_handler());
+  if (base == NULL)  return 0;  // CodeBuffer::expand failed
+  int offset = __ offset();
+
+#ifdef _LP64
+  address the_pc = (address) __ pc();
+  Label next;
+  // push a "the_pc" on the stack without destroying any registers
+  // as they all may be live.
+
+  // push address of "next"
+  __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
+  __ bind(next);
+  // adjust it so it matches "the_pc"
+  __ subptr(Address(rsp, 0), __ offset() - offset);
+#else
+  InternalAddress here(__ pc());
+  __ pushptr(here.addr());
+#endif
+
+  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
+  __ end_a_stub();
+  return offset;
+}
+
+
+//=============================================================================
+
   // Float masks come from different places depending on platform.
 #ifdef _LP64
   static address float_signmask()  { return StubRoutines::x86::float_sign_mask(); }
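
The 64-bit deopt handler above materializes its own PC without touching any register: it calls the immediately following label, which pushes a return address, and then adjusts that stacked value in place. The fact it relies on is that a call pushes the address of the instruction after itself; a small GCC/Clang illustration (__builtin_return_address is a compiler builtin, not HotSpot code):

#include <cstdio>

// The return address a call pushes is the PC of the instruction after
// the call site -- the fact the handler's call/adjust sequence exploits.
__attribute__((noinline))
static void* pc_after_my_call_site() {
  return __builtin_return_address(0);
}

int main() {
  void* pc = pc_after_my_call_site();
  printf("pushed return address: %p\n", pc);  // just past the call in main
}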
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Thu Mar 27 14:15:42 2014 +0100
@@ -1297,59 +1297,6 @@
 
 
 //=============================================================================
-uint size_exception_handler() {
-  // NativeCall instruction size is the same as NativeJump.
-  // exception handler starts out as jump and can be patched to
-  // a call be deoptimization.  (4932387)
-  // Note that this value is also credited (in output.cpp) to
-  // the size of the code section.
-  return NativeJump::instruction_size;
-}
-
-// Emit exception handler code.  Stuff framesize into a register
-// and call a VM stub routine.
-int emit_exception_handler(CodeBuffer& cbuf) {
-
-  // Note that the code buffer's insts_mark is always relative to insts.
-  // That's why we must use the macroassembler to generate a handler.
-  MacroAssembler _masm(&cbuf);
-  address base =
-  __ start_a_stub(size_exception_handler());
-  if (base == NULL)  return 0;  // CodeBuffer::expand failed
-  int offset = __ offset();
-  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
-  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
-  __ end_a_stub();
-  return offset;
-}
-
-uint size_deopt_handler() {
-  // NativeCall instruction size is the same as NativeJump.
-  // exception handler starts out as jump and can be patched to
-  // a call be deoptimization.  (4932387)
-  // Note that this value is also credited (in output.cpp) to
-  // the size of the code section.
-  return 5 + NativeJump::instruction_size; // pushl(); jmp;
-}
-
-// Emit deopt handler code.
-int emit_deopt_handler(CodeBuffer& cbuf) {
-
-  // Note that the code buffer's insts_mark is always relative to insts.
-  // That's why we must use the macroassembler to generate a handler.
-  MacroAssembler _masm(&cbuf);
-  address base =
-  __ start_a_stub(size_exception_handler());
-  if (base == NULL)  return 0;  // CodeBuffer::expand failed
-  int offset = __ offset();
-  InternalAddress here(__ pc());
-  __ pushptr(here.addr());
-
-  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
-  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
-  __ end_a_stub();
-  return offset;
-}
 
 int Matcher::regnum_to_fpu_offset(int regnum) {
   return regnum - 32; // The FP registers are in the second chunk
@@ -12925,13 +12872,31 @@
 
 // inlined locking and unlocking
 
+instruct cmpFastLockRTM(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eDXRegI scr, rRegI cx1, rRegI cx2) %{
+  predicate(Compile::current()->use_rtm());
+  match(Set cr (FastLock object box));
+  effect(TEMP tmp, TEMP scr, TEMP cx1, TEMP cx2, USE_KILL box);
+  ins_cost(300);
+  format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr,$cx1,$cx2" %}
+  ins_encode %{
+    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
+                 $scr$$Register, $cx1$$Register, $cx2$$Register,
+                 _counters, _rtm_counters, _stack_rtm_counters,
+                 ((Method*)(ra_->C->method()->constant_encoding()))->method_data(),
+                 true, ra_->C->profile_rtm());
+  %}
+  ins_pipe(pipe_slow);
+%}
+
 instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
+  predicate(!Compile::current()->use_rtm());
   match(Set cr (FastLock object box));
   effect(TEMP tmp, TEMP scr, USE_KILL box);
   ins_cost(300);
   format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr" %}
   ins_encode %{
-    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
+    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
+                 $scr$$Register, noreg, noreg, _counters, NULL, NULL, NULL, false, false);
   %}
   ins_pipe(pipe_slow);
 %}
@@ -12942,7 +12907,7 @@
   ins_cost(300);
   format %{ "FASTUNLOCK $object,$box\t! kills $box,$tmp" %}
   ins_encode %{
-    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
+    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, ra_->C->use_rtm());
   %}
   ins_pipe(pipe_slow);
 %}
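
cmpFastLockRTM and cmpFastLock are kept mutually exclusive by their predicates, so exactly one rule can match a given FastLock node depending on whether the method being compiled uses RTM. The selection reduced to plain C++ (a sketch; the real choice is made by the ADLC-generated matcher, not hand-written code):

#include <cstdio>

struct CompileCtx { bool use_rtm; };

// Two candidate encodings for one FastLock ideal node, guarded by
// mutually exclusive predicates as in the .ad rules above.
static const char* select_fast_lock_rule(const CompileCtx& C) {
  return C.use_rtm ? "cmpFastLockRTM"   // kills box,tmp,scr,cx1,cx2
                   : "cmpFastLock";     // kills box,tmp,scr
}

int main() {
  printf("%s\n", select_fast_lock_rule(CompileCtx{true}));   // cmpFastLockRTM
  printf("%s\n", select_fast_lock_rule(CompileCtx{false}));  // cmpFastLock
}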
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Thu Mar 27 14:15:42 2014 +0100
@@ -1439,66 +1439,9 @@
   return MachNode::size(ra_); // too many variables; just compute it
                               // the hard way
 }
-
+ 
 
 //=============================================================================
-uint size_exception_handler()
-{
-  // NativeCall instruction size is the same as NativeJump.
-  // Note that this value is also credited (in output.cpp) to
-  // the size of the code section.
-  return NativeJump::instruction_size;
-}
-
-// Emit exception handler code.
-int emit_exception_handler(CodeBuffer& cbuf)
-{
-
-  // Note that the code buffer's insts_mark is always relative to insts.
-  // That's why we must use the macroassembler to generate a handler.
-  MacroAssembler _masm(&cbuf);
-  address base =
-  __ start_a_stub(size_exception_handler());
-  if (base == NULL)  return 0;  // CodeBuffer::expand failed
-  int offset = __ offset();
-  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
-  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
-  __ end_a_stub();
-  return offset;
-}
-
-uint size_deopt_handler()
-{
-  // three 5 byte instructions
-  return 15;
-}
-
-// Emit deopt handler code.
-int emit_deopt_handler(CodeBuffer& cbuf)
-{
-
-  // Note that the code buffer's insts_mark is always relative to insts.
-  // That's why we must use the macroassembler to generate a handler.
-  MacroAssembler _masm(&cbuf);
-  address base =
-  __ start_a_stub(size_deopt_handler());
-  if (base == NULL)  return 0;  // CodeBuffer::expand failed
-  int offset = __ offset();
-  address the_pc = (address) __ pc();
-  Label next;
-  // push a "the_pc" on the stack without destroying any registers
-  // as they all may be live.
-
-  // push address of "next"
-  __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
-  __ bind(next);
-  // adjust it so it matches "the_pc"
-  __ subptr(Address(rsp, 0), __ offset() - offset);
-  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
-  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
-  __ end_a_stub();
-  return offset;
-}
 
 int Matcher::regnum_to_fpu_offset(int regnum)
 {
@@ -11387,13 +11330,31 @@
 // ============================================================================
 // inlined locking and unlocking
 
+instruct cmpFastLockRTM(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rdx_RegI scr, rRegI cx1, rRegI cx2) %{
+  predicate(Compile::current()->use_rtm());
+  match(Set cr (FastLock object box));
+  effect(TEMP tmp, TEMP scr, TEMP cx1, TEMP cx2, USE_KILL box);
+  ins_cost(300);
+  format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr,$cx1,$cx2" %}
+  ins_encode %{
+    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
+                 $scr$$Register, $cx1$$Register, $cx2$$Register,
+                 _counters, _rtm_counters, _stack_rtm_counters,
+                 ((Method*)(ra_->C->method()->constant_encoding()))->method_data(),
+                 true, ra_->C->profile_rtm());
+  %}
+  ins_pipe(pipe_slow);
+%}
+
 instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr) %{
+  predicate(!Compile::current()->use_rtm());
   match(Set cr (FastLock object box));
   effect(TEMP tmp, TEMP scr, USE_KILL box);
   ins_cost(300);
   format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr" %}
   ins_encode %{
-    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
+    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
+                 $scr$$Register, noreg, noreg, _counters, NULL, NULL, NULL, false, false);
   %}
   ins_pipe(pipe_slow);
 %}
@@ -11404,7 +11365,7 @@
   ins_cost(300);
   format %{ "fastunlock $object,$box\t! kills $box,$tmp" %}
   ins_encode %{
-    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
+    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, ra_->C->use_rtm());
   %}
   ins_pipe(pipe_slow);
 %}
--- a/hotspot/src/os/aix/vm/mutex_aix.inline.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/os/aix/vm/mutex_aix.inline.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,6 @@
 
 #include "os_aix.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
-#include "thread_aix.inline.hpp"
+#include "runtime/thread.inline.hpp"
 
 #endif // OS_AIX_VM_MUTEX_AIX_INLINE_HPP
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,10 +61,10 @@
 #include "runtime/statSampler.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/threadCritical.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
 #include "services/attachListener.hpp"
 #include "services/runtimeService.hpp"
-#include "thread_aix.inline.hpp"
 #include "utilities/decoder.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/events.hpp"
--- a/hotspot/src/os/aix/vm/threadCritical_aix.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/os/aix/vm/threadCritical_aix.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 
 #include "precompiled.hpp"
 #include "runtime/threadCritical.hpp"
-#include "thread_aix.inline.hpp"
+#include "runtime/thread.inline.hpp"
 
 // put OS-includes here
 # include <pthread.h>
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -2425,6 +2425,12 @@
     }
   }
 
+  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
+      VM_Version::is_cpuinfo_segv_addr(pc)) {
+    // Verify that the OS saves/restores AVX registers.
+    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
+  }
+
   if (t != NULL && t->is_Java_thread()) {
     JavaThread* thread = (JavaThread*) t;
     bool in_java = thread->thread_state() == _thread_in_Java;
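
This handler (and its POSIX counterparts below) recognizes the deliberate fault planted by the CPUID stub and resumes execution at the recorded continuation address instead of treating it as a crash. A minimal POSIX sketch of that resume-at-a-known-point pattern, with sigsetjmp/siglongjmp standing in for rewriting the fault context's PC:

#include <csetjmp>
#include <csignal>
#include <cstdio>

static sigjmp_buf cont_point;  // plays the role of _cpuinfo_cont_addr

static void on_segv(int) {
  siglongjmp(cont_point, 1);   // resume at the recorded continuation point
}

int main() {
  struct sigaction sa = {};
  sa.sa_handler = on_segv;
  sigemptyset(&sa.sa_mask);
  sigaction(SIGSEGV, &sa, nullptr);

  if (sigsetjmp(cont_point, 1) == 0) {
    *(volatile int*)0 = 0;     // deliberate fault, like the probe instruction
  } else {
    printf("resumed after the probe fault\n");  // register state checked here
  }
}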
--- a/hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,8 +49,8 @@
 #include "runtime/osThread.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
-#include "thread_aix.inline.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 #ifdef COMPILER1
--- a/hotspot/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,14 +25,14 @@
 
 #include "precompiled.hpp"
 #include "runtime/threadLocalStorage.hpp"
-#include "thread_aix.inline.hpp"
+#include "runtime/thread.hpp"
 
 void ThreadLocalStorage::generate_code_for_get_thread() {
-    // nothing we can do here for user-level thread
+  // Nothing we can do here for user-level thread.
 }
 
 void ThreadLocalStorage::pd_init() {
-  // Nothing to do
+  // Nothing to do.
 }
 
 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
--- a/hotspot/src/os_cpu/aix_ppc/vm/thread_aix_ppc.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/os_cpu/aix_ppc/vm/thread_aix_ppc.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,8 @@
  */
 
 #include "precompiled.hpp"
-#include "runtime/frame.inline.hpp"
-#include "thread_aix.inline.hpp"
+#include "runtime/frame.hpp"
+#include "runtime/thread.hpp"
 
 // Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Aix/PPC.
 bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
--- a/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -492,6 +492,11 @@
       }
     }
 
+    if ((sig == SIGSEGV || sig == SIGBUS) && VM_Version::is_cpuinfo_segv_addr(pc)) {
+      // Verify that the OS saves/restores AVX registers.
+      stub = VM_Version::cpuinfo_cont_addr();
+    }
+
     // We test if stub is already set (by the stack overflow code
     // above) so it is not overwritten by the code that follows. This
     // check is not required on other platforms, because on other
--- a/hotspot/src/os_cpu/linux_ppc/vm/thread_linux_ppc.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/os_cpu/linux_ppc/vm/thread_linux_ppc.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,8 @@
  */
 
 #include "precompiled.hpp"
-#include "runtime/frame.inline.hpp"
-#include "thread_linux.inline.hpp"
+#include "runtime/frame.hpp"
+#include "runtime/thread.hpp"
 
 // Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Linux/PPC.
 bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -338,6 +338,11 @@
       }
     }
 
+    if ((sig == SIGSEGV) && VM_Version::is_cpuinfo_segv_addr(pc)) {
+      // Verify that the OS saves/restores AVX registers.
+      stub = VM_Version::cpuinfo_cont_addr();
+    }
+
     if (thread->thread_state() == _thread_in_Java) {
       // Java thread running in Java code => find exception handler if any
       // a fault inside compiled code, the interpreter, or a stub
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -459,6 +459,11 @@
       }
     }
 
+    if ((sig == SIGSEGV) && VM_Version::is_cpuinfo_segv_addr(pc)) {
+      // Verify that the OS saves/restores AVX registers.
+      stub = VM_Version::cpuinfo_cont_addr();
+    }
+
     if (thread->thread_state() == _thread_in_vm) {
       if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) {
         stub = StubRoutines::handler_for_unsafe_access();
--- a/hotspot/src/share/vm/adlc/output_c.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/adlc/output_c.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1582,6 +1582,8 @@
 
       if( node->is_ideal_fastlock() && new_inst->is_ideal_fastlock() ) {
         fprintf(fp, "  ((MachFastLockNode*)n%d)->_counters = _counters;\n",cnt);
+        fprintf(fp, "  ((MachFastLockNode*)n%d)->_rtm_counters = _rtm_counters;\n",cnt);
+        fprintf(fp, "  ((MachFastLockNode*)n%d)->_stack_rtm_counters = _stack_rtm_counters;\n",cnt);
       }
 
       // Fill in the bottom_type where requested
@@ -3963,6 +3965,8 @@
   }
   if( inst->is_ideal_fastlock() ) {
     fprintf(fp_cpp, "%s node->_counters = _leaf->as_FastLock()->counters();\n", indent);
+    fprintf(fp_cpp, "%s node->_rtm_counters = _leaf->as_FastLock()->rtm_counters();\n", indent);
+    fprintf(fp_cpp, "%s node->_stack_rtm_counters = _leaf->as_FastLock()->stack_rtm_counters();\n", indent);
   }
 
 }
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -2526,7 +2526,7 @@
     // need to free up storage used for OSR entry point
     LIR_Opr osrBuffer = block()->next()->operand();
     BasicTypeList signature;
-    signature.append(T_INT);
+    signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
     CallingConvention* cc = frame_map()->c_calling_convention(&signature);
     __ move(osrBuffer, cc->args()->at(0));
     __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
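
The NOT_LP64/LP64_ONLY pair used in the hunk above comes from utilities/macros.hpp: each
macro expands to its argument when the pointer width matches and to nothing otherwise, so the
appended signature type is T_LONG on 64-bit VMs (where the osrBuffer pointer is 64 bits wide)
and T_INT on 32-bit VMs. A sketch of the macros' shape:

    #ifdef _LP64
    #define LP64_ONLY(code) code
    #define NOT_LP64(code)
    #else
    #define LP64_ONLY(code)
    #define NOT_LP64(code) code
    #endif

    // The patched line therefore preprocesses to one of:
    //   signature.append(T_LONG);  // 64-bit VM
    //   signature.append(T_INT);   // 32-bit VM
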
--- a/hotspot/src/share/vm/c1/c1_globals.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_globals.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -308,9 +308,6 @@
   develop(intx, InstructionCountCutoff, 37000,                              \
           "If GraphBuilder adds this many instructions, bails out")         \
                                                                             \
-  product_pd(intx, SafepointPollOffset,                                     \
-          "Offset added to polling address (Intel only)")                   \
-                                                                            \
   develop(bool, ComputeExactFPURegisterUsage, true,                         \
           "Compute additional live set for fpu registers to simplify fpu stack merge (Intel only)") \
                                                                             \
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -961,7 +961,8 @@
                             AbstractCompiler* compiler,
                             int comp_level,
                             bool has_unsafe_access,
-                            bool has_wide_vectors) {
+                            bool has_wide_vectors,
+                            RTMState  rtm_state) {
   VM_ENTRY_MARK;
   nmethod* nm = NULL;
   {
@@ -1002,6 +1003,15 @@
 
     methodHandle method(THREAD, target->get_Method());
 
+#if INCLUDE_RTM_OPT
+    if (!failing() && (rtm_state != NoRTM) &&
+        (method()->method_data() != NULL) &&
+        (method()->method_data()->rtm_state() != rtm_state)) {
+      // Preemptive decompile if rtm state was changed.
+      record_failure("RTM state change invalidated rtm code");
+    }
+#endif
+
     if (failing()) {
       // While not a true deoptimization, it is a preemptive decompile.
       MethodData* mdo = method()->method_data();
@@ -1028,13 +1038,15 @@
                                frame_words, oop_map_set,
                                handler_table, inc_table,
                                compiler, comp_level);
-
     // Free codeBlobs
     code_buffer->free_blob();
 
     if (nm != NULL) {
       nm->set_has_unsafe_access(has_unsafe_access);
       nm->set_has_wide_vectors(has_wide_vectors);
+#if INCLUDE_RTM_OPT
+      nm->set_rtm_state(rtm_state);
+#endif
 
       // Record successful registration.
       // (Put nm into the task handle *before* publishing to the Java heap.)
--- a/hotspot/src/share/vm/ci/ciEnv.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -365,7 +365,8 @@
                        AbstractCompiler*         compiler,
                        int                       comp_level,
                        bool                      has_unsafe_access,
-                       bool                      has_wide_vectors);
+                       bool                      has_wide_vectors,
+                       RTMState                  rtm_state = NoRTM);
 
 
   // Access to certain well known ciObjects.
--- a/hotspot/src/share/vm/ci/ciMethodData.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/ci/ciMethodData.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -478,6 +478,18 @@
 
   int invocation_count() { return _invocation_counter; }
   int backedge_count()   { return _backedge_counter;   }
+
+#if INCLUDE_RTM_OPT
+  // return cached value
+  int rtm_state() {
+    if (is_empty()) {
+      return NoRTM;
+    } else {
+      return get_MethodData()->rtm_state();
+    }
+  }
+#endif
+
   // Transfer information about the method to MethodData*.
   // would_profile means we would like to profile this method,
   // meaning it's not trivial.
--- a/hotspot/src/share/vm/code/nmethod.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -460,7 +460,9 @@
   _scavenge_root_link      = NULL;
   _scavenge_root_state     = 0;
   _compiler                = NULL;
-
+#if INCLUDE_RTM_OPT
+  _rtm_state               = NoRTM;
+#endif
 #ifdef HAVE_DTRACE_H
   _trap_offset             = 0;
 #endif // def HAVE_DTRACE_H
--- a/hotspot/src/share/vm/code/nmethod.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -193,6 +193,12 @@
 
   jbyte _scavenge_root_state;
 
+#if INCLUDE_RTM_OPT
+  // RTM state at compile time. Used during deoptimization to decide
+  // whether to restart collecting RTM locking abort statistics.
+  RTMState _rtm_state;
+#endif
+
   // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
   // and is not made into a zombie. However, once the nmethod is made into
   // a zombie, it will be locked one final time if CompiledMethodUnload
@@ -414,6 +420,12 @@
   bool  is_zombie() const                         { return _state == zombie; }
   bool  is_unloaded() const                       { return _state == unloaded;   }
 
+#if INCLUDE_RTM_OPT
+  // RTM state accessors and mutators
+  RTMState  rtm_state() const                     { return _rtm_state; }
+  void set_rtm_state(RTMState state)              { _rtm_state = state; }
+#endif
+
   // Make the nmethod non entrant. The nmethod will continue to be
   // alive.  It is used when an uncommon trap happens.  Returns true
   // if this thread changed the state of the nmethod or false if
--- a/hotspot/src/share/vm/oops/method.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -273,7 +273,7 @@
 }
 
 address Method::bcp_from(int bci) const {
-  assert((is_native() && bci == 0)  || (!is_native() && 0 <= bci && bci < code_size()), "illegal bci");
+  assert((is_native() && bci == 0)  || (!is_native() && 0 <= bci && bci < code_size()), err_msg("illegal bci: %d", bci));
   address bcp = code_base() + bci;
   assert(is_native() && bcp == code_base() || contains(bcp), "bcp doesn't belong to this method");
   return bcp;
--- a/hotspot/src/share/vm/oops/methodData.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/oops/methodData.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "compiler/compilerOracle.hpp"
 #include "interpreter/bytecode.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/linkResolver.hpp"
@@ -1153,6 +1154,21 @@
   _highest_osr_comp_level = 0;
   _would_profile = true;
 
+#if INCLUDE_RTM_OPT
+  _rtm_state = NoRTM; // No RTM lock eliding by default
+  if (UseRTMLocking &&
+      !CompilerOracle::has_option_string(_method, "NoRTMLockEliding")) {
+    if (CompilerOracle::has_option_string(_method, "UseRTMLockEliding") || !UseRTMDeopt) {
+      // Generate RTM lock eliding code without abort ratio calculation code.
+      _rtm_state = UseRTM;
+    } else if (UseRTMDeopt) {
+      // Generate RTM lock eliding code and include abort ratio calculation
+      // code if UseRTMDeopt is on.
+      _rtm_state = ProfileRTM;
+    }
+  }
+#endif
+
   // Initialize flags and trap history.
   _nof_decompiles = 0;
   _nof_overflow_recompiles = 0;
--- a/hotspot/src/share/vm/oops/methodData.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/oops/methodData.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -2052,7 +2052,7 @@
 
   // Whole-method sticky bits and flags
   enum {
-    _trap_hist_limit    = 18,   // decoupled from Deoptimization::Reason_LIMIT
+    _trap_hist_limit    = 19,   // decoupled from Deoptimization::Reason_LIMIT
     _trap_hist_mask     = max_jubyte,
     _extra_data_count   = 4     // extra DataLayout headers, for trap history
   }; // Public flag values
@@ -2083,6 +2083,12 @@
   // Counter values at the time profiling started.
   int               _invocation_counter_start;
   int               _backedge_counter_start;
+
+#if INCLUDE_RTM_OPT
+  // State of RTM code generation during compilation of the method
+  int               _rtm_state;
+#endif
+
   // Number of loops and blocks is computed when compiling the first
   // time with C1. It is used to determine if method is trivial.
   short             _num_loops;
@@ -2246,6 +2252,22 @@
   InvocationCounter* invocation_counter()     { return &_invocation_counter; }
   InvocationCounter* backedge_counter()       { return &_backedge_counter;   }
 
+#if INCLUDE_RTM_OPT
+  int rtm_state() const {
+    return _rtm_state;
+  }
+  void set_rtm_state(RTMState rstate) {
+    _rtm_state = (int)rstate;
+  }
+  void atomic_set_rtm_state(RTMState rstate) {
+    Atomic::store((int)rstate, &_rtm_state);
+  }
+
+  static int rtm_state_offset_in_bytes() {
+    return offset_of(MethodData, _rtm_state);
+  }
+#endif
+
   void set_would_profile(bool p)              { _would_profile = p;    }
   bool would_profile() const                  { return _would_profile; }
 
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -446,6 +446,9 @@
   diagnostic(bool, PrintPreciseBiasedLockingStatistics, false,              \
           "Print per-lock-site statistics of biased locking in JVM")        \
                                                                             \
+  diagnostic(bool, PrintPreciseRTMLockingStatistics, false,                 \
+          "Print per-lock-site statistics of rtm locking in JVM")           \
+                                                                            \
   notproduct(bool, PrintEliminateLocks, false,                              \
           "Print out when locks are eliminated")                            \
                                                                             \
--- a/hotspot/src/share/vm/opto/classes.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/classes.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -198,6 +198,7 @@
 macro(NeverBranch)
 macro(Opaque1)
 macro(Opaque2)
+macro(Opaque3)
 macro(OrI)
 macro(OrL)
 macro(OverflowAddI)
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -694,9 +694,10 @@
   set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
   set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
 
-  if (ProfileTraps) {
+  if (ProfileTraps RTM_OPT_ONLY( || UseRTMLocking )) {
     // Make sure the method being compiled gets its own MDO,
     // so we can at least track the decompile_count().
+    // Need MDO to record RTM code generation state.
     method()->ensure_method_data();
   }
 
@@ -907,7 +908,8 @@
                            compiler,
                            env()->comp_level(),
                            has_unsafe_access(),
-                           SharedRuntime::is_wide_vector(max_vector_size())
+                           SharedRuntime::is_wide_vector(max_vector_size()),
+                           rtm_state()
                            );
 
     if (log() != NULL) // Print code cache state into compiler log
@@ -1073,7 +1075,23 @@
   set_do_scheduling(OptoScheduling);
   set_do_count_invocations(false);
   set_do_method_data_update(false);
-
+  set_rtm_state(NoRTM); // No RTM lock eliding by default
+#if INCLUDE_RTM_OPT
+  if (UseRTMLocking && has_method() && (method()->method_data_or_null() != NULL)) {
+    int rtm_state = method()->method_data()->rtm_state();
+    if (method_has_option("NoRTMLockEliding") || ((rtm_state & NoRTM) != 0)) {
+      // Don't generate RTM lock eliding code.
+      set_rtm_state(NoRTM);
+    } else if (method_has_option("UseRTMLockEliding") || ((rtm_state & UseRTM) != 0) || !UseRTMDeopt) {
+      // Generate RTM lock eliding code without abort ratio calculation code.
+      set_rtm_state(UseRTM);
+    } else if (UseRTMDeopt) {
+      // Generate RTM lock eliding code and include abort ratio calculation
+      // code if UseRTMDeopt is on.
+      set_rtm_state(ProfileRTM);
+    }
+  }
+#endif
   if (debug_info()->recording_non_safepoints()) {
     set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
                         (comp_arena(), 8, 0, NULL));
@@ -2581,6 +2599,7 @@
     break;
   case Op_Opaque1:              // Remove Opaque Nodes before matching
   case Op_Opaque2:              // Remove Opaque Nodes before matching
+  case Op_Opaque3:
     n->subsume_by(n->in(1), this);
     break;
   case Op_CallStaticJava:
--- a/hotspot/src/share/vm/opto/compile.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/compile.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -319,9 +319,9 @@
   bool                  _trace_opto_output;
   bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
 #endif
-
   // JSR 292
   bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
+  RTMState              _rtm_state;             // State of Restricted Transactional Memory usage
 
   // Compilation environment.
   Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
@@ -591,6 +591,10 @@
   void          set_print_inlining(bool z)       { _print_inlining = z; }
   bool              print_intrinsics() const     { return _print_intrinsics; }
   void          set_print_intrinsics(bool z)     { _print_intrinsics = z; }
+  RTMState          rtm_state()  const           { return _rtm_state; }
+  void          set_rtm_state(RTMState s)        { _rtm_state = s; }
+  bool              use_rtm() const              { return (_rtm_state & NoRTM) == 0; }
+  bool          profile_rtm() const              { return _rtm_state == ProfileRTM; }
   // check the CompilerOracle for special behaviours for this compile
   bool          method_has_option(const char * option) {
     return method() != NULL && method()->has_option(option);
--- a/hotspot/src/share/vm/opto/connode.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/connode.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -642,6 +642,19 @@
   virtual const Type *bottom_type() const { return TypeInt::INT; }
 };
 
+//------------------------------Opaque3Node------------------------------------
+// A node to prevent unwanted optimizations. It is optimized away only during
+// macro node expansion.
+class Opaque3Node : public Opaque2Node {
+  int _opt; // what optimization it was used for
+public:
+  enum { RTM_OPT };
+  Opaque3Node(Compile* C, Node *n, int opt) : Opaque2Node(C, n), _opt(opt) {}
+  virtual int Opcode() const;
+  bool rtm_opt() const { return (_opt == RTM_OPT); }
+};
+
+
 //----------------------PartialSubtypeCheckNode--------------------------------
 // The 2nd slow-half of a subtype check.  Scan the subklass's 2ndary superklass
 // array for an instance of the superklass.  Set a hidden internal cache on a
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1125,6 +1125,17 @@
   }
   return _gvn.transform( new (C) ConvI2LNode(offset));
 }
+
+Node* GraphKit::ConvI2UL(Node* offset) {
+  juint offset_con = (juint) find_int_con(offset, Type::OffsetBot);
+  if (offset_con != (juint) Type::OffsetBot) {
+    return longcon((julong) offset_con);
+  }
+  Node* conv = _gvn.transform( new (C) ConvI2LNode(offset));
+  Node* mask = _gvn.transform( ConLNode::make(C, (julong) max_juint) );
+  return _gvn.transform( new (C) AndLNode(conv, mask) );
+}
+
 Node* GraphKit::ConvL2I(Node* offset) {
   // short-circuit a common case
   jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
@@ -3151,10 +3162,14 @@
   Node* mem = reset_memory();
 
   FastLockNode * flock = _gvn.transform(new (C) FastLockNode(0, obj, box) )->as_FastLock();
-  if (PrintPreciseBiasedLockingStatistics) {
+  if (UseBiasedLocking && PrintPreciseBiasedLockingStatistics) {
     // Create the counters for this fast lock.
     flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
   }
+
+  // Create the rtm counters for this fast lock if needed.
+  flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
+
   // Add monitor to debug info for the slow path.  If we block inside the
   // slow path and de-opt, we need the monitor hanging around
   map()->push_monitor( flock );
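
GraphKit::ConvI2UL above zero-extends rather than sign-extends: it builds
AndL(ConvI2L(x), 0xFFFFFFFF), so a negative int maps into [0, 2^32) instead of a negative
long, which is what the ConvX2UL change in library_call.cpp below needs for CastP2X results.
A self-contained check of the two semantics in plain C++ (not IR):

    #include <cassert>
    #include <cstdint>

    int64_t conv_i2l (int32_t x) { return (int64_t) x; }                // sign-extend
    int64_t conv_i2ul(int32_t x) { return (int64_t) x & 0xFFFFFFFFLL; } // zero-extend

    int main() {
      assert(conv_i2l (-1) == -1LL);         // 0xFFFFFFFFFFFFFFFF
      assert(conv_i2ul(-1) == 4294967295LL); // 0x00000000FFFFFFFF
      return 0;
    }
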
--- a/hotspot/src/share/vm/opto/graphKit.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -338,6 +338,7 @@
   // Convert between int and long, and size_t.
   // (See macros ConvI2X, etc., in type.hpp for ConvI2X, etc.)
   Node* ConvI2L(Node* offset);
+  Node* ConvI2UL(Node* offset);
   Node* ConvL2I(Node* offset);
   // Find out the klass of an object.
   Node* load_object_klass(Node* object);
--- a/hotspot/src/share/vm/opto/library_call.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -2600,7 +2600,7 @@
     case T_ADDRESS:
       // Cast to an int type.
       p = _gvn.transform(new (C) CastP2XNode(NULL, p));
-      p = ConvX2L(p);
+      p = ConvX2UL(p);
       break;
     default:
       fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
--- a/hotspot/src/share/vm/opto/locknode.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/locknode.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -136,6 +136,8 @@
 //-----------------------------hash--------------------------------------------
 uint FastLockNode::hash() const { return NO_HASH; }
 
+uint FastLockNode::size_of() const { return sizeof(*this); }
+
 //------------------------------cmp--------------------------------------------
 uint FastLockNode::cmp( const Node &n ) const {
   return (&n == this);                // Always fail except on self
@@ -159,6 +161,22 @@
   _counters = blnc->counters();
 }
 
+void FastLockNode::create_rtm_lock_counter(JVMState* state) {
+#if INCLUDE_RTM_OPT
+  Compile* C = Compile::current();
+  if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
+    RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
+           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
+    _rtm_counters = rlnc->counters();
+    if (UseRTMForStackLocks) {
+      rlnc = (RTMLockingNamedCounter*)
+           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
+      _stack_rtm_counters = rlnc->counters();
+    }
+  }
+#endif
+}
+
 //=============================================================================
 //------------------------------do_monitor_enter-------------------------------
 void Parse::do_monitor_enter() {
--- a/hotspot/src/share/vm/opto/locknode.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/locknode.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -92,13 +92,17 @@
 //------------------------------FastLockNode-----------------------------------
 class FastLockNode: public CmpNode {
 private:
-  BiasedLockingCounters* _counters;
+  BiasedLockingCounters*        _counters;
+  RTMLockingCounters*       _rtm_counters; // RTM lock counters for inflated locks
+  RTMLockingCounters* _stack_rtm_counters; // RTM lock counters for stack locks
 
 public:
   FastLockNode(Node *ctrl, Node *oop, Node *box) : CmpNode(oop,box) {
     init_req(0,ctrl);
     init_class_id(Class_FastLock);
     _counters = NULL;
+    _rtm_counters = NULL;
+    _stack_rtm_counters = NULL;
   }
   Node* obj_node() const { return in(1); }
   Node* box_node() const { return in(2); }
@@ -107,13 +111,17 @@
   // FastLock and FastUnlockNode do not hash, we need one for each corresponding
   // LockNode/UnLockNode to avoid creating Phi's.
   virtual uint hash() const ;                  // { return NO_HASH; }
+  virtual uint size_of() const;
   virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
   virtual int Opcode() const;
   virtual const Type *Value( PhaseTransform *phase ) const { return TypeInt::CC; }
   const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}
 
   void create_lock_counter(JVMState* s);
-  BiasedLockingCounters* counters() const { return _counters; }
+  void create_rtm_lock_counter(JVMState* state);
+  BiasedLockingCounters*        counters() const { return _counters; }
+  RTMLockingCounters*       rtm_counters() const { return _rtm_counters; }
+  RTMLockingCounters* stack_rtm_counters() const { return _stack_rtm_counters; }
 };
 
 
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -617,6 +617,15 @@
       case Op_AryEq: {
         return false;
       }
+#if INCLUDE_RTM_OPT
+      case Op_FastLock:
+      case Op_FastUnlock: {
+        // Don't unroll RTM locking code because it is large.
+        if (UseRTMLocking) {
+          return false;
+        }
+      }
+#endif
     } // switch
   }
 
@@ -722,6 +731,15 @@
         // String intrinsics are large and have loops.
         return false;
       }
+#if INCLUDE_RTM_OPT
+      case Op_FastLock:
+      case Op_FastUnlock: {
+        // Don't unroll RTM locking code because it is large.
+        if (UseRTMLocking) {
+          return false;
+        }
+      }
+#endif
     } // switch
   }
 
--- a/hotspot/src/share/vm/opto/machnode.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/machnode.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -53,6 +53,7 @@
 class Matcher;
 class PhaseRegAlloc;
 class RegMask;
+class RTMLockingCounters;
 class State;
 
 //---------------------------MachOper------------------------------------------
@@ -714,8 +715,9 @@
 class MachFastLockNode : public MachNode {
   virtual uint size_of() const { return sizeof(*this); } // Size is bigger
 public:
-  BiasedLockingCounters* _counters;
-
+  BiasedLockingCounters*        _counters;
+  RTMLockingCounters*       _rtm_counters; // RTM lock counters for inflated locks
+  RTMLockingCounters* _stack_rtm_counters; // RTM lock counters for stack locks
   MachFastLockNode() : MachNode() {}
 };
 
--- a/hotspot/src/share/vm/opto/macro.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/macro.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -2439,6 +2439,7 @@
     }
   }
   // Next, attempt to eliminate allocations
+  _has_locks = false;
   progress = true;
   while (progress) {
     progress = false;
@@ -2457,11 +2458,13 @@
       case Node::Class_Lock:
       case Node::Class_Unlock:
         assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
+        _has_locks = true;
         break;
       default:
         assert(n->Opcode() == Op_LoopLimit ||
                n->Opcode() == Op_Opaque1   ||
-               n->Opcode() == Op_Opaque2, "unknown node type in macro list");
+               n->Opcode() == Op_Opaque2   ||
+               n->Opcode() == Op_Opaque3, "unknown node type in macro list");
       }
       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
       progress = progress || success;
@@ -2502,6 +2505,30 @@
       } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
         _igvn.replace_node(n, n->in(1));
         success = true;
+#if INCLUDE_RTM_OPT
+      } else if ((n->Opcode() == Op_Opaque3) && ((Opaque3Node*)n)->rtm_opt()) {
+        assert(C->profile_rtm(), "should be used only in rtm deoptimization code");
+        assert((n->outcnt() == 1) && n->unique_out()->is_Cmp(), "");
+        Node* cmp = n->unique_out();
+#ifdef ASSERT
+        // Validate graph.
+        assert((cmp->outcnt() == 1) && cmp->unique_out()->is_Bool(), "");
+        BoolNode* bol = cmp->unique_out()->as_Bool();
+        assert((bol->outcnt() == 1) && bol->unique_out()->is_If() &&
+               (bol->_test._test == BoolTest::ne), "");
+        IfNode* ifn = bol->unique_out()->as_If();
+        assert((ifn->outcnt() == 2) &&
+               ifn->proj_out(1)->is_uncommon_trap_proj(Deoptimization::Reason_rtm_state_change), "");
+#endif
+        Node* repl = n->in(1);
+        if (!_has_locks) {
+          // Remove RTM state check if there are no locks in the code.
+          // Replace the input so the compare sees the same value on both sides and folds.
+          repl = (cmp->in(1) == n) ? cmp->in(2) : cmp->in(1);
+        }
+        _igvn.replace_node(n, repl);
+        success = true;
+#endif
       }
       assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
       progress = progress || success;
--- a/hotspot/src/share/vm/opto/macro.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/macro.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -76,6 +76,8 @@
   ProjNode *_memproj_catchall;
   ProjNode *_resproj;
 
+  // Additional data collected during macro expansion
+  bool _has_locks;
 
   void expand_allocate(AllocateNode *alloc);
   void expand_allocate_array(AllocateArrayNode *alloc);
@@ -118,7 +120,7 @@
                             Node* length);
 
 public:
-  PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn) {
+  PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn), _has_locks(false) {
     _igvn.set_delay_transform(true);
   }
   void eliminate_macro_nodes();
--- a/hotspot/src/share/vm/opto/output.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/output.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -42,18 +42,12 @@
 #include "runtime/handles.inline.hpp"
 #include "utilities/xmlstream.hpp"
 
-extern uint size_exception_handler();
-extern uint size_deopt_handler();
-
 #ifndef PRODUCT
 #define DEBUG_ARG(x) , x
 #else
 #define DEBUG_ARG(x)
 #endif
 
-extern int emit_exception_handler(CodeBuffer &cbuf);
-extern int emit_deopt_handler(CodeBuffer &cbuf);
-
 // Convert Nodes to instruction bits and pass off to the VM
 void Compile::Output() {
   // RootNode goes
@@ -394,6 +388,11 @@
         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
         reloc_size += mach->reloc();
         if (mach->is_MachCall()) {
+          // Add size information for the trampoline stub.
+          // class CallStubImpl is platform-specific and defined in the *.ad files.
+          stub_size  += CallStubImpl::size_call_trampoline();
+          reloc_size += CallStubImpl::reloc_call_trampoline();
+
           MachCallNode *mcall = mach->as_MachCall();
           // This destination address is NOT PC-relative
 
@@ -1133,10 +1132,9 @@
   shorten_branches(blk_starts, code_req, locs_req, stub_req);
 
   // nmethod and CodeBuffer count stubs & constants as part of method's code.
-  int exception_handler_req = size_exception_handler();
-  int deopt_handler_req = size_deopt_handler();
-  exception_handler_req += MAX_stubs_size; // add marginal slop for handler
-  deopt_handler_req += MAX_stubs_size; // add marginal slop for handler
+  // class HandlerImpl is platform-specific and defined in the *.ad files.
+  int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
+  int deopt_handler_req     = HandlerImpl::size_deopt_handler()     + MAX_stubs_size; // add marginal slop for handler
   stub_req += MAX_stubs_size;   // ensure per-stub margin
   code_req += MAX_inst_size;    // ensure per-instruction margin
 
@@ -1622,17 +1620,18 @@
   FillExceptionTables(inct_cnt, call_returns, inct_starts, blk_labels);
 
   // Only java methods have exception handlers and deopt handlers
+  // class HandlerImpl is platform-specific and defined in the *.ad files.
   if (_method) {
     // Emit the exception handler code.
-    _code_offsets.set_value(CodeOffsets::Exceptions, emit_exception_handler(*cb));
+    _code_offsets.set_value(CodeOffsets::Exceptions, HandlerImpl::emit_exception_handler(*cb));
     // Emit the deopt handler code.
-    _code_offsets.set_value(CodeOffsets::Deopt, emit_deopt_handler(*cb));
+    _code_offsets.set_value(CodeOffsets::Deopt, HandlerImpl::emit_deopt_handler(*cb));
 
     // Emit the MethodHandle deopt handler code (if required).
     if (has_method_handle_invokes()) {
       // We can use the same code as for the normal deopt handler, we
       // just need a different entry point address.
-      _code_offsets.set_value(CodeOffsets::DeoptMH, emit_deopt_handler(*cb));
+      _code_offsets.set_value(CodeOffsets::DeoptMH, HandlerImpl::emit_deopt_handler(*cb));
     }
   }
 
--- a/hotspot/src/share/vm/opto/parse.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/parse.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -486,6 +486,8 @@
   // Helper function to compute array addressing
   Node* array_addressing(BasicType type, int vals, const Type* *result2=NULL);
 
+  void rtm_deopt();
+
   // Pass current map to exits
   void return_current(Node* value);
 
--- a/hotspot/src/share/vm/opto/parse1.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/parse1.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -567,6 +567,10 @@
     set_map(entry_map);
     do_method_entry();
   }
+  if (depth() == 1) {
+    // Add check to deoptimize the nmethod if RTM state was changed
+    rtm_deopt();
+  }
 
   // Check for bailouts during method entry.
   if (failing()) {
@@ -2006,6 +2010,42 @@
   set_control( _gvn.transform(result_rgn) );
 }
 
+// Add check to deoptimize if RTM state is not ProfileRTM
+void Parse::rtm_deopt() {
+#if INCLUDE_RTM_OPT
+  if (C->profile_rtm()) {
+    assert(C->method() != NULL, "only for normal compilations");
+    assert(!C->method()->method_data()->is_empty(), "MDO is needed to record RTM state");
+    assert(depth() == 1, "generate check only for main compiled method");
+
+    // Set starting bci for uncommon trap.
+    set_parse_bci(is_osr_parse() ? osr_bci() : 0);
+
+    // Load the rtm_state from the MethodData.
+    const TypePtr* adr_type = TypeMetadataPtr::make(C->method()->method_data());
+    Node* mdo = makecon(adr_type);
+    int offset = MethodData::rtm_state_offset_in_bytes();
+    Node* adr_node = basic_plus_adr(mdo, mdo, offset);
+    Node* rtm_state = make_load(control(), adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
+
+    // Separate the Load from the Cmp with an Opaque node.
+    // In expand_macro_nodes() the Opaque will be replaced either
+    // with this load (when there are locks in the code) or with the
+    // ProfileRTM constant (cmp->in(2)) otherwise, so that the check
+    // will fold away.
+    Node* profile_state = makecon(TypeInt::make(ProfileRTM));
+    Node* opq   = _gvn.transform( new (C) Opaque3Node(C, rtm_state, Opaque3Node::RTM_OPT) );
+    Node* chk   = _gvn.transform( new (C) CmpINode(opq, profile_state) );
+    Node* tst   = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
+    // Branch to failure if state was changed
+    { BuildCutout unless(this, tst, PROB_ALWAYS);
+      uncommon_trap(Deoptimization::Reason_rtm_state_change,
+                    Deoptimization::Action_make_not_entrant);
+    }
+  }
+#endif
+}
+
 //------------------------------return_current---------------------------------
 // Append current _map to _exit_return
 void Parse::return_current(Node* value) {
--- a/hotspot/src/share/vm/opto/runtime.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1310,6 +1310,14 @@
         tty->print_cr("%s", c->name());
         blc->print_on(tty);
       }
+#if INCLUDE_RTM_OPT
+    } else if (c->tag() == NamedCounter::RTMLockingCounter) {
+      RTMLockingCounters* rlc = ((RTMLockingNamedCounter*)c)->counters();
+      if (rlc->nonzero()) {
+        tty->print_cr("%s", c->name());
+        rlc->print_on(tty);
+      }
+#endif
     }
     c = c->next();
   }
@@ -1349,6 +1357,8 @@
   NamedCounter* c;
   if (tag == NamedCounter::BiasedLockingCounter) {
     c = new BiasedLockingNamedCounter(strdup(st.as_string()));
+  } else if (tag == NamedCounter::RTMLockingCounter) {
+    c = new RTMLockingNamedCounter(strdup(st.as_string()));
   } else {
     c = new NamedCounter(strdup(st.as_string()), tag);
   }
@@ -1357,6 +1367,7 @@
   // add counters so this is safe.
   NamedCounter* head;
   do {
+    c->set_next(NULL);
     head = _named_counters;
     c->set_next(head);
   } while (Atomic::cmpxchg_ptr(c, &_named_counters, head) != head);
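
The hunk above adds c->set_next(NULL) at the top of the cmpxchg loop (and the runtime.hpp
hunk below relaxes the set_next assert to match): if the CAS loses a race, the loop runs
again and must be allowed to overwrite the next pointer stored on the previous attempt. A
sketch of the same lock-free prepend with std::atomic and a simplified node type:

    #include <atomic>

    struct Counter {                    // simplified stand-in for NamedCounter
      Counter* _next = nullptr;
      void set_next(Counter* n) { _next = n; }
    };

    static std::atomic<Counter*> counters_head{nullptr};

    void prepend(Counter* c) {
      Counter* head;
      do {
        c->set_next(nullptr);           // reset before re-linking, as in the patch
        head = counters_head.load();
        c->set_next(head);              // may execute several times under contention
      } while (!counters_head.compare_exchange_weak(head, c));
    }
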
--- a/hotspot/src/share/vm/opto/runtime.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/runtime.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -29,6 +29,7 @@
 #include "opto/machnode.hpp"
 #include "opto/type.hpp"
 #include "runtime/biasedLocking.hpp"
+#include "runtime/rtmLocking.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/vframe.hpp"
 
@@ -61,7 +62,8 @@
     NoTag,
     LockCounter,
     EliminatedLockCounter,
-    BiasedLockingCounter
+    BiasedLockingCounter,
+    RTMLockingCounter
   };
 
 private:
@@ -85,7 +87,7 @@
 
   NamedCounter* next() const    { return _next; }
   void set_next(NamedCounter* next) {
-    assert(_next == NULL, "already set");
+    assert(_next == NULL || next == NULL, "already set");
     _next = next;
   }
 
@@ -102,6 +104,18 @@
   BiasedLockingCounters* counters() { return &_counters; }
 };
 
+
+class RTMLockingNamedCounter : public NamedCounter {
+ private:
+  RTMLockingCounters _counters;
+
+ public:
+  RTMLockingNamedCounter(const char *n) :
+    NamedCounter(n, RTMLockingCounter), _counters() {}
+
+  RTMLockingCounters* counters() { return &_counters; }
+};
+
 typedef const TypeFunc*(*TypeFunc_generator)();
 
 class OptoRuntime : public AllStatic {
--- a/hotspot/src/share/vm/opto/type.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/type.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -4380,7 +4380,7 @@
       // else fall through:
     case TopPTR:
     case AnyNull: {
-      return make(ptr, NULL, offset);
+      return make(ptr, _metadata, offset);
     }
     case BotPTR:
     case NotNull:
--- a/hotspot/src/share/vm/opto/type.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/opto/type.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1716,6 +1716,7 @@
 #define ConvL2X(x)   (x)
 #define ConvX2I(x)   ConvL2I(x)
 #define ConvX2L(x)   (x)
+#define ConvX2UL(x)  (x)
 
 #else
 
@@ -1760,6 +1761,7 @@
 #define ConvL2X(x)   ConvL2I(x)
 #define ConvX2I(x)   (x)
 #define ConvX2L(x)   ConvI2L(x)
+#define ConvX2UL(x)  ConvI2UL(x)
 
 #endif
 
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -500,6 +500,54 @@
   c = *p;
 WB_END
 
+WB_ENTRY(jstring, WB_GetCPUFeatures(JNIEnv* env, jobject o))
+  const char* cpu_features = VM_Version::cpu_features();
+  ThreadToNativeFromVM ttn(thread);
+  jstring features_string = env->NewStringUTF(cpu_features);
+
+  CHECK_JNI_EXCEPTION_(env, NULL);
+
+  return features_string;
+WB_END
+
+
+WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
+  ResourceMark rm(THREAD);
+  jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
+  nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
+  jobjectArray result = NULL;
+  if (code == NULL) {
+    return result;
+  }
+  int insts_size = code->insts_size();
+
+  ThreadToNativeFromVM ttn(thread);
+  jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  result = env->NewObjectArray(2, clazz, NULL);
+  if (result == NULL) {
+    return result;
+  }
+
+  clazz = env->FindClass(vmSymbols::java_lang_Integer()->as_C_string());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  jmethodID constructor = env->GetMethodID(clazz, vmSymbols::object_initializer_name()->as_C_string(), vmSymbols::int_void_signature()->as_C_string());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  jobject obj = env->NewObject(clazz, constructor, code->comp_level());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  env->SetObjectArrayElement(result, 0, obj);
+
+  jbyteArray insts = env->NewByteArray(insts_size);
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  env->SetByteArrayRegion(insts, 0, insts_size, (jbyte*) code->insts_begin());
+  env->SetObjectArrayElement(result, 1, insts);
+
+  return result;
+WB_END
+
+
 //Some convenience methods to deal with objects from java
 int WhiteBox::offset_for_field(const char* field_name, oop object,
     Symbol* signature_symbol) {
@@ -611,6 +659,9 @@
   {CC"isInStringTable",   CC"(Ljava/lang/String;)Z",  (void*)&WB_IsInStringTable  },
   {CC"fullGC",   CC"()V",                             (void*)&WB_FullGC },
   {CC"readReservedMemory", CC"()V",                   (void*)&WB_ReadReservedMemory },
+  {CC"getCPUFeatures",     CC"()Ljava/lang/String;",  (void*)&WB_GetCPUFeatures     },
+  {CC"getNMethod",         CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
+                                                      (void*)&WB_GetNMethod         },
 };
 
 #undef CC
--- a/hotspot/src/share/vm/prims/whitebox.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/prims/whitebox.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,6 @@
   do {                                                                 \
     JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); \
     if (HAS_PENDING_EXCEPTION) {                                       \
-      CLEAR_PENDING_EXCEPTION;                                         \
       return(value);                                                   \
     }                                                                  \
   } while (0)
@@ -49,7 +48,6 @@
   do {                                                                 \
     JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); \
     if (HAS_PENDING_EXCEPTION) {                                       \
-      CLEAR_PENDING_EXCEPTION;                                         \
       return;                                                          \
     }                                                                  \
   } while (0)
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -301,6 +301,7 @@
   { "UseMPSS",                       JDK_Version::jdk(8), JDK_Version::jdk(9) },
   { "UseStringCache",                JDK_Version::jdk(8), JDK_Version::jdk(9) },
   { "UseOldInlining",                JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "SafepointPollOffset",           JDK_Version::jdk(9), JDK_Version::jdk(10) },
 #ifdef PRODUCT
   { "DesiredMethodLimit",
                            JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
@@ -3779,9 +3780,6 @@
 #endif // CC_INTERP
 
 #ifdef COMPILER2
-  if (!UseBiasedLocking || EmitSync != 0) {
-    UseOptoBiasInlining = false;
-  }
   if (!EliminateLocks) {
     EliminateNestedLocks = false;
   }
@@ -3842,6 +3840,11 @@
       UseBiasedLocking = false;
     }
   }
+#ifdef COMPILER2
+  if (!UseBiasedLocking || EmitSync != 0) {
+    UseOptoBiasInlining = false;
+  }
+#endif
 
   return JNI_OK;
 }
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -1288,7 +1288,8 @@
     gather_statistics(reason, action, trap_bc);
 
     // Ensure that we can record deopt. history:
-    bool create_if_missing = ProfileTraps;
+    // Need MDO to record RTM code generation state.
+    bool create_if_missing = ProfileTraps RTM_OPT_ONLY( || UseRTMLocking );
 
     MethodData* trap_mdo =
       get_method_data(thread, trap_method, create_if_missing);
@@ -1569,6 +1570,17 @@
         if (tstate1 != tstate0)
           pdata->set_trap_state(tstate1);
       }
+
+#if INCLUDE_RTM_OPT
+      // Restart collecting RTM locking abort statistics if the method
+      // is recompiled for a reason other than an RTM state change.
+      // Assume that the statistics could differ in the newly compiled code,
+      // for example, due to different inlining.
+      if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) &&
+          UseRTMDeopt && (nm->rtm_state() != ProfileRTM)) {
+        trap_mdo->atomic_set_rtm_state(ProfileRTM);
+      }
+#endif
     }
 
     if (inc_recompile_count) {
@@ -1826,7 +1838,8 @@
   "age",
   "predicate",
   "loop_limit_check",
-  "speculate_class_check"
+  "speculate_class_check",
+  "rtm_state_change"
 };
 const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
   // Note:  Keep this in sync. with enum DeoptAction.
--- a/hotspot/src/share/vm/runtime/deoptimization.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/runtime/deoptimization.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -60,6 +60,7 @@
     Reason_predicate,             // compiler generated predicate failed
     Reason_loop_limit_check,      // compiler generated loop limits check failed
     Reason_speculate_class_check, // saw unexpected object class from type speculation
+    Reason_rtm_state_change,      // rtm state change detected
     Reason_LIMIT,
     // Note:  Keep this enum in sync. with _trap_reason_name.
     Reason_RECORDED_LIMIT = Reason_bimorphic  // some are not recorded per bc
--- a/hotspot/src/share/vm/runtime/java.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/runtime/java.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -265,7 +265,7 @@
     os::print_statistics();
   }
 
-  if (PrintLockStatistics || PrintPreciseBiasedLockingStatistics) {
+  if (PrintLockStatistics || PrintPreciseBiasedLockingStatistics || PrintPreciseRTMLockingStatistics) {
     OptoRuntime::print_named_counters();
   }
 
@@ -387,7 +387,7 @@
   }
 
 #ifdef COMPILER2
-  if (PrintPreciseBiasedLockingStatistics) {
+  if (PrintPreciseBiasedLockingStatistics || PrintPreciseRTMLockingStatistics) {
     OptoRuntime::print_named_counters();
   }
 #endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/rtmLocking.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_RTMLOCKING_HPP
+#define SHARE_VM_RUNTIME_RTMLOCKING_HPP
+
+// Generate RTM (Restricted Transactional Memory) locking code for all inflated
+// locks when the "UseRTMLocking" option is on, with the normal locking mechanism
+// as the fallback handler.
+//
+// On abort or lock busy, the lock will be retried a fixed number of times under
+// RTM, as specified by the "RTMRetryCount" option. Locks which abort too often
+// can be tuned automatically or manually.
+//
+// Auto-tuning is controlled by an option such as UseRTMDeopt and requires an
+// abort ratio calculation for each lock. The abort ratio is calculated once
+// "RTMAbortThreshold" aborts have been reached. The formulas are:
+//
+//     Aborted transactions = abort_count * 100
+//     All transactions = total_count *  RTMTotalCountIncrRate
+//
+//     Aborted transactions >= All transactions * RTMAbortRatio
+//
+// If "UseRTMDeopt" is on and the aborts ratio reaches "RTMAbortRatio"
+// the method containing the lock will be deoptimized and recompiled with
+// all locks as normal locks. If the abort ratio continues to remain low after
+// "RTMLockingThreshold" locks are attempted, then the method will be deoptimized
+// and recompiled with all locks as RTM locks without abort ratio calculation code.
+// The abort ratio calculation can be delayed by specifying flag
+// -XX:RTMLockingCalculationDelay in millisecond.
+//
+// For manual tuning, the abort statistics for each lock need to be made
+// available to the user via a JVM option such as "PrintPreciseRTMLockingStatistics".
+// Based on the abort statistics users can create a .hotspot_compiler file
+// or use -XX:CompileCommand=option,class::method,NoRTMLockEliding
+// to specify the methods for which RTM locking should be disabled.
+//
+// When the UseRTMForStackLocks option is enabled along with UseRTMLocking,
+// RTM locking code is generated for stack locks too.
+// Retries, auto-tuning support and RTM locking statistics are all
+// supported for stack locks, just as for inflated locks.
+
+// RTM locking counters
+class RTMLockingCounters VALUE_OBJ_CLASS_SPEC {
+ private:
+  uintx _total_count; // Total RTM locks count
+  uintx _abort_count; // Total aborts count
+
+ public:
+  enum { ABORT_STATUS_LIMIT = 6 };
+  // Counters per RTM Abort Status. Incremented with +PrintPreciseRTMLockingStatistics
+  // RTM uses the EAX register to communicate abort status to software.
+  // Following an RTM abort the EAX register has the following definition.
+  //
+  //   EAX register bit position   Meaning
+  //     0     Set if abort caused by XABORT instruction.
+  //     1     If set, the transaction may succeed on a retry. This bit is always clear if bit 0 is set.
+  //     2     Set if another logical processor conflicted with a memory address that was part of the transaction that aborted.
+  //     3     Set if an internal buffer overflowed.
+  //     4     Set if a debug breakpoint was hit.
+  //     5     Set if an abort occurred during execution of a nested transaction.
+ private:
+  uintx _abortX_count[ABORT_STATUS_LIMIT];
+
+ public:
+  static uintx _calculation_flag;
+  static uintx* rtm_calculation_flag_addr() { return &_calculation_flag; }
+
+  static void init();
+
+  RTMLockingCounters() : _total_count(0), _abort_count(0) {
+    for (int i = 0; i < ABORT_STATUS_LIMIT; i++) {
+      _abortX_count[i] = 0;
+    }
+  }
+
+  uintx* total_count_addr()               { return &_total_count; }
+  uintx* abort_count_addr()               { return &_abort_count; }
+  uintx* abortX_count_addr()              { return &_abortX_count[0]; }
+
+  static int total_count_offset()         { return (int)offset_of(RTMLockingCounters, _total_count); }
+  static int abort_count_offset()         { return (int)offset_of(RTMLockingCounters, _abort_count); }
+  static int abortX_count_offset()        { return (int)offset_of(RTMLockingCounters, _abortX_count[0]); }
+
+
+  bool nonzero() {  return (_abort_count + _total_count) > 0; }
+
+  void print_on(outputStream* st);
+  void print() { print_on(tty); }
+};
+
+#endif // SHARE_VM_RUNTIME_RTMLOCKING_HPP
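
A worked instance of the abort-ratio formulas from the header comment above. The rate and
ratio here are illustrative placeholders; the real values come from the
RTMTotalCountIncrRate and RTMAbortRatio VM flags defined elsewhere:

    #include <cstdint>

    bool abort_ratio_reached(uint64_t abort_count, uint64_t total_count) {
      const uint64_t RTMTotalCountIncrRate = 64; // assumed sampling rate
      const uint64_t RTMAbortRatio         = 50; // assumed percentage
      uint64_t aborted = abort_count * 100;                   // Aborted transactions
      uint64_t all     = total_count * RTMTotalCountIncrRate; // All transactions
      return aborted >= all * RTMAbortRatio; // true => recompile with normal locks
    }

    // Example: total_count = 1000 (i.e. 64000 transactions) and abort_count = 40000
    // gives 4,000,000 >= 64,000 * 50 = 3,200,000, so the abort ratio is reached.
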
--- a/hotspot/src/share/vm/runtime/task.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/runtime/task.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -105,7 +105,6 @@
   _counter(0), _interval((int) interval_time) {
   // Sanity check the interval time
   assert(_interval >= PeriodicTask::min_interval &&
-         _interval <= PeriodicTask::max_interval &&
          _interval %  PeriodicTask::interval_gran == 0,
               "improper PeriodicTask interval time");
 }
--- a/hotspot/src/share/vm/runtime/thread.cpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Thu Mar 27 14:15:42 2014 +0100
@@ -107,6 +107,9 @@
 #include "opto/c2compiler.hpp"
 #include "opto/idealGraphPrinter.hpp"
 #endif
+#if INCLUDE_RTM_OPT
+#include "runtime/rtmLocking.hpp"
+#endif
 
 #ifdef DTRACE_ENABLED
 
@@ -3622,6 +3625,10 @@
 
   BiasedLocking::init();
 
+#if INCLUDE_RTM_OPT
+  RTMLockingCounters::init();
+#endif
+
   if (JDK_Version::current().post_vm_init_hook_enabled()) {
     call_postVMInitHook(THREAD);
     // The Java side of PostVMInitHook.run must deal with all
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Thu Mar 27 14:15:42 2014 +0100
@@ -373,6 +373,21 @@
 
 // Machine dependent stuff
 
+#if defined(X86) && defined(COMPILER2) && !defined(JAVASE_EMBEDDED)
+// Include Restricted Transactional Memory lock eliding optimization
+#define INCLUDE_RTM_OPT 1
+#define RTM_OPT_ONLY(code) code
+#else
+#define INCLUDE_RTM_OPT 0
+#define RTM_OPT_ONLY(code)
+#endif
+// States of Restricted Transactional Memory usage.
+enum RTMState {
+  NoRTM      = 0x2, // Don't use RTM
+  UseRTM     = 0x1, // Use RTM
+  ProfileRTM = 0x0  // Use RTM with abort ratio calculation
+};
+
 #ifdef TARGET_ARCH_x86
 # include "globalDefinitions_x86.hpp"
 #endif
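
The RTMState values above are chosen so one mask test separates "RTM in use" from "RTM off":
both UseRTM (0x1) and ProfileRTM (0x0) clear the NoRTM bit (0x2), which is exactly what
Compile::use_rtm() earlier in this change relies on. A compile-time check of that property:

    enum RTMState {
      NoRTM      = 0x2, // Don't use RTM
      UseRTM     = 0x1, // Use RTM
      ProfileRTM = 0x0  // Use RTM with abort ratio calculation
    };

    constexpr bool use_rtm(RTMState s)     { return (s & NoRTM) == 0; }
    constexpr bool profile_rtm(RTMState s) { return s == ProfileRTM; }

    static_assert(use_rtm(UseRTM) && use_rtm(ProfileRTM) && !use_rtm(NoRTM),
                  "a single bit distinguishes the RTM states from NoRTM");
    static_assert(profile_rtm(ProfileRTM) && !profile_rtm(UseRTM),
                  "ProfileRTM is the zero state");
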
--- a/hotspot/test/compiler/6792161/Test6792161.java	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/test/compiler/6792161/Test6792161.java	Thu Mar 27 14:15:42 2014 +0100
@@ -27,7 +27,7 @@
  * @bug 6792161
  * @summary assert("No dead instructions after post-alloc")
  *
- * @run main/othervm/timeout=300 -Xcomp -XX:MaxInlineSize=120 Test6792161
+ * @run main/othervm/timeout=600 -Xcomp -XX:MaxInlineSize=120 Test6792161
  */
 
 import java.lang.reflect.Constructor;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/bmi/BMITestRunner.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.util.*;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.nio.charset.StandardCharsets;
+
+import com.oracle.java.testlibrary.*;
+
+/**
+ * Test runner that invokes all methods implemented by a particular Expr
+ * with random arguments in two different JVM processes and compares the output.
+ * The JVMs are started in different modes: one interpreted (-Xint) and the
+ * other compiled (-Xcomp) with C2 and tiered compilation disabled.
+ */
+public class BMITestRunner {
+
+    enum VMMode {
+        COMP, INT;
+    };
+
+    public static int DEFAULT_ITERATIONS_COUNT = 4000;
+
+    /**
+     * Execute all methods implemented by <b>expr</b> in int and comp modes
+     * and compare output.
+     * The test passes only if the output obtained in the different VM modes is equal.
+     * The following options can be passed to control the test's behaviour:
+     * <ul>
+     *   <li>-iterations=&lt;N&gt; each operation implemented by
+     *       <b>expr</b> will be executed <i>N</i> times. Default value
+     *       is 4000.</li>
+     *   <li>-seed=&lt;SEED&gt; arguments for <b>expr</b>'s methods
+     *       obtained via RNG initiated with seed <i>SEED</i>. By default
+     *       some random seed will be used.</li>
+     * </ul>
+     *
+     * @param expr operation that should be tested
+     * @param testOpts options to control test behaviour
+     * @param additionalVMOpts additional options for VM
+     *
+     * @throws Throwable if test failed.
+     */
+    public static void runTests(Class<? extends Expr> expr,
+                                String testOpts[],
+                                String... additionalVMOpts)
+                         throws Throwable {
+
+        int seed = new Random().nextInt();
+        int iterations = DEFAULT_ITERATIONS_COUNT;
+
+        for (String testOption : testOpts) {
+            if (testOption.startsWith("-iterations=")) {
+                iterations = Integer.valueOf(testOption.
+                                             replace("-iterations=", ""));
+            } else if (testOption.startsWith("-seed=")) {
+                seed = Integer.valueOf(testOption.replace("-seed=", ""));
+            }
+        }
+
+        System.out.println("Running test with seed: " + seed);
+
+        OutputAnalyzer intOutput = runTest(expr, VMMode.INT,
+                                           additionalVMOpts,
+                                           seed, iterations);
+        OutputAnalyzer compOutput = runTest(expr, VMMode.COMP,
+                                            additionalVMOpts,
+                                            seed, iterations);
+
+        dumpOutput(intOutput, "int");
+        dumpOutput(compOutput, "comp");
+
+        Asserts.assertStringsEqual(intOutput.getStdout(),
+                                   compOutput.getStdout(),
+                                   "Results obtained in -Xint and " +
+                                   "-Xcomp should be the same.");
+    }
+
+    /**
+     * Execute tests on methods implemented by <b>expr</b> in a new VM
+     * started in <b>testVMMode</b> mode.
+     *
+     * @param expr operation that should be tested
+     * @param testVMMode VM mode for test
+     * @param additionalVMOpts additional options for VM
+     * @param seed seed for the RNG used in tests
+     * @param iterations number of times <b>expr</b>'s methods will be invoked.
+     *
+     * @return OutputAnalyzer for executed test.
+     * @throws Throwable when something goes wrong.
+     */
+    public static OutputAnalyzer runTest(Class<? extends Expr> expr,
+                                         VMMode testVMMode,
+                                         String additionalVMOpts[],
+                                         int seed, int iterations)
+                                  throws Throwable {
+
+        List<String> vmOpts = new LinkedList<String>();
+
+        Collections.addAll(vmOpts, additionalVMOpts);
+
+        // Set up mode-specific options.
+        switch (testVMMode) {
+        case INT:
+            Collections.addAll(vmOpts, new String[] { "-Xint" });
+            break;
+        case COMP:
+            Collections.addAll(vmOpts, new String[] {
+                    "-Xcomp",
+                    "-XX:-TieredCompilation",
+                    String.format("-XX:CompileCommand=compileonly,%s::*",
+                                  expr.getName())
+                });
+            break;
+        }
+
+        Collections.addAll(vmOpts, new String[] {
+                "-XX:+DisplayVMOutputToStderr",
+                Executor.class.getName(),
+                expr.getName(),
+                Integer.toString(seed),
+                Integer.toString(iterations)
+            });
+
+        OutputAnalyzer outputAnalyzer = ProcessTools.
+            executeTestJvm(vmOpts.toArray(new String[vmOpts.size()]));
+
+        outputAnalyzer.shouldHaveExitValue(0);
+
+        return outputAnalyzer;
+    }
+
+    /**
+     * Dump stdout and stderr of test process to <i>prefix</i>.test.out
+     * and <i>prefix</i>.test.err respectively.
+     *
+     * @param outputAnalyzer OutputAnalyzer whose output should be dumped
+     * @param prefix Prefix that will be used in file names.
+     * @throws IOException if unable to dump output to file.
+     */
+    protected static void dumpOutput(OutputAnalyzer outputAnalyzer,
+                                     String prefix)
+                              throws IOException {
+        Files.write(Paths.get(prefix + ".test.out"),
+                    outputAnalyzer.getStdout().getBytes());
+
+        Files.write(Paths.get(prefix + ".test.err"),
+                    outputAnalyzer.getStderr().getBytes());
+    }
+
+
+    /**
+     * Executor that invokes all methods implemented by a particular
+     * Expr instance.
+     */
+    public static class Executor {
+
+        /**
+         * Usage: {@code BMITestRunner$Executor <ExprClassName> <seed> <iterations>}
+         */
+        public static void main(String args[]) throws Exception {
+            @SuppressWarnings("unchecked")
+            Class<? extends Expr> exprClass =
+                (Class<? extends Expr>)Class.forName(args[0]);
+            Expr expr = exprClass.getConstructor().newInstance();
+            Random rng = new Random(Integer.valueOf(args[1]));
+            int iterations = Integer.valueOf(args[2]);
+            runTests(expr, iterations, rng);
+        }
+
+
+        public static int[] getIntBitShifts() {
+            // SIZE+1 values: every single-bit pattern plus zero.
+            // The last element stays zero: Java masks int shift counts,
+            // so 1<<Integer.SIZE would wrap around to 1.
+            int data[] = new int[Integer.SIZE+1];
+            for (int s = 0; s < Integer.SIZE; s++) {
+                data[s] = 1<<s;
+            }
+            return data;
+        }
+
+        public static long[] getLongBitShifts() {
+            // SIZE+1 values: every single-bit pattern plus zero.
+            // The last element stays zero: Java masks long shift counts,
+            // so 1L<<Long.SIZE would wrap around to 1.
+            long data[] = new long[Long.SIZE+1];
+            for (int s = 0; s < Long.SIZE; s++) {
+                data[s] = 1L<<s;
+            }
+            return data;
+        }
+
+        public static void log(String format, Object... args) {
+            System.out.println(String.format(format, args));
+        }
+
+        public static void runTests(Expr expr, int iterations, Random rng) {
+            runUnaryIntRegTest(expr, iterations, rng);
+            runUnaryIntMemTest(expr, iterations, rng);
+            runUnaryLongRegTest(expr, iterations, rng);
+            runUnaryLongMemTest(expr, iterations, rng);
+            runBinaryRegRegIntTest(expr, iterations, rng);
+            runBinaryRegMemIntTest(expr, iterations, rng);
+            runBinaryMemRegIntTest(expr, iterations, rng);
+            runBinaryMemMemIntTest(expr, iterations, rng);
+            runBinaryRegRegLongTest(expr, iterations, rng);
+            runBinaryRegMemLongTest(expr, iterations, rng);
+            runBinaryMemRegLongTest(expr, iterations, rng);
+            runBinaryMemMemLongTest(expr, iterations, rng);
+        }
+
+        public static void runUnaryIntRegTest(Expr expr, int iterations,
+                                              Random rng) {
+            if (!(expr.isUnaryArgumentSupported()
+                  && expr.isIntExprSupported())) {
+                return;
+            }
+
+            for (int value : getIntBitShifts()) {
+                log("UnaryIntReg(0X%x) -> 0X%x",
+                    value, expr.intExpr(value));
+            }
+
+            for (int i = 0; i < iterations; i++) {
+                int value = rng.nextInt();
+                log("UnaryIntReg(0X%x) -> 0X%x",
+                    value, expr.intExpr(value));
+            }
+        }
+
+        public static void runUnaryIntMemTest(Expr expr, int iterations,
+                                              Random rng) {
+            if (!(expr.isUnaryArgumentSupported()
+                  && expr.isIntExprSupported()
+                  && expr.isMemExprSupported())) {
+                return;
+            }
+
+            for (int value : getIntBitShifts()) {
+                log("UnaryIntMem(0X%x) -> 0X%x",
+                    value, expr.intExpr(new Expr.MemI(value)));
+            }
+
+            for (int i = 0; i < iterations; i++) {
+                int value = rng.nextInt();
+                log("UnaryIntMem(0X%x) -> 0X%x",
+                    value, expr.intExpr(new Expr.MemI(value)));
+            }
+        }
+
+        public static void runUnaryLongRegTest(Expr expr, int iterations,
+                                               Random rng) {
+            if (!(expr.isUnaryArgumentSupported()
+                  && expr.isLongExprSupported())) {
+                return;
+            }
+
+            for (long value : getLongBitShifts()) {
+                log("UnaryLongReg(0X%x) -> 0X%x",
+                    value, expr.longExpr(value));
+            }
+
+            for (int i = 0; i < iterations; i++) {
+                long value = rng.nextLong();
+                log("UnaryLongReg(0X%x) -> 0X%x",
+                    value, expr.longExpr(value));
+            }
+        }
+
+        public static void runUnaryLongMemTest(Expr expr, int iterations,
+                                               Random rng) {
+            if (!(expr.isUnaryArgumentSupported()
+                  && expr.isLongExprSupported()
+                  && expr.isMemExprSupported())) {
+                return;
+            }
+
+            for (long value : getLongBitShifts()) {
+                log("UnaryLongMem(0X%x) -> 0X%x",
+                    value, expr.longExpr(new Expr.MemL(value)));
+            }
+
+            for (int i = 0; i < iterations; i++) {
+                long value = rng.nextLong();
+                log("UnaryLongMem(0X%x) -> 0X%x",
+                    value, expr.longExpr(new Expr.MemL(value)));
+            }
+        }
+
+        public static void runBinaryRegRegIntTest(Expr expr, int iterations,
+                                                  Random rng) {
+            if (!(expr.isIntExprSupported()
+                  && expr.isBinaryArgumentSupported())) {
+                return;
+            }
+
+            for (int i = 0; i < iterations; i++) {
+                int aValue = rng.nextInt();
+                int bValue = rng.nextInt();
+                log("BinaryIntRegReg(0X%x, 0X%x) -> 0X%x",
+                    aValue, bValue, expr.intExpr(aValue, bValue));
+            }
+        }
+
+        public static void runBinaryRegMemIntTest(Expr expr, int iterations,
+                                                  Random rng) {
+            if (!(expr.isIntExprSupported()
+                  && expr.isBinaryArgumentSupported()
+                  && expr.isMemExprSupported())) {
+                return;
+            }
+
+            for (int i = 0; i < iterations; i++) {
+                int aValue = rng.nextInt();
+                int bValue = rng.nextInt();
+                log("BinaryIntRegMem(0X%x, 0X%x) -> 0X%x", aValue, bValue,
+                    expr.intExpr(aValue, new Expr.MemI(bValue)));
+            }
+        }
+
+        public static void runBinaryMemRegIntTest(Expr expr, int iterations,
+                                                  Random rng) {
+            if (!(expr.isIntExprSupported()
+                  && expr.isBinaryArgumentSupported()
+                  && expr.isMemExprSupported())) {
+                return;
+            }
+
+            for (int i = 0; i < iterations; i++) {
+                int aValue = rng.nextInt();
+                int bValue = rng.nextInt();
+                log("BinaryIntMemReg(0X%x, 0X%x) -> 0X%x", aValue, bValue,
+                    expr.intExpr(new Expr.MemI(aValue), bValue));
+            }
+        }
+
+        public static void runBinaryMemMemIntTest(Expr expr, int iterations,
+                                                  Random rng) {
+            if (!(expr.isIntExprSupported()
+                  && expr.isBinaryArgumentSupported()
+                  && expr.isMemExprSupported())) {
+                return;
+            }
+
+            for (int i = 0; i < iterations; i++) {
+                int aValue = rng.nextInt();
+                int bValue = rng.nextInt();
+                log("BinaryIntMemMem(0X%x, 0X%x) -> 0X%x", aValue, bValue,
+                    expr.intExpr(new Expr.MemI(aValue),
+                                 new Expr.MemI(bValue)));
+            }
+        }
+
+        public static void runBinaryRegRegLongTest(Expr expr,
+                                                   int iterations,
+                                                   Random rng) {
+            if (!(expr.isLongExprSupported()
+                  && expr.isBinaryArgumentSupported())) {
+                return;
+            }
+
+            for (int i = 0; i < iterations; i++) {
+                long aValue = rng.nextLong();
+                long bValue = rng.nextLong();
+                log("BinaryLongRegReg(0X%x, 0X%x) -> 0X%x", aValue, bValue,
+                    expr.longExpr(aValue, bValue));
+            }
+        }
+
+        public static void runBinaryRegMemLongTest(Expr expr,
+                                                   int iterations,
+                                                   Random rng) {
+            if (!(expr.isLongExprSupported()
+                  && expr.isBinaryArgumentSupported()
+                  && expr.isMemExprSupported())) {
+                return;
+            }
+
+            for (int i = 0; i < iterations; i++) {
+                long aValue = rng.nextLong();
+                long bValue = rng.nextLong();
+                log("BinaryLongRegMem(0X%x, 0X%x) -> 0X%x", aValue, bValue,
+                    expr.longExpr(aValue, new Expr.MemL(bValue)));
+            }
+        }
+
+        public static void runBinaryMemRegLongTest(Expr expr,
+                                                   int iterations,
+                                                   Random rng) {
+            if (!(expr.isLongExprSupported()
+                  && expr.isBinaryArgumentSupported()
+                  && expr.isMemExprSupported())) {
+                return;
+            }
+
+            for (int i = 0; i < iterations; i++) {
+                long aValue = rng.nextLong();
+                long bValue = rng.nextLong();
+                log("BinaryLongMemReg(0X%x, 0X%x) -> 0X%x", aValue, bValue,
+                    expr.longExpr(new Expr.MemL(aValue), bValue));
+            }
+        }
+
+        public static void runBinaryMemMemLongTest(Expr expr,
+                                                   int iterations,
+                                                   Random rng) {
+            if (!(expr.isLongExprSupported()
+                  && expr.isBinaryArgumentSupported()
+                  && expr.isMemExprSupported())) {
+                return;
+            }
+
+            for (int i = 0; i < iterations; i++) {
+                long aValue = rng.nextLong();
+                long bValue = rng.nextLong();
+                log("BinaryLongMemMem(0X%x, 0X%x) -> 0X%x", aValue, bValue,
+                    expr.longExpr(new Expr.MemL(aValue),
+                                  new Expr.MemL(bValue)));
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/bmi/Expr.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * Expression that should be replaced by a particular intrinsic
+ * or instruction during compilation.
+ */
+
+public abstract class Expr {
+
+    public static class MemI {
+        public MemI(int i) {
+            this.value = i;
+        }
+
+        public int value;
+    }
+
+    public static class MemL {
+        public MemL(long l) {
+            this.value = l;
+        }
+
+        public long value;
+    }
+
+    public boolean isUnaryArgumentSupported() {
+        return false;
+    }
+
+    public boolean isIntExprSupported() {
+        return false;
+    }
+
+    public boolean isBinaryArgumentSupported() {
+        return false;
+    }
+
+    public boolean isLongExprSupported() {
+        return false;
+    }
+
+    public boolean isMemExprSupported() {
+        return false;
+    }
+
+    public int intExpr(int reg) {
+        throw new UnsupportedOperationException();
+    }
+
+    public int intExpr(MemI mem) {
+        throw new UnsupportedOperationException();
+    }
+
+    public int intExpr(int a, int b) {
+        throw new UnsupportedOperationException();
+    }
+
+    public int intExpr(int a, MemI b) {
+        throw new UnsupportedOperationException();
+    }
+
+    public int intExpr(MemI a, int b) {
+        throw new UnsupportedOperationException();
+    }
+
+    public int intExpr(MemI a, MemI b) {
+        throw new UnsupportedOperationException();
+    }
+
+    public long longExpr(long reg) {
+        throw new UnsupportedOperationException();
+    }
+
+    public long longExpr(MemL mem) {
+        throw new UnsupportedOperationException();
+    }
+
+    public long longExpr(long a, long b) {
+        throw new UnsupportedOperationException();
+    }
+
+    public long longExpr(long a, MemL b) {
+        throw new UnsupportedOperationException();
+    }
+
+    public long longExpr(MemL a, long b) {
+        throw new UnsupportedOperationException();
+    }
+
+    public long longExpr(MemL a, MemL b) {
+        throw new UnsupportedOperationException();
+    }
+
+    public static class BMIExpr extends Expr {
+
+        public boolean isMemExprSupported() {
+            return true;
+        }
+    }
+
+    public static class BMIBinaryExpr extends BMIExpr {
+
+        public boolean isBinaryArgumentSupported() {
+            return true;
+        }
+
+    }
+
+    public static class BMIUnaryExpr extends BMIExpr {
+        public boolean isUnaryArgumentSupported() {
+            return true;
+        }
+    }
+
+    public static class BMIBinaryIntExpr extends BMIBinaryExpr {
+        public boolean isIntExprSupported() {
+            return true;
+        }
+    }
+
+    public static class BMIBinaryLongExpr extends BMIBinaryExpr {
+        public boolean isLongExprSupported() {
+            return true;
+        }
+    }
+
+    public static class BMIUnaryIntExpr extends BMIUnaryExpr {
+        public boolean isIntExprSupported() {
+            return true;
+        }
+    }
+
+    public static class BMIUnaryLongExpr extends BMIUnaryExpr {
+        public boolean isLongExprSupported() {
+            return true;
+        }
+    }
+
+    public static class BitCountingExpr extends Expr {
+        public boolean isUnaryArgumentSupported() {
+            return true;
+        }
+    }
+
+    public static class BitCountingIntExpr extends BitCountingExpr {
+        public boolean isIntExprSupported() {
+            return true;
+        }
+    }
+
+    public static class BitCountingLongExpr extends BitCountingExpr {
+        public boolean isLongExprSupported() {
+            return true;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/bmi/TestAndnI.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8031321
+ * @summary Verify that results of computations are the same w/
+ *          and w/o usage of ANDN instruction
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestAndnI BMITestRunner Expr
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestAndnI
+ */
+
+import sun.hotspot.cpuinfo.CPUInfo;
+
+public class TestAndnI {
+
+    public static void main(String args[]) throws Throwable {
+        if (!CPUInfo.hasFeature("bmi1")) {
+            System.out.println("CPU does not support bmi1 feature. "+
+                               "Test skipped.");
+            return;
+        }
+
+        BMITestRunner.runTests(AndnIExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+        BMITestRunner.runTests(AndnICommutativeExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+    }
+
+    public static class AndnIExpr extends Expr.BMIBinaryIntExpr {
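+        // ANDN: bitwise AND of the complemented first operand with the
+        // second (~src1 & src2).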
+
+        public int intExpr(int src1, int src2) {
+            return ~src1 & src2;
+        }
+
+        public int intExpr(int src1, Expr.MemI src2) {
+            return ~src1 & src2.value;
+        }
+
+        public int intExpr(Expr.MemI src1, int src2) {
+            return ~src1.value & src2;
+        }
+
+        public int intExpr(Expr.MemI src1, Expr.MemI src2) {
+            return ~src1.value & src2.value;
+        }
+    }
+
+    public static class AndnICommutativeExpr extends Expr.BMIBinaryIntExpr {
+
+        public int intExpr(int src1, int src2) {
+            return src1 & ~src2;
+        }
+
+        public int intExpr(int src1, Expr.MemI src2) {
+            return src1 & ~src2.value;
+        }
+
+        public int intExpr(Expr.MemI src1, int src2) {
+            return src1.value & ~src2;
+        }
+
+        public int intExpr(Expr.MemI src1, Expr.MemI src2) {
+            return src1.value & ~src2.value;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/bmi/TestAndnL.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8031321
+ * @summary Verify that results of computations are the same w/
+ *          and w/o usage of ANDN instruction
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestAndnL BMITestRunner Expr
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestAndnL
+ */
+
+import sun.hotspot.cpuinfo.CPUInfo;
+
+public class TestAndnL {
+
+    public static void main(String args[]) throws Throwable {
+        if (!CPUInfo.hasFeature("bmi1")) {
+            System.out.println("CPU does not support bmi1 feature. " +
+                               "Test skipped.");
+            return;
+        }
+
+        BMITestRunner.runTests(AndnLExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+        BMITestRunner.runTests(AndnLCommutativeExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+    }
+
+    public static class AndnLExpr extends Expr.BMIBinaryLongExpr {
+
+        public long longExpr(long src1, long src2) {
+            return ~src1 & src2;
+        }
+
+        public long longExpr(long src1, Expr.MemL src2) {
+            return ~src1 & src2.value;
+        }
+
+        public long longExpr(Expr.MemL src1, long src2) {
+            return ~src1.value & src2;
+        }
+
+        public long longExpr(Expr.MemL src1, Expr.MemL src2) {
+            return ~src1.value & src2.value;
+        }
+
+
+    }
+
+    public static class AndnLCommutativeExpr extends Expr.BMIBinaryLongExpr {
+
+        public long longExpr(long src1, long src2) {
+            return src1 & ~src2;
+        }
+
+        public long longExpr(long src1, Expr.MemL src2) {
+            return src1 & ~src2.value;
+        }
+
+        public long longExpr(Expr.MemL src1, long src2) {
+            return src1.value & ~src2;
+        }
+
+        public long longExpr(Expr.MemL src1, Expr.MemL src2) {
+            return src1.value & ~src2.value;
+        }
+
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsiI.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8031321
+ * @summary Verify that results of computations are the same w/
+ *          and w/o usage of BLSI instruction
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestBlsiI BMITestRunner Expr
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestBlsiI
+ */
+
+import sun.hotspot.cpuinfo.CPUInfo;
+
+public class TestBlsiI {
+
+    public static void main(String args[]) throws Throwable {
+        if (!CPUInfo.hasFeature("bmi1")) {
+            System.out.println("CPU does not support bmi1 feature. " +
+                               "Test skipped.");
+            return;
+        }
+
+        BMITestRunner.runTests(BlsiIExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+        BMITestRunner.runTests(BlsiICommutativeExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+    }
+
+    public static class BlsiIExpr extends Expr.BMIUnaryIntExpr {
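+        // BLSI: isolate the lowest set bit (-src & src).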
+
+        public int intExpr(int src) {
+            return -src & src;
+        }
+
+        public int intExpr(Expr.MemI src) {
+            return -src.value & src.value;
+        }
+
+    }
+
+    public static class BlsiICommutativeExpr extends Expr.BMIUnaryIntExpr {
+
+        public int intExpr(int src) {
+            return src & -src;
+        }
+
+        public int intExpr(Expr.MemI src) {
+            return src.value & -src.value;
+        }
+
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsiL.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8031321
+ * @summary Verify that results of computations are the same w/
+ *          and w/o usage of BLSI instruction
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestBlsiL BMITestRunner Expr
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestBlsiL
+ */
+
+import sun.hotspot.cpuinfo.CPUInfo;
+
+public class TestBlsiL {
+
+    public static void main(String args[]) throws Throwable {
+        if (!CPUInfo.hasFeature("bmi1")) {
+            System.out.println("CPU does not support bmi1 feature. " +
+                               "Test skipped.");
+            return;
+        }
+
+        BMITestRunner.runTests(BlsiLExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+        BMITestRunner.runTests(BlsiLCommutativeExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+    }
+
+    public static class BlsiLExpr extends Expr.BMIUnaryLongExpr {
+
+        public long longExpr(long src) {
+            return -src & src;
+        }
+
+        public long longExpr(Expr.MemL src) {
+            return -src.value & src.value;
+        }
+
+    }
+
+    public static class BlsiLCommutativeExpr extends Expr.BMIUnaryLongExpr {
+
+        public long longExpr(long src) {
+            return src & -src;
+        }
+
+        public long longExpr(Expr.MemL src) {
+            return src.value & -src.value;
+        }
+
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsmskI.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8031321
+ * @summary Verify that results of computations are the same w/
+ *          and w/o usage of BLSMSK instruction
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestBlsmskI BMITestRunner Expr
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestBlsmskI
+ */
+
+import sun.hotspot.cpuinfo.CPUInfo;
+
+public class TestBlsmskI {
+
+    public static void main(String args[]) throws Throwable {
+        if (!CPUInfo.hasFeature("bmi1")) {
+            System.out.println("CPU does not support bmi1 feature. " +
+                               "Test skipped.");
+            return;
+        }
+
+        BMITestRunner.runTests(BlsmskIExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+        BMITestRunner.runTests(BlsmskICommutativeExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+    }
+
+    public static class BlsmskIExpr extends Expr.BMIUnaryIntExpr {
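+        // BLSMSK: mask up to and including the lowest set bit ((src - 1) ^ src).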
+
+        public int intExpr(int src) {
+            return (src - 1) ^ src;
+        }
+
+        public int intExpr(Expr.MemI src) {
+            return (src.value - 1) ^ src.value;
+        }
+
+    }
+
+    public static class BlsmskICommutativeExpr extends Expr.BMIUnaryIntExpr {
+
+        public int intExpr(int src) {
+            return src ^ (src - 1);
+        }
+
+        public int intExpr(Expr.MemI src) {
+            return src.value ^ (src.value - 1);
+        }
+
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsmskL.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8031321
+ * @summary Verify that results of computations are the same w/
+ *          and w/o usage of BLSMSK instruction
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestBlsmskL BMITestRunner Expr
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestBlsmskL
+ */
+
+import sun.hotspot.cpuinfo.CPUInfo;
+
+public class TestBlsmskL {
+
+    public static void main(String args[]) throws Throwable {
+        if (!CPUInfo.hasFeature("bmi1")) {
+            System.out.println("CPU does not support bmi1 feature. " +
+                               "Test skipped.");
+            return;
+        }
+
+        BMITestRunner.runTests(BlsmskLExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+        BMITestRunner.runTests(BlsmskLCommutativeExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+    }
+
+    public static class BlsmskLExpr
+        extends Expr.BMIUnaryLongExpr {
+
+        public long longExpr(long src) {
+            return (src - 1) ^ src;
+        }
+
+        public long longExpr(Expr.MemL src) {
+            return (src.value - 1) ^ src.value;
+        }
+
+    }
+
+    public static class BlsmskLCommutativeExpr
+        extends Expr.BMIUnaryLongExpr {
+
+        public long longExpr(long src) {
+            return src ^ (src - 1);
+        }
+
+        public long longExpr(Expr.MemL src) {
+            return src.value ^ (src.value - 1);
+        }
+
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsrI.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8031321
+ * @summary Verify that results of computations are the same w/
+ *          and w/o usage of BLSR instruction
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestBlsrI BMITestRunner Expr
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestBlsrI
+ */
+
+import sun.hotspot.cpuinfo.CPUInfo;
+
+public class TestBlsrI {
+
+    public static void main(String args[]) throws Throwable {
+        if (!CPUInfo.hasFeature("bmi1")) {
+            System.out.println("CPU does not support bmi1 feature. " +
+                               "Test skipped.");
+            return;
+        }
+
+        BMITestRunner.runTests(BlsrIExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+        BMITestRunner.runTests(BlsrICommutativeExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+    }
+
+    public static class BlsrIExpr extends Expr.BMIUnaryIntExpr {
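+        // BLSR: clear the lowest set bit ((src - 1) & src).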
+
+        public int intExpr(int src) {
+            return (src - 1) & src;
+        }
+
+        public int intExpr(Expr.MemI src) {
+            return (src.value - 1) & src.value;
+        }
+
+    }
+
+    public static class BlsrICommutativeExpr extends Expr.BMIUnaryIntExpr {
+
+        public int intExpr(int src) {
+            return src & (src - 1);
+        }
+
+        public int intExpr(Expr.MemI src) {
+            return src.value & (src.value - 1);
+        }
+
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/bmi/TestBlsrL.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8031321
+ * @summary Verify that results of computations are the same w/
+ *          and w/o usage of BLSR instruction
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestBlsrL BMITestRunner Expr
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestBlsrL
+ */
+
+import sun.hotspot.cpuinfo.CPUInfo;
+
+public class TestBlsrL {
+
+    public static void main(String args[]) throws Throwable {
+        if (!CPUInfo.hasFeature("bmi1")) {
+            System.out.println("CPU does not support bmi1 feature. " +
+                               "Test skipped.");
+            return;
+        }
+
+        BMITestRunner.runTests(BlsrLExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+        BMITestRunner.runTests(BlsrLCommutativeExpr.class, args,
+                               "-XX:+UseBMI1Instructions");
+    }
+
+    public static class BlsrLExpr extends Expr.BMIUnaryLongExpr {
+
+        public long longExpr(long src) {
+            return (src - 1) & src;
+        }
+
+        public long longExpr(Expr.MemL src) {
+            return (src.value - 1) & src.value;
+        }
+
+    }
+
+    public static class BlsrLCommutativeExpr extends Expr.BMIUnaryLongExpr {
+
+        public long longExpr(long src) {
+            return src & (src - 1);
+        }
+
+        public long longExpr(Expr.MemL src) {
+            return src.value & (src.value - 1);
+        }
+
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/bmi/TestLzcntI.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8031321
+ * @summary Verify that results of computations are the same w/
+ *          and w/o usage of intrinsic
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestLzcntI BMITestRunner Expr
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestLzcntI
+ */
+
+import sun.hotspot.cpuinfo.CPUInfo;
+
+public class TestLzcntI {
+
+    public static void main(String args[]) throws Throwable {
+        if (!CPUInfo.hasFeature("lzcnt")) {
+            System.out.println("CPU does not support lzcnt feature. " +
+                               "Test skipped.");
+            return;
+        }
+
+        BMITestRunner.runTests(LzcntIExpr.class, args,
+                               "-XX:+UseCountLeadingZerosInstruction");
+    }
+
+    public static class LzcntIExpr extends Expr.BitCountingIntExpr {
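+        // Expected to compile to LZCNT when
+        // -XX:+UseCountLeadingZerosInstruction is set.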
+
+        public int intExpr(int src) {
+            return Integer.numberOfLeadingZeros(src);
+        }
+
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/bmi/TestLzcntL.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8031321
+ * @summary Verify that results of computations are the same w/
+ *          and w/o usage of intrinsic
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestLzcntL BMITestRunner Expr
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestLzcntL
+ */
+
+import sun.hotspot.cpuinfo.CPUInfo;
+
+public class TestLzcntL {
+
+    public static void main(String args[]) throws Throwable {
+        if (!CPUInfo.hasFeature("lzcnt")) {
+            System.out.println("CPU does not support lzcnt feature. " +
+                               "Test skipped.");
+            return;
+        }
+
+        BMITestRunner.runTests(LzcntLExpr.class, args,
+                               "-XX:+UseCountLeadingZerosInstruction");
+    }
+
+    public static class LzcntLExpr extends Expr.BitCountingLongExpr {
+
+        public long longExpr(long src) {
+            return Long.numberOfLeadingZeros(src);
+        }
+
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/bmi/TestTzcntI.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8031321
+ * @summary Verify that results of computations are the same w/
+ *          and w/o usage of intrinsic
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestTzcntI BMITestRunner Expr
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestTzcntI
+ */
+
+import sun.hotspot.cpuinfo.CPUInfo;
+
+public class TestTzcntI {
+
+    public static void main(String args[]) throws Throwable {
+        if (!CPUInfo.hasFeature("bmi1")) {
+            System.out.println("CPU does not support bmi1 feature. " +
+                               "Test skipped.");
+            return;
+        }
+
+        BMITestRunner.runTests(TzcntIExpr.class, args,
+                               "-XX:+UseCountTrailingZerosInstruction");
+    }
+
+    public static class TzcntIExpr extends Expr.BitCountingIntExpr {
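+        // Expected to compile to TZCNT when
+        // -XX:+UseCountTrailingZerosInstruction is set.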
+
+        public int intExpr(int src) {
+            return Integer.numberOfTrailingZeros(src);
+        }
+
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/bmi/TestTzcntL.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8031321
+ * @summary Verify that results of computations are the same w/
+ *          and w/o usage of intrinsic
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestTzcntL BMITestRunner Expr
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestTzcntL
+ */
+
+import sun.hotspot.cpuinfo.CPUInfo;
+
+public class TestTzcntL {
+
+    public static void main(String args[]) throws Throwable {
+        if (!CPUInfo.hasFeature("bmi1")) {
+            System.out.println("CPU does not support bmi1 feature. " +
+                               "Test skipped.");
+            return;
+        }
+
+        BMITestRunner.runTests(TzcntLExpr.class, args,
+                               "-XX:+UseCountTrailingZerosInstruction");
+    }
+
+    public static class TzcntLExpr extends Expr.BitCountingLongExpr {
+
+        public long longExpr(long src) {
+            return Long.numberOfTrailingZeros(src);
+        }
+
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/unsafe/UnsafeGetAddressTest.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 6653795
+ * @summary C2 intrinsic for Unsafe.getAddress performs pointer sign extension on 32-bit systems
+ * @run main UnsafeGetAddressTest
+ *
+ */
+
+import sun.misc.Unsafe;
+import java.lang.reflect.*;
+
+public class UnsafeGetAddressTest {
+    private static Unsafe unsafe;
+
+    public static void main(String[] args) throws Exception {
+        Class<?> c = UnsafeGetAddressTest.class.getClassLoader().loadClass("sun.misc.Unsafe");
+        Field f = c.getDeclaredField("theUnsafe");
+        f.setAccessible(true);
+        unsafe = (Unsafe)f.get(c);
+
+        long address = unsafe.allocateMemory(unsafe.addressSize());
+        unsafe.putAddress(address, 0x0000000080000000L);
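+        // 0x80000000 has the sign bit of a 32-bit pointer set; a buggy
+        // intrinsic would sign-extend it to 0xFFFFFFFF80000000L on read.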
+        // from sun.misc.Unsafe.getAddress' documentation:
+        // "If the native pointer is less than 64 bits wide, it is
+        // extended as an unsigned number to a Java long."
+        result = unsafe.getAddress(address);
+        System.out.printf("1: was 0x%x, expected 0x%x\n", result,
+                0x0000000080000000L);
+        for (int i = 0; i < 1000000; i++) {
+            result = unsafe.getAddress(address);
+        }
+
+        // The loop should have triggered compilation; check the result now.
+        System.out.printf("2: was 0x%x, expected 0x%x\n", result,
+                0x0000000080000000L);
+        if (result != 0x0000000080000000L) {
+            System.out.println("Test Failed");
+            System.exit(97);
+        } else {
+            System.out.println("Test Passed");
+        }
+    }
+    static volatile long result;
+}
+
--- a/hotspot/test/compiler/whitebox/CompilerWhiteBoxTest.java	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/test/compiler/whitebox/CompilerWhiteBoxTest.java	Thu Mar 27 14:15:42 2014 +0100
@@ -24,6 +24,7 @@
 import com.sun.management.HotSpotDiagnosticMXBean;
 import com.sun.management.VMOption;
 import sun.hotspot.WhiteBox;
+import sun.hotspot.code.NMethod;
 import sun.management.ManagementFactoryHelper;
 
 import java.lang.reflect.Constructor;
@@ -278,7 +279,8 @@
     }
 
     protected final int getCompLevel() {
-        return WHITE_BOX.getMethodCompilationLevel(method, testCase.isOsr());
+        NMethod nm = NMethod.get(method, testCase.isOsr());
+        return nm == null ? COMP_LEVEL_NONE : nm.comp_level;
     }
 
     protected final boolean isCompilable() {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/whitebox/GetNMethodTest.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import sun.hotspot.code.NMethod;
+
+/*
+ * @test GetNMethodTest
+ * @bug 8038240
+ * @library /testlibrary /testlibrary/whitebox
+ * @build GetNMethodTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* GetNMethodTest
+ * @summary testing of WB::getNMethod()
+ * @author igor.ignatyev@oracle.com
+ */
+public class GetNMethodTest extends CompilerWhiteBoxTest {
+    public static void main(String[] args) throws Exception {
+        CompilerWhiteBoxTest.main(GetNMethodTest::new, args);
+    }
+
+    private GetNMethodTest(TestCase testCase) {
+        super(testCase);
+        // to prevent inlining of #method
+        WHITE_BOX.testSetDontInlineMethod(method, true);
+    }
+
+    @Override
+    protected void test() throws Exception {
+        checkNotCompiled();
+
+        compile();
+        checkCompiled();
+        NMethod nmethod = NMethod.get(method, testCase.isOsr());
+        if (IS_VERBOSE) {
+            System.out.println("nmethod = " + nmethod);
+        }
+        if (nmethod == null) {
+            throw new RuntimeException("nmethod of compiled method is null");
+        }
+        if (nmethod.insts.length == 0) {
+            throw new RuntimeException("compiled method's instructions is empty");
+        }
+        deoptimize();
+        checkNotCompiled();
+        nmethod = NMethod.get(method, testCase.isOsr());
+        if (nmethod != null) {
+            throw new RuntimeException("nmethod of non-compiled method isn't null");
+        }
+    }
+}
--- a/hotspot/test/testlibrary/com/oracle/java/testlibrary/Asserts.java	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/test/testlibrary/com/oracle/java/testlibrary/Asserts.java	Thu Mar 27 14:15:42 2014 +0100
@@ -378,6 +378,64 @@
         }
     }
 
+    /**
+     * Asserts that two strings are equal.
+     *
+     * If the strings are not equal, the exception message
+     * will contain {@code msg} followed by a list of the mismatched lines.
+     *
+     * @param str1 First string to compare.
+     * @param str2 Second string to compare.
+     * @param msg A description of the assumption.
+     * @throws RuntimeException if strings are not equal.
+     */
+    public static void assertStringsEqual(String str1, String str2,
+                                          String msg) {
+        String lineSeparator = System.getProperty("line.separator");
+        String[] str1Lines = str1.split(lineSeparator);
+        String[] str2Lines = str2.split(lineSeparator);
+
+        int minLength = Math.min(str1Lines.length, str2Lines.length);
+        String[] longestStringLines = ((str1Lines.length == minLength) ?
+                                       str2Lines : str1Lines);
+
+        boolean stringsAreDifferent = false;
+
+        StringBuilder messageBuilder = new StringBuilder(msg);
+
+        messageBuilder.append("\n");
+
+        for (int line = 0; line < minLength; line++) {
+            if (!str1Lines[line].equals(str2Lines[line])) {
+                messageBuilder.append(String.
+                                      format("[line %d] '%s' differs " +
+                                             "from '%s'\n",
+                                             line,
+                                             str1Lines[line],
+                                             str2Lines[line]));
+                stringsAreDifferent = true;
+            }
+        }
+
+        if (minLength < longestStringLines.length) {
+            String stringName = ((longestStringLines == str1Lines) ?
+                                 "first" : "second");
+            messageBuilder.append(String.format("Only %s string contains " +
+                                                "following lines:\n",
+                                                stringName));
+            stringsAreDifferent = true;
+            for (int line = minLength; line < longestStringLines.length; line++) {
+                messageBuilder.append(String.
+                                      format("[line %d] '%s'\n", line,
+                                             longestStringLines[line]));
+            }
+        }
+
+        if (stringsAreDifferent) {
+            error(messageBuilder.toString());
+        }
+    }
+
     private static <T extends Comparable<T>> int compare(T lhs, T rhs, String msg) {
         assertNotNull(lhs, msg);
         assertNotNull(rhs, msg);
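
A minimal usage sketch of the new assertStringsEqual helper (the demo class and strings below are illustrative, not part of this changeset): on mismatch it throws a RuntimeException whose message lists each differing line by its 0-based index.

    import com.oracle.java.testlibrary.Asserts;

    public class AssertStringsEqualDemo {
        public static void main(String[] args) {
            String nl = System.getProperty("line.separator");
            String expected = "alpha" + nl + "beta";
            String actual   = "alpha" + nl + "gamma";
            // Throws: "outputs differ" followed by "[line 1] 'beta' differs from 'gamma'"
            Asserts.assertStringsEqual(expected, actual, "outputs differ");
        }
    }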
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Thu Mar 27 03:14:45 2014 -0700
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Thu Mar 27 14:15:42 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -135,6 +135,7 @@
   public native boolean enqueueMethodForCompilation(Executable method, int compLevel, int entry_bci);
   public native void    clearMethodState(Executable method);
   public native int     getMethodEntryBci(Executable method);
+  public native Object[] getNMethod(Executable method, boolean isOsr);
 
   // Interned strings
   public native boolean isInStringTable(String str);
@@ -150,4 +151,7 @@
   public native void runMemoryUnitTests();
   public native void readFromNoaccessArea();
 
+  // CPU features
+  public native String getCPUFeatures();
+
 }
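
For reference, a hedged sketch of driving the two new WhiteBox entry points directly; it assumes the usual whitebox setup (WhiteBox installed on the bootclasspath, -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI), and the demo class name is hypothetical.

    import java.lang.reflect.Executable;
    import sun.hotspot.WhiteBox;

    public class WhiteBoxNativesDemo {
        public static void main(String[] args) throws Exception {
            WhiteBox wb = WhiteBox.getWhiteBox();

            // Raw, platform-dependent feature string; CPUInfo (added below) parses it.
            System.out.println("CPU features: " + wb.getCPUFeatures());

            // getNMethod returns null until the method is actually compiled.
            Executable m = WhiteBoxNativesDemo.class.getMethod("main", String[].class);
            Object[] nm = wb.getNMethod(m, false /* not OSR */);
            System.out.println(nm == null ? "not compiled" : "comp_level = " + nm[0]);
        }
    }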
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/code/NMethod.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.hotspot.code;
+
+import java.lang.reflect.Executable;
+import java.util.Arrays;
+import sun.hotspot.WhiteBox;
+
+public class NMethod {
+  private static final WhiteBox wb = WhiteBox.getWhiteBox();
+  public static NMethod get(Executable method, boolean isOsr) {
+    Object[] obj = wb.getNMethod(method, isOsr);
+    return obj == null ? null : new NMethod(obj);
+  }
+  private NMethod(Object[] obj) {
+    assert obj.length == 2;
+    comp_level = (Integer) obj[0];
+    insts = (byte[]) obj[1];
+  }
+  public byte[] insts;
+  public int comp_level;
+
+  @Override
+  public String toString() {
+    return "NMethod{" +
+        "insts=" + insts +
+        ", comp_level=" + comp_level +
+        '}';
+  }
+}
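
The wrapper hides the positional Object[] protocol of WB.getNMethod (index 0 is the compilation level, index 1 the instruction bytes). A sketch of typical use follows; the payload method and warm-up loop are hypothetical, and passing 4 to enqueueMethodForCompilation assumes a tiered build where level 4 is C2.

    import java.lang.reflect.Method;
    import sun.hotspot.WhiteBox;
    import sun.hotspot.code.NMethod;

    public class NMethodDemo {
        private static int counter;
        private static void payload() { counter++; }

        public static void main(String[] args) throws Exception {
            WhiteBox wb = WhiteBox.getWhiteBox();
            Method m = NMethodDemo.class.getDeclaredMethod("payload");

            // Either warm the method up ...
            for (int i = 0; i < 200_000; i++) { payload(); }
            // ... or request compilation directly (-1 means the standard, non-OSR entry).
            wb.enqueueMethodForCompilation(m, 4, -1);

            NMethod nm = NMethod.get(m, false /* not OSR */);
            System.out.println(nm == null
                ? "payload not compiled"
                : "level " + nm.comp_level + ", " + nm.insts.length + " code bytes");
        }
    }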
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/cpuinfo/CPUInfo.java	Thu Mar 27 14:15:42 2014 +0100
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.hotspot.cpuinfo;
+
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.regex.Pattern;
+import java.util.regex.Matcher;
+
+import sun.hotspot.WhiteBox;
+
+/**
+ * Information about the CPU on the test box.
+ *
+ * CPUInfo uses WhiteBox to gather its information,
+ * so the WhiteBox class must be added to the bootclasspath
+ * and the option -XX:+WhiteBoxAPI must be explicitly
+ * specified on the command line.
+ */
+public class CPUInfo {
+
+    private static final List<String> features;
+    private static final String additionalCPUInfo;
+
+    static {
+        WhiteBox wb = WhiteBox.getWhiteBox();
+
+        Pattern additionalCPUInfoRE =
+            Pattern.compile("([^(]*\\([^)]*\\)[^,]*),\\s*");
+
+        String cpuFeaturesString = wb.getCPUFeatures();
+        Matcher matcher = additionalCPUInfoRE.matcher(cpuFeaturesString);
+        if (matcher.find()) {
+            additionalCPUInfo = matcher.group(1);
+        } else {
+            additionalCPUInfo = "";
+        }
+        String[] splitFeatures = matcher.replaceAll("").split("(, )| ");
+
+        features = Collections.unmodifiableList(Arrays.
+                                                asList(splitFeatures));
+    }
+
+    /**
+     * Get additional information about the CPU.
+     * For example, on x86 this will be the family/model/stepping
+     * and the number of cores.
+     *
+     * @return additional CPU info
+     */
+    public static String getAdditionalCPUInfo() {
+        return additionalCPUInfo;
+    }
+
+    /**
+     * Get all known features supported by the CPU.
+     *
+     * @return an unmodifiable list with the names of all known features
+     *         supported by the CPU.
+     */
+    public static List<String> getFeatures() {
+        return features;
+    }
+
+    /**
+     * Check whether the given feature is supported by the CPU.
+     *
+     * @param feature Name of the feature to be tested.
+     * @return <b>true</b> if the tested feature is supported by the CPU.
+     */
+    public static boolean hasFeature(String feature) {
+        return features.contains(feature.toLowerCase());
+    }
+}
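
A minimal sketch of the intended use in a test, assuming the same whitebox command line as the other tests in this change; "sse4.2" is only an illustrative x86 feature name.

    import sun.hotspot.cpuinfo.CPUInfo;

    public class CPUInfoDemo {
        public static void main(String[] args) {
            System.out.println("Additional info: " + CPUInfo.getAdditionalCPUInfo());
            System.out.println("Features:        " + CPUInfo.getFeatures());
            // hasFeature lower-cases the query before looking it up in the list.
            if (CPUInfo.hasFeature("sse4.2")) {
                System.out.println("SSE4.2 present; the vectorized path can be exercised");
            }
        }
    }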