Merge
author iveresov
Thu, 09 Oct 2014 12:36:20 -0700
changeset 27018 849e52618298
parent 27010 ecef4adc89f4
parent 27017 bae5d661dd4b
child 27019 e326bfa8aeed
--- a/hotspot/make/aix/makefiles/fastdebug.make	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/make/aix/makefiles/fastdebug.make	Thu Oct 09 12:36:20 2014 -0700
@@ -67,7 +67,6 @@
 #    not justified.
 LFLAGS_QIPA=
 
-G_SUFFIX = _g
 VERSION = optimized
 SYSDEFS += -DASSERT -DFASTDEBUG
 PICFLAGS = DEFAULT
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp	Thu Oct 09 12:36:20 2014 -0700
@@ -268,8 +268,35 @@
 
     ISEL_OPCODE   = (31u << OPCODE_SHIFT |  15u << 1),
 
-    MTLR_OPCODE   = (31u << OPCODE_SHIFT | 467u << 1 | 8 << SPR_0_4_SHIFT),
-    MFLR_OPCODE   = (31u << OPCODE_SHIFT | 339u << 1 | 8 << SPR_0_4_SHIFT),
+    // Special purpose registers
+    MTSPR_OPCODE  = (31u << OPCODE_SHIFT | 467u << 1),
+    MFSPR_OPCODE  = (31u << OPCODE_SHIFT | 339u << 1),
+
+    MTXER_OPCODE  = (MTSPR_OPCODE | 1 << SPR_0_4_SHIFT),
+    MFXER_OPCODE  = (MFSPR_OPCODE | 1 << SPR_0_4_SHIFT),
+
+    MTDSCR_OPCODE = (MTSPR_OPCODE | 3 << SPR_0_4_SHIFT),
+    MFDSCR_OPCODE = (MFSPR_OPCODE | 3 << SPR_0_4_SHIFT),
+
+    MTLR_OPCODE   = (MTSPR_OPCODE | 8 << SPR_0_4_SHIFT),
+    MFLR_OPCODE   = (MFSPR_OPCODE | 8 << SPR_0_4_SHIFT),
+
+    MTCTR_OPCODE  = (MTSPR_OPCODE | 9 << SPR_0_4_SHIFT),
+    MFCTR_OPCODE  = (MFSPR_OPCODE | 9 << SPR_0_4_SHIFT),
+
+    MTTFHAR_OPCODE   = (MTSPR_OPCODE | 128 << SPR_0_4_SHIFT),
+    MFTFHAR_OPCODE   = (MFSPR_OPCODE | 128 << SPR_0_4_SHIFT),
+    MTTFIAR_OPCODE   = (MTSPR_OPCODE | 129 << SPR_0_4_SHIFT),
+    MFTFIAR_OPCODE   = (MFSPR_OPCODE | 129 << SPR_0_4_SHIFT),
+    MTTEXASR_OPCODE  = (MTSPR_OPCODE | 130 << SPR_0_4_SHIFT),
+    MFTEXASR_OPCODE  = (MFSPR_OPCODE | 130 << SPR_0_4_SHIFT),
+    MTTEXASRU_OPCODE = (MTSPR_OPCODE | 131 << SPR_0_4_SHIFT),
+    MFTEXASRU_OPCODE = (MFSPR_OPCODE | 131 << SPR_0_4_SHIFT),
+
+    MTVRSAVE_OPCODE  = (MTSPR_OPCODE | 256 << SPR_0_4_SHIFT),
+    MFVRSAVE_OPCODE  = (MFSPR_OPCODE | 256 << SPR_0_4_SHIFT),
+
+    MFTB_OPCODE   = (MFSPR_OPCODE | 268 << SPR_0_4_SHIFT),
 
     MTCRF_OPCODE  = (31u << OPCODE_SHIFT | 144u << 1),
     MFCR_OPCODE   = (31u << OPCODE_SHIFT | 19u << 1),
@@ -291,9 +318,6 @@
 
     // CTR-related opcodes
     BCCTR_OPCODE  = (19u << OPCODE_SHIFT | 528u << 1),
-    MTCTR_OPCODE  = (31u << OPCODE_SHIFT | 467u << 1 | 9 << SPR_0_4_SHIFT),
-    MFCTR_OPCODE  = (31u << OPCODE_SHIFT | 339u << 1 | 9 << SPR_0_4_SHIFT),
-
 
     LWZ_OPCODE   = (32u << OPCODE_SHIFT),
     LWZX_OPCODE  = (31u << OPCODE_SHIFT |  23u << 1),
@@ -585,6 +609,37 @@
     MTVSCR_OPCODE  = (4u  << OPCODE_SHIFT | 1604u     ),
     MFVSCR_OPCODE  = (4u  << OPCODE_SHIFT | 1540u     ),
 
+    // AES (introduced with Power 8)
+    VCIPHER_OPCODE      = (4u  << OPCODE_SHIFT | 1288u),
+    VCIPHERLAST_OPCODE  = (4u  << OPCODE_SHIFT | 1289u),
+    VNCIPHER_OPCODE     = (4u  << OPCODE_SHIFT | 1352u),
+    VNCIPHERLAST_OPCODE = (4u  << OPCODE_SHIFT | 1353u),
+    VSBOX_OPCODE        = (4u  << OPCODE_SHIFT | 1480u),
+
+    // SHA (introduced with Power 8)
+    VSHASIGMAD_OPCODE   = (4u  << OPCODE_SHIFT | 1730u),
+    VSHASIGMAW_OPCODE   = (4u  << OPCODE_SHIFT | 1666u),
+
+    // Vector Binary Polynomial Multiplication (introduced with Power 8)
+    VPMSUMB_OPCODE      = (4u  << OPCODE_SHIFT | 1032u),
+    VPMSUMD_OPCODE      = (4u  << OPCODE_SHIFT | 1224u),
+    VPMSUMH_OPCODE      = (4u  << OPCODE_SHIFT | 1096u),
+    VPMSUMW_OPCODE      = (4u  << OPCODE_SHIFT | 1160u),
+
+    // Vector Permute and Xor (introduced with Power 8)
+    VPERMXOR_OPCODE     = (4u  << OPCODE_SHIFT |   45u),
+
+    // Transactional Memory instructions (introduced with Power 8)
+    TBEGIN_OPCODE    = (31u << OPCODE_SHIFT |  654u << 1),
+    TEND_OPCODE      = (31u << OPCODE_SHIFT |  686u << 1),
+    TABORT_OPCODE    = (31u << OPCODE_SHIFT |  910u << 1),
+    TABORTWC_OPCODE  = (31u << OPCODE_SHIFT |  782u << 1),
+    TABORTWCI_OPCODE = (31u << OPCODE_SHIFT |  846u << 1),
+    TABORTDC_OPCODE  = (31u << OPCODE_SHIFT |  814u << 1),
+    TABORTDCI_OPCODE = (31u << OPCODE_SHIFT |  878u << 1),
+    TSR_OPCODE       = (31u << OPCODE_SHIFT |  750u << 1),
+    TCHECK_OPCODE    = (31u << OPCODE_SHIFT |  718u << 1),
+
     // Icache and dcache related instructions
     DCBA_OPCODE    = (31u << OPCODE_SHIFT |  758u << 1),
     DCBZ_OPCODE    = (31u << OPCODE_SHIFT | 1014u << 1),
@@ -1420,6 +1475,25 @@
   inline void mcrf( ConditionRegister crd, ConditionRegister cra);
   inline void mtcr( Register s);
 
+  // Special purpose registers
+  // Exception Register
+  inline void mtxer(Register s1);
+  inline void mfxer(Register d);
+  // Vector Register Save Register
+  inline void mtvrsave(Register s1);
+  inline void mfvrsave(Register d);
+  // Timebase
+  inline void mftb(Register d);
+  // Introduced with Power 8:
+  // Data Stream Control Register
+  inline void mtdscr(Register s1);
+  inline void mfdscr(Register d);
+  // Transactional Memory Registers
+  inline void mftfhar(Register d);
+  inline void mftfiar(Register d);
+  inline void mftexasr(Register d);
+  inline void mftexasru(Register d);
+
   // PPC 1, section 2.4.1 Branch Instructions
   inline void b(  address a, relocInfo::relocType rt = relocInfo::none);
   inline void b(  Label& L);
@@ -1860,6 +1934,39 @@
   inline void mtvscr(   VectorRegister b);
   inline void mfvscr(   VectorRegister d);
 
+  // AES (introduced with Power 8)
+  inline void vcipher(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcipherlast( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vncipher(    VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vncipherlast(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsbox(       VectorRegister d, VectorRegister a);
+
+  // SHA (introduced with Power 8)
+  // Not yet implemented.
+
+  // Vector Binary Polynomial Multiplication (introduced with Power 8)
+  inline void vpmsumb(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpmsumd(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpmsumh(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpmsumw(  VectorRegister d, VectorRegister a, VectorRegister b);
+
+  // Vector Permute and Xor (introduced with Power 8)
+  inline void vpermxor( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+
+  // Transactional Memory instructions (introduced with Power 8)
+  inline void tbegin_();    // R=0
+  inline void tbeginrot_(); // R=1 Rollback-Only Transaction
+  inline void tend_();    // A=0
+  inline void tendall_(); // A=1
+  inline void tabort_(Register a);
+  inline void tabortwc_(int t, Register a, Register b);
+  inline void tabortwci_(int t, Register a, int si);
+  inline void tabortdc_(int t, Register a, Register b);
+  inline void tabortdci_(int t, Register a, int si);
+  inline void tsuspend_(); // tsr with L=0
+  inline void tresume_();  // tsr with L=1
+  inline void tcheck(int f);
+
   // The following encoders use r0 as second operand. These instructions
   // read r0 as '0'.
   inline void lwzx( Register d, Register s2);
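
The hunk above folds the previously hard-coded MTLR/MFLR and MTCTR/MFCTR constants into forms derived from the generic mtspr/mfspr opcodes, alongside the newly added SPRs. A minimal standalone sketch (not HotSpot code; OPCODE_SHIFT = 26 is the PPC primary-opcode position, and SPR_0_4_SHIFT = 16 is assumed from the constants above) verifying that the old and new formulations are bit-identical for the link register, SPR 8:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t OPCODE_SHIFT  = 26; // primary opcode field
      const uint32_t SPR_0_4_SHIFT = 16; // assumed value
      const uint32_t MTSPR_OPCODE  = 31u << OPCODE_SHIFT | 467u << 1;
      const uint32_t MFSPR_OPCODE  = 31u << OPCODE_SHIFT | 339u << 1;

      // Old, hard-coded forms removed by this change (LR is SPR 8):
      const uint32_t MTLR_OLD = 31u << OPCODE_SHIFT | 467u << 1 | 8 << SPR_0_4_SHIFT;
      const uint32_t MFLR_OLD = 31u << OPCODE_SHIFT | 339u << 1 | 8 << SPR_0_4_SHIFT;

      // The new, derived forms are the same bits, just factored differently.
      assert((MTSPR_OPCODE | 8 << SPR_0_4_SHIFT) == MTLR_OLD);
      assert((MFSPR_OPCODE | 8 << SPR_0_4_SHIFT) == MFLR_OLD);
      return 0;
    }
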
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Thu Oct 09 12:36:20 2014 -0700
@@ -312,6 +312,25 @@
                                                       { emit_int32(MCRF_OPCODE | bf(crd) | bfa(cra)); }
 inline void Assembler::mtcr( Register s)          { Assembler::mtcrf(0xff, s); }
 
+// Special purpose registers
+// Exception Register
+inline void Assembler::mtxer(Register s1)         { emit_int32(MTXER_OPCODE | rs(s1)); }
+inline void Assembler::mfxer(Register d )         { emit_int32(MFXER_OPCODE | rt(d)); }
+// Vector Register Save Register
+inline void Assembler::mtvrsave(Register s1)      { emit_int32(MTVRSAVE_OPCODE | rs(s1)); }
+inline void Assembler::mfvrsave(Register d )      { emit_int32(MFVRSAVE_OPCODE | rt(d)); }
+// Timebase
+inline void Assembler::mftb(Register d )          { emit_int32(MFTB_OPCODE  | rt(d)); }
+// Introduced with Power 8:
+// Data Stream Control Register
+inline void Assembler::mtdscr(Register s1)        { emit_int32(MTDSCR_OPCODE | rs(s1)); }
+inline void Assembler::mfdscr(Register d )        { emit_int32(MFDSCR_OPCODE | rt(d)); }
+// Transactional Memory Registers
+inline void Assembler::mftfhar(Register d )       { emit_int32(MFTFHAR_OPCODE   | rt(d)); }
+inline void Assembler::mftfiar(Register d )       { emit_int32(MFTFIAR_OPCODE   | rt(d)); }
+inline void Assembler::mftexasr(Register d )      { emit_int32(MFTEXASR_OPCODE  | rt(d)); }
+inline void Assembler::mftexasru(Register d )     { emit_int32(MFTEXASRU_OPCODE | rt(d)); }
+
 // SAP JVM 2006-02-13 PPC branch instruction.
 // PPC 1, section 2.4.1 Branch Instructions
 inline void Assembler::b( address a, relocInfo::relocType rt) { emit_data(BXX_OPCODE| li(disp( intptr_t(a), intptr_t(pc()))) |aa(0)|lk(0), rt); }
@@ -735,6 +754,39 @@
 inline void Assembler::mtvscr(  VectorRegister b)                                     { emit_int32( MTVSCR_OPCODE   | vrb(b)); }
 inline void Assembler::mfvscr(  VectorRegister d)                                     { emit_int32( MFVSCR_OPCODE   | vrt(d)); }
 
+// AES (introduced with Power 8)
+inline void Assembler::vcipher(     VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCIPHER_OPCODE      | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vcipherlast( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCIPHERLAST_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vncipher(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNCIPHER_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vncipherlast(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNCIPHERLAST_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsbox(       VectorRegister d, VectorRegister a)                   { emit_int32( VSBOX_OPCODE        | vrt(d) | vra(a)         ); }
+
+// SHA (introduced with Power 8)
+// Not yet implemented.
+
+// Vector Binary Polynomial Multiplication (introduced with Power 8)
+inline void Assembler::vpmsumb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpmsumd(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMD_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpmsumh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpmsumw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+
+// Vector Permute and Xor (introduced with Power 8)
+inline void Assembler::vpermxor( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VPERMXOR_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
+
+// Transactional Memory instructions (introduced with Power 8)
+inline void Assembler::tbegin_()                                { emit_int32( TBEGIN_OPCODE | rc(1)); }
+inline void Assembler::tbeginrot_()                             { emit_int32( TBEGIN_OPCODE | /*R=1*/ 1u << (31-10) | rc(1)); }
+inline void Assembler::tend_()                                  { emit_int32( TEND_OPCODE | rc(1)); }
+inline void Assembler::tendall_()                               { emit_int32( TEND_OPCODE | /*A=1*/ 1u << (31-6) | rc(1)); }
+inline void Assembler::tabort_(Register a)                      { emit_int32( TABORT_OPCODE | ra(a) | rc(1)); }
+inline void Assembler::tabortwc_(int t, Register a, Register b) { emit_int32( TABORTWC_OPCODE | to(t) | ra(a) | rb(b) | rc(1)); }
+inline void Assembler::tabortwci_(int t, Register a, int si)    { emit_int32( TABORTWCI_OPCODE | to(t) | ra(a) | sh1620(si) | rc(1)); }
+inline void Assembler::tabortdc_(int t, Register a, Register b) { emit_int32( TABORTDC_OPCODE | to(t) | ra(a) | rb(b) | rc(1)); }
+inline void Assembler::tabortdci_(int t, Register a, int si)    { emit_int32( TABORTDCI_OPCODE | to(t) | ra(a) | sh1620(si) | rc(1)); }
+inline void Assembler::tsuspend_()                              { emit_int32( TSR_OPCODE | rc(1)); }
+inline void Assembler::tresume_()                               { emit_int32( TSR_OPCODE | /*L=1*/ 1u << (31-10) | rc(1)); }
+inline void Assembler::tcheck(int f)                            { emit_int32( TCHECK_OPCODE | bf(f)); }
+
 // ra0 version
 inline void Assembler::lwzx( Register d, Register s2) { emit_int32( LWZX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lwz(  Register d, int si16   ) { emit_int32( LWZ_OPCODE  | rt(d) | d1(si16));}
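
All of the transactional-memory encoders above emit Rc=1 (the trailing underscore in HotSpot's naming), because the Power 8 TM instructions record their status in CR0. A hedged sketch of the pattern these encoders enable (the function and label names are made up; per the Power ISA, a failed or aborted transaction resumes at the instruction after tbegin. with the CR0 eq bit set, hence the beq):

    // Hedged sketch, not part of this changeset.
    void emit_speculative_region(MacroAssembler* masm, Label& fallback) {
      masm->tbegin_();            // start transaction; Rc=1 puts status in CR0
      masm->beq(CCR0, fallback);  // transaction failed or aborted: take fallback path
      // ... speculative body, e.g. lock elision ...
      masm->tend_();              // commit
    }
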
--- a/hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Thu Oct 09 12:36:20 2014 -0700
@@ -37,6 +37,8 @@
 // signatures accordingly.
 const bool CCallingConventionRequiresIntsAsLongs = true;
 
+#define SUPPORTS_NATIVE_CX8
+
 // The PPC CPUs are NOT multiple-copy-atomic.
 #define CPU_NOT_MULTIPLE_COPY_ATOMIC
 
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Thu Oct 09 12:36:20 2014 -0700
@@ -25,7 +25,6 @@
 
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "interp_masm_ppc_64.hpp"
 #include "interpreter/interpreterRuntime.hpp"
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Thu Oct 09 12:36:20 2014 -0700
@@ -24,7 +24,6 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Thu Oct 09 12:36:20 2014 -0700
@@ -2366,7 +2366,7 @@
 #endif // INCLUDE_ALL_GCS
 
 // Values for last_Java_pc, and last_Java_sp must comply to the rules
-// in frame_ppc64.hpp.
+// in frame_ppc.hpp.
 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
   // Always set last_Java_pc and flags first because once last_Java_sp
   // is visible has_last_Java_frame is true and users will look at the
@@ -2493,6 +2493,7 @@
 }
 
 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
+  assert(dst != R0, "Dst reg may not be R0, as R0 is used here.");
   if (src == noreg) src = dst;
   Register shifted_src = src;
   if (Universe::narrow_klass_shift() != 0 ||
@@ -2527,14 +2528,11 @@
 
 void MacroAssembler::reinit_heapbase(Register d, Register tmp) {
   if (Universe::heap() != NULL) {
-    if (Universe::narrow_oop_base() == NULL) {
-      Assembler::xorr(R30, R30, R30);
-    } else {
-      load_const(R30, Universe::narrow_ptrs_base(), tmp);
-    }
+    load_const_optimized(R30, Universe::narrow_ptrs_base(), tmp);
   } else {
-    load_const(R30, Universe::narrow_ptrs_base_addr(), tmp);
-    ld(R30, 0, R30);
+    // Heap not yet allocated. Load indirectly.
+    int simm16_offset = load_const_optimized(R30, Universe::narrow_ptrs_base_addr(), tmp, true);
+    ld(R30, simm16_offset, R30);
   }
 }
 
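
For reference, the shape of the new indirect load in reinit_heapbase: when its final argument is true, load_const_optimized materializes only the upper bits of the constant and returns the low 16-bit remainder, which the caller folds into the displacement of the following ld, saving one instruction over load_const plus ld. A hedged usage sketch (the helper and address are made up; the signature follows HotSpot's macroAssembler_ppc):

    // Hedged sketch, not part of this changeset.
    void load_indirect(MacroAssembler* masm, address a) {
      int rest = masm->load_const_optimized(R30, (long)a, R0, /*return_simm16_rest=*/true);
      masm->ld(R30, rest, R30); // R30 = *a
    }
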
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Thu Oct 09 12:36:20 2014 -0700
@@ -1249,6 +1249,7 @@
 
     // Emit the trampoline stub which will be related to the branch-and-link below.
     CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
+    if (Compile::current()->env()->failing()) { return offsets; } // Code cache may be full.
     __ relocate(rtype);
   }
 
@@ -1410,7 +1411,7 @@
     while (bang_offset <= bang_end) {
       // Need at least one stack bang at end of shadow zone.
 
-      // Again I had to copy code, this time from assembler_ppc64.cpp,
+      // Again I had to copy code, this time from assembler_ppc.cpp,
       // bang_stack_with_offset - see there for comments.
 
       // Stack grows down, caller passes positive offset.
@@ -2000,7 +2001,7 @@
 
   // Inline_cache contains a klass.
   Register ic_klass       = as_Register(Matcher::inline_cache_reg_encode());
-  Register receiver_klass = R0;  // tmp
+  Register receiver_klass = R12_scratch2;  // tmp
 
   assert_different_registers(ic_klass, receiver_klass, R11_scratch1, R3_ARG1);
   assert(R11_scratch1 == R11, "need prologue scratch register");
@@ -3484,6 +3485,7 @@
 
         // Emit the trampoline stub which will be related to the branch-and-link below.
         CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+        if (Compile::current()->env()->failing()) { return; } // Code cache may be full.
         __ relocate(_optimized_virtual ?
                     relocInfo::opt_virtual_call_type : relocInfo::static_call_type);
       }
@@ -3527,6 +3529,7 @@
 
       // Emit the trampoline stub which will be related to the branch-and-link below.
       CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+      if (ra_->C->env()->failing()) { return; } // Code cache may be full.
       assert(_optimized_virtual, "methodHandle call should be a virtual call");
       __ relocate(relocInfo::opt_virtual_call_type);
     }
@@ -3577,9 +3580,7 @@
       const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none);
       const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
       CallStubImpl::emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());
-
-      if (ra_->C->env()->failing())
-        return;
+      if (ra_->C->env()->failing()) { return; } // Code cache may be full.
 
       // Build relocation at call site with ic position as data.
       assert((_load_ic_hi_node != NULL && _load_ic_node == NULL) ||
@@ -5638,19 +5639,6 @@
   ins_pipe(pipe_class_memory);
 %}
 
-//// Load compressed klass and decode it if narrow_klass_shift == 0.
-//// TODO: will narrow_klass_shift ever be 0?
-//instruct decodeNKlass2Klass(iRegPdst dst, memory mem) %{
-//  match(Set dst (DecodeNKlass (LoadNKlass mem)));
-//  predicate(false /* TODO: PPC port Universe::narrow_klass_shift() == 0*);
-//  ins_cost(MEMORY_REF_COST);
-//
-//  format %{ "LWZ     $dst, $mem \t// DecodeNKlass (unscaled)" %}
-//  size(4);
-//  ins_encode( enc_lwz(dst, mem) );
-//  ins_pipe(pipe_class_memory);
-//%}
-
 // Load Klass Pointer
 instruct loadKlass(iRegPdst dst, memoryAlg4 mem) %{
   match(Set dst (LoadKlass mem));
@@ -6070,11 +6058,15 @@
   %}
 %}
 
-instruct loadConNKlass_hi(iRegNdst dst, immNKlass src) %{
+// We have seen a safepoint between the hi and lo parts, and this node was handled
+// as an oop. Therefore this needs a match rule so that build_oop_map knows this is
+// not a narrow oop.
+instruct loadConNKlass_hi(iRegNdst dst, immNKlass_NM src) %{
+  match(Set dst src);
   effect(DEF dst, USE src);
   ins_cost(DEFAULT_COST);
 
-  format %{ "LIS     $dst, $src \t// narrow oop hi" %}
+  format %{ "LIS     $dst, $src \t// narrow klass hi" %}
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_addis);
@@ -6084,6 +6076,21 @@
   ins_pipe(pipe_class_default);
 %}
 
+// As loadConNKlass_hi this must be recognized as narrow klass, not oop!
+instruct loadConNKlass_mask(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
+  match(Set dst src1);
+  effect(TEMP src2);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "MASK    $dst, $src2, 0xFFFFFFFF" %} // mask
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ clrldi($dst$$Register, $src2$$Register, 0x20);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 // This needs a match rule so that build_oop_map knows this is
 // not a narrow oop.
 instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
@@ -6091,10 +6098,10 @@
   effect(TEMP src2);
   ins_cost(DEFAULT_COST);
 
-  format %{ "ADDI    $dst, $src1, $src2 \t// narrow oop lo" %}
-  size(4);
-  ins_encode %{
-    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+  format %{ "ORI    $dst, $src1, $src2 \t// narrow klass lo" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_ori);
     intptr_t Csrc = Klass::encode_klass((Klass *)$src1$$constant);
     assert(__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
     int klass_index = __ oop_recorder()->find_index((Klass *)$src1$$constant);
@@ -6125,10 +6132,11 @@
     MachNode *m2 = m1;
     if (!Assembler::is_uimm((jlong)Klass::encode_klass((Klass *)op_src->constant()), 31)) {
       // Value might be 1-extended. Mask out these bits.
-      m2 = new clearMs32bNode();
+      m2 = new loadConNKlass_maskNode();
       m2->add_req(NULL, m1);
       m2->_opnds[0] = op_dst;
-      m2->_opnds[1] = op_dst;
+      m2->_opnds[1] = op_src;
+      m2->_opnds[2] = op_dst;
       ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
       nodes->push(m2);
     }
@@ -6973,7 +6981,7 @@
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
-    __ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_oop_shift(), 32);
+    __ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_klass_shift(), 32);
   %}
   ins_pipe(pipe_class_default);
 %}
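
Background on the new loadConNKlass_mask node: the hi half is materialized with LIS, which sign-extends, so a constant with bit 31 set ("1-extended", per the comment in the expansion) leaves ones in the upper word that must be masked off. clrldi(dst, src, 0x20) is rldicl with a rotate of 0 and a clear count of 32; in plain C++ terms it computes (a hedged illustration, not HotSpot code):

    #include <cstdint>

    // What clrldi(dst, src, 0x20) computes: clear the 32 high-order bits.
    uint64_t clrldi_32(uint64_t src) {
      return src & 0xFFFFFFFFull;
    }
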
--- a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Thu Oct 09 12:36:20 2014 -0700
@@ -24,7 +24,6 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_ppc.hpp"
@@ -39,9 +38,6 @@
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/top.hpp"
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
 #include "runtime/thread.inline.hpp"
 
 #define __ _masm->
@@ -216,7 +212,7 @@
     {
       BLOCK_COMMENT("Call frame manager or native entry.");
       // Call frame manager or native entry.
-      Register r_new_arg_entry = R14; // PPC_state;
+      Register r_new_arg_entry = R14;
       assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
                                  r_arg_method, r_arg_thread);
 
--- a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Thu Oct 09 12:36:20 2014 -0700
@@ -353,7 +353,6 @@
   __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
   __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
   __ bne(CCR0, notInt);
-  __ isync(); // Order load of constant wrt. tags.
   __ lwax(R17_tos, Rcpool, Rscratch1);
   __ push(itos);
   __ b(exit);
@@ -365,7 +364,6 @@
   __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
   __ asm_assert_eq("unexpected type", 0x8765);
 #endif
-  __ isync(); // Order load of constant wrt. tags.
   __ lfsx(F15_ftos, Rcpool, Rscratch1);
   __ push(ftos);
 
@@ -424,13 +422,11 @@
   // Check out Conversions.java for an example.
   // Also ConstantPool::header_size() is 20, which makes it very difficult
   // to double-align double on the constant pool. SG, 11/7/97
-  __ isync(); // Order load of constant wrt. tags.
   __ lfdx(F15_ftos, Rcpool, Rindex);
   __ push(dtos);
   __ b(Lexit);
 
   __ bind(Llong);
-  __ isync(); // Order load of constant wrt. tags.
   __ ldx(R17_tos, Rcpool, Rindex);
   __ push(ltos);
 
--- a/hotspot/src/os_cpu/linux_ppc/vm/prefetch_linux_ppc.inline.hpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/os_cpu/linux_ppc/vm/prefetch_linux_ppc.inline.hpp	Thu Oct 09 12:36:20 2014 -0700
@@ -47,4 +47,4 @@
     );
 }
 
-#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_OJDKPPC_HPP
+#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_PPC_INLINE_HPP
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Oct 09 12:36:20 2014 -0700
@@ -2069,14 +2069,14 @@
   LIR_Opr base_op = base.result();
   LIR_Opr index_op = idx.result();
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   }
   if (x->has_index()) {
-    if (x->index()->type()->tag() == longTag) {
+    if (index_op->type() == T_LONG) {
       LIR_Opr long_index_op = index_op;
-      if (x->index()->type()->is_constant()) {
+      if (index_op->is_constant()) {
         long_index_op = new_register(T_LONG);
         __ move(index_op, long_index_op);
       }
@@ -2091,14 +2091,14 @@
   assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
 #else
   if (x->has_index()) {
-    if (x->index()->type()->tag() == intTag) {
-      if (!x->index()->type()->is_constant()) {
+    if (index_op->type() == T_INT) {
+      if (!index_op->is_constant()) {
         index_op = new_register(T_LONG);
         __ convert(Bytecodes::_i2l, idx.result(), index_op);
       }
     } else {
-      assert(x->index()->type()->tag() == longTag, "must be");
-      if (x->index()->type()->is_constant()) {
+      assert(index_op->type() == T_LONG, "must be");
+      if (index_op->is_constant()) {
         index_op = new_register(T_LONG);
         __ move(idx.result(), index_op);
       }
@@ -2179,12 +2179,12 @@
   LIR_Opr index_op = idx.result();
 
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   }
   if (x->has_index()) {
-    if (x->index()->type()->tag() == longTag) {
+    if (index_op->type() == T_LONG) {
       index_op = new_register(T_INT);
       __ convert(Bytecodes::_l2i, idx.result(), index_op);
     }
@@ -2194,7 +2194,7 @@
   assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be a non-constant int");
 #else
   if (x->has_index()) {
-    if (x->index()->type()->tag() == intTag) {
+    if (index_op->type() == T_INT) {
       index_op = new_register(T_LONG);
       __ convert(Bytecodes::_i2l, idx.result(), index_op);
     }
--- a/hotspot/src/share/vm/code/codeCache.cpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Thu Oct 09 12:36:20 2014 -0700
@@ -254,8 +254,7 @@
   if (!SegmentedCodeCache) {
     // No segmentation: use a single code heap
     return (code_blob_type == CodeBlobType::All);
-  } else if ((Arguments::mode() == Arguments::_int) ||
-             (TieredStopAtLevel == CompLevel_none)) {
+  } else if (Arguments::mode() == Arguments::_int) {
     // Interpreter only: we don't need any method code heaps
     return (code_blob_type == CodeBlobType::NonNMethod);
   } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/share/vm/opto/compile.cpp	Thu Oct 09 12:36:20 2014 -0700
@@ -1153,12 +1153,18 @@
   assert(s == start(), "");
 }
 
+/**
+ * Return the 'StartNode'. We must not have a pending failure, since the ideal graph
+ * can be in an inconsistent state, i.e., we can get segmentation faults when traversing
+ * the ideal graph.
+ */
 StartNode* Compile::start() const {
-  assert(!failing(), "");
+  assert(!failing(), err_msg_res("Must not have pending failure. Reason is: %s", failure_reason()));
   for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
     Node* start = root()->fast_out(i);
-    if( start->is_Start() )
+    if (start->is_Start()) {
       return start->as_Start();
+    }
   }
   fatal("Did not find Start node!");
   return NULL;
--- a/hotspot/src/share/vm/opto/compile.hpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/share/vm/opto/compile.hpp	Thu Oct 09 12:36:20 2014 -0700
@@ -707,12 +707,15 @@
   void sort_expensive_nodes();
 
   // Compilation environment.
-  Arena*            comp_arena()                { return &_comp_arena; }
-  ciEnv*            env() const                 { return _env; }
-  CompileLog*       log() const                 { return _log; }
-  bool              failing() const             { return _env->failing() || _failure_reason != NULL; }
-  const char*       failure_reason() { return _failure_reason; }
-  bool              failure_reason_is(const char* r) { return (r==_failure_reason) || (r!=NULL && _failure_reason!=NULL && strcmp(r, _failure_reason)==0); }
+  Arena*      comp_arena()           { return &_comp_arena; }
+  ciEnv*      env() const            { return _env; }
+  CompileLog* log() const            { return _log; }
+  bool        failing() const        { return _env->failing() || _failure_reason != NULL; }
+  const char* failure_reason() const { return (_env->failing()) ? _env->failure_reason() : _failure_reason; }
+
+  bool failure_reason_is(const char* r) const {
+    return (r == _failure_reason) || (r != NULL && _failure_reason != NULL && strcmp(r, _failure_reason) == 0);
+  }
 
   void record_failure(const char* reason);
   void record_method_not_compilable(const char* reason, bool all_tiers = false) {
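
The reworked failure_reason() now falls back to the ciEnv's reason when the environment has failed, so the assert added in compile.cpp above can print a meaningful message for env-level bailouts instead of NULL. A hedged usage sketch (the surrounding code is made up; the accessors are those declared above):

    // Hedged sketch, not part of this changeset.
    if (C->failing()) {
      tty->print_cr("Bailout: %s", C->failure_reason());
    }
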
--- a/hotspot/src/share/vm/opto/doCall.cpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/share/vm/opto/doCall.cpp	Thu Oct 09 12:36:20 2014 -0700
@@ -802,10 +802,16 @@
     // each arm of the Phi.  If I know something clever about the exceptions
     // I'm loading the class from, I can replace the LoadKlass with the
     // klass constant for the exception oop.
-    if( ex_node->is_Phi() ) {
-      ex_klass_node = new PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
-      for( uint i = 1; i < ex_node->req(); i++ ) {
-        Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
+    if (ex_node->is_Phi()) {
+      ex_klass_node = new PhiNode(ex_node->in(0), TypeKlassPtr::OBJECT);
+      for (uint i = 1; i < ex_node->req(); i++) {
+        Node* ex_in = ex_node->in(i);
+        if (ex_in == top() || ex_in == NULL) {
+          // This path was not taken.
+          ex_klass_node->init_req(i, top());
+          continue;
+        }
+        Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
         Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
         ex_klass_node->init_req( i, k );
       }
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Thu Oct 09 12:36:20 2014 -0700
@@ -540,17 +540,25 @@
     // If there are no current activations of this method on the
     // stack we can safely convert it to a zombie method
     if (nm->can_not_entrant_be_converted()) {
-      if (PrintMethodFlushing && Verbose) {
-        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
-      }
       // Clear ICStubs to prevent back patching stubs of zombie or unloaded
       // nmethods during the next safepoint (see ICStub::finalize).
-      MutexLocker cl(CompiledIC_lock);
-      nm->clear_ic_stubs();
-      // Code cache state change is tracked in make_zombie()
-      nm->make_zombie();
-      _zombified_count++;
-      SWEEP(nm);
+      {
+        MutexLocker cl(CompiledIC_lock);
+        nm->clear_ic_stubs();
+      }
+      // Acquiring the CompiledIC_lock may block for a safepoint and set the
+      // nmethod to zombie (see 'CodeCache::make_marked_nmethods_zombies').
+      // Check if nmethod is still non-entrant at this point.
+      if (nm->is_not_entrant()) {
+        if (PrintMethodFlushing && Verbose) {
+          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
+        }
+        // Code cache state change is tracked in make_zombie()
+        nm->make_zombie();
+        _zombified_count++;
+        SWEEP(nm);
+      }
+      assert(nm->is_zombie(), "nmethod must be zombie");
     } else {
       // Still alive, clean up its inline caches
       MutexLocker cl(CompiledIC_lock);
--- a/hotspot/test/TEST.groups	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/test/TEST.groups	Thu Oct 09 12:36:20 2014 -0700
@@ -447,7 +447,7 @@
   compiler/codegen/ \
   compiler/cpuflags/RestoreMXCSR.java \
   compiler/EscapeAnalysis/ \
-  compiler/exceptions/TestRecursiveReplacedException.java \
+  compiler/exceptions/ \
   compiler/floatingpoint/ModNaN.java \
   compiler/gcbarriers/G1CrashTest.java \
   compiler/inlining/ \
--- a/hotspot/test/compiler/codecache/CheckSegmentedCodeCache.java	Thu Oct 09 13:42:29 2014 +0200
+++ b/hotspot/test/compiler/codecache/CheckSegmentedCodeCache.java	Thu Oct 09 12:36:20 2014 -0700
@@ -38,22 +38,26 @@
 
   private static void verifySegmentedCodeCache(ProcessBuilder pb, boolean enabled) throws Exception {
     OutputAnalyzer out = new OutputAnalyzer(pb.start());
+    out.shouldHaveExitValue(0);
     if (enabled) {
       try {
         // Non-nmethod code heap should be always available with the segmented code cache
         out.shouldContain(NON_METHOD);
       } catch (RuntimeException e) {
-        // TieredCompilation is disabled in a client VM
-        out.shouldContain("TieredCompilation is disabled in this release.");
+        // Check if TieredCompilation is disabled (in a client VM)
+        if (!out.getOutput().contains("TieredCompilation is disabled in this release.")) {
+          // Code cache is not segmented
+          throw new RuntimeException("No code cache segmentation.");
+        }
       }
     } else {
       out.shouldNotContain(NON_METHOD);
     }
-    out.shouldHaveExitValue(0);
   }
 
   private static void verifyCodeHeapNotExists(ProcessBuilder pb, String... heapNames) throws Exception {
     OutputAnalyzer out = new OutputAnalyzer(pb.start());
+    out.shouldHaveExitValue(0);
     for (String name : heapNames) {
       out.shouldNotContain(name);
     }
@@ -86,6 +90,10 @@
                                                "-XX:ReservedCodeCacheSize=240m",
                                                "-XX:+PrintCodeCache", "-version");
     verifySegmentedCodeCache(pb, true);
+    pb = ProcessTools.createJavaProcessBuilder("-XX:+TieredCompilation",
+                                               "-XX:ReservedCodeCacheSize=400m",
+                                               "-XX:+PrintCodeCache", "-version");
+    verifySegmentedCodeCache(pb, true);
 
     // Always enabled if SegmentedCodeCache is set
     pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
@@ -100,12 +108,13 @@
                                                "-Xint",
                                                "-XX:+PrintCodeCache", "-version");
     verifyCodeHeapNotExists(pb, PROFILED, NON_PROFILED);
+
+    // If we stop compilation at CompLevel_none or CompLevel_simple we
+    // don't need a profiled code heap.
     pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
                                                "-XX:TieredStopAtLevel=0",
                                                "-XX:+PrintCodeCache", "-version");
-    verifyCodeHeapNotExists(pb, PROFILED, NON_PROFILED);
-
-    // If we stop compilation at CompLevel_simple
+    verifyCodeHeapNotExists(pb, PROFILED);
     pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
                                                "-XX:TieredStopAtLevel=1",
                                                "-XX:+PrintCodeCache", "-version");
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/exceptions/CatchInlineExceptions.java	Thu Oct 09 12:36:20 2014 -0700
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8059299
+ * @summary assert(adr_type != NULL) failed: expecting TypeKlassPtr
+ * @run main/othervm -Xbatch CatchInlineExceptions
+ */
+
+class Exception1 extends Exception {}
+class Exception2 extends Exception {}
+
+public class CatchInlineExceptions {
+    private static int counter0;
+    private static int counter1;
+    private static int counter2;
+    private static int counter;
+
+    static void foo(int i) throws Exception {
+        if ((i & 1023) == 2) {
+            counter0++;
+            throw new Exception2();
+        }
+    }
+
+    static void test(int i) throws Exception {
+        try {
+            foo(i);
+        }
+        catch (Exception e) {
+            if (e instanceof Exception1) {
+                counter1++;
+            } else if (e instanceof Exception2) {
+                counter2++;
+            }
+            counter++;
+            throw e;
+        }
+    }
+
+    public static void main(String[] args) throws Throwable {
+        for (int i = 0; i < 15000; i++) {
+            try {
+                test(i);
+            } catch (Exception e) {
+                // expected
+            }
+        }
+        if (counter1 != 0) {
+            throw new RuntimeException("Failed: counter1(" + counter1  + ") != 0");
+        }
+        if (counter2 != counter0) {
+            throw new RuntimeException("Failed: counter2(" + counter2  + ") != counter0(" + counter0  + ")");
+        }
+        if (counter2 != counter) {
+            throw new RuntimeException("Failed: counter2(" + counter2  + ") != counter(" + counter  + ")");
+        }
+        System.out.println("TEST PASSED");
+    }
+}