8194861: PPC64 : Need support for VSR spills in ppc.ad
author  mhorie
Wed, 24 Jan 2018 11:22:50 +0100
changeset 48810 1f7ebe9dd5b2
parent 48809 a81c930a8838
child 48811 58787a1708d2
8194861: PPC64 : Need support for VSR spills in ppc.ad
Reviewed-by: mdoerr, goetz
src/hotspot/cpu/ppc/assembler_ppc.hpp
src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
src/hotspot/cpu/ppc/ppc.ad
src/hotspot/cpu/ppc/vm_version_ppc.cpp
--- a/src/hotspot/cpu/ppc/assembler_ppc.hpp	Thu Jan 18 19:21:11 2018 +0100
+++ b/src/hotspot/cpu/ppc/assembler_ppc.hpp	Wed Jan 24 11:22:50 2018 +0100
@@ -518,6 +518,7 @@
     XXMRGHW_OPCODE = (60u << OPCODE_SHIFT |   18u << 3),
     XXMRGLW_OPCODE = (60u << OPCODE_SHIFT |   50u << 3),
     XXSPLTW_OPCODE = (60u << OPCODE_SHIFT |  164u << 2),
+    XXLOR_OPCODE   = (60u << OPCODE_SHIFT |  146u << 3),
     XXLXOR_OPCODE  = (60u << OPCODE_SHIFT |  154u << 3),
     XXLEQV_OPCODE  = (60u << OPCODE_SHIFT |  186u << 3),
 
@@ -2162,6 +2163,7 @@
   inline void mtvsrd(   VectorSRegister d, Register a);
   inline void mtvsrwz(  VectorSRegister d, Register a);
   inline void xxspltw(  VectorSRegister d, VectorSRegister b, int ui2);
+  inline void xxlor(    VectorSRegister d, VectorSRegister a, VectorSRegister b);
   inline void xxlxor(   VectorSRegister d, VectorSRegister a, VectorSRegister b);
   inline void xxleqv(   VectorSRegister d, VectorSRegister a, VectorSRegister b);
 
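For context: XXLOR is an XX3-form VSX instruction. The primary opcode 60 sits in the top six bits (OPCODE_SHIFT), the 8-bit extended opcode (146 for xxlor, 154 for xxlxor, 186 for xxleqv) is shifted left by 3, and the low three bits stay free for the AX/BX/TX extension bits that address VSRs 32-63. A minimal standalone sketch of how the fields combine, following the Power ISA XX3 layout (illustrative C++ only; inside HotSpot the vsrt()/vsra()/vsrb() helpers fill the register fields):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t OPCODE_SHIFT = 32 - 6;                              // primary opcode in bits 26-31
      const uint32_t XXLOR_OPCODE = (60u << OPCODE_SHIFT) | (146u << 3); // PO=60, XO=146, as in the hunk above

      // Encode xxlor vs52,vs52,vs52. VSR numbers are 6 bits wide: the low 5 bits
      // land in the T/A/B fields, the 6th bit in the TX/AX/BX extension bits.
      uint32_t r = 52;
      uint32_t insn = XXLOR_OPCODE
                    | ((r & 31) << 21) | ((r >> 5) & 1)          // T field; TX is bit 0
                    | ((r & 31) << 16) | (((r >> 5) & 1) << 2)   // A field; AX is bit 2
                    | ((r & 31) << 11) | (((r >> 5) & 1) << 1);  // B field; BX is bit 1
      printf("xxlor vs52,vs52,vs52 = 0x%08x\n", insn);
      return 0;
    }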
--- a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp	Thu Jan 18 19:21:11 2018 +0100
+++ b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp	Wed Jan 24 11:22:50 2018 +0100
@@ -766,6 +766,7 @@
 inline void Assembler::mtvsrd(  VectorSRegister d, Register a)               { emit_int32( MTVSRD_OPCODE  | vsrt(d)  | ra(a)); }
 inline void Assembler::mtvsrwz( VectorSRegister d, Register a)               { emit_int32( MTVSRWZ_OPCODE | vsrt(d) | ra(a)); }
 inline void Assembler::xxspltw( VectorSRegister d, VectorSRegister b, int ui2)           { emit_int32( XXSPLTW_OPCODE | vsrt(d) | vsrb(b) | xxsplt_uim(uimm(ui2,2))); }
+inline void Assembler::xxlor(   VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXLOR_OPCODE  | vsrt(d) | vsra(a) | vsrb(b)); }
 inline void Assembler::xxlxor(  VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXLXOR_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
 inline void Assembler::xxleqv(  VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXLEQV_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
 inline void Assembler::mtvrd(    VectorRegister d, Register a)               { emit_int32( MTVSRD_OPCODE  | vsrt(d->to_vsr()) | ra(a)); }
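Why xxlor is needed at all: with both source operands equal it degenerates into a plain register move (x | x == x), the VSX counterpart of the classic or/mr move idiom for GPRs, while the already-present xxlxor with identical operands is the zeroing idiom (x ^ x == 0). A sketch of both, assuming a MacroAssembler _masm in scope with the usual __ shortcut, as in the ppc.ad spill code below:

    __ xxlor (VSR10, VSR11, VSR11);  // VSR10 <- VSR11: OR of a register with itself is a copy
    __ xxlxor(VSR10, VSR10, VSR10);  // VSR10 <- 0:     XOR of a register with itself is zero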
--- a/src/hotspot/cpu/ppc/ppc.ad	Thu Jan 18 19:21:11 2018 +0100
+++ b/src/hotspot/cpu/ppc/ppc.ad	Wed Jan 24 11:22:50 2018 +0100
@@ -1656,9 +1656,9 @@
 
 // =============================================================================
 
-// Figure out which register class each belongs in: rc_int, rc_float or
+// Figure out which register class each belongs in: rc_int, rc_float, rc_vs or
 // rc_stack.
-enum RC { rc_bad, rc_int, rc_float, rc_stack };
+enum RC { rc_bad, rc_int, rc_float, rc_vs, rc_stack };
 
 static enum RC rc_class(OptoReg::Name reg) {
   // Return the register class for the given register. The given register
@@ -1673,6 +1673,9 @@
   // We have 64 floating-point register halves, starting at index 64.
   if (reg < 64+64) return rc_float;
 
+  // We have 64 vector-scalar registers, starting at index 128.
+  if (reg < 64+64+64) return rc_vs;
+
   // Between float regs & stack are the flags regs.
   assert(OptoReg::is_stack(reg) || reg < 64+64+64, "blow up if spilling flags");
 
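For orientation, the OptoReg index space that rc_class() now partitions: GPR halves at indices 0..63, FPR halves at 64..127, VSRs at 128..191, and the flags registers between the VSRs and the stack slots. A standalone sketch of the same decision chain (illustrative only; the bounds come from the comments above):

    enum RC { rc_bad, rc_int, rc_float, rc_vs, rc_stack };

    static RC rc_class_sketch(int reg, bool is_stack) {
      if (reg < 0)        return rc_bad;    // OptoReg::Bad
      if (reg < 64)       return rc_int;    // GPR halves: indices   0..63
      if (reg < 64+64)    return rc_float;  // FPR halves: indices  64..127
      if (reg < 64+64+64) return rc_vs;     // VSRs:       indices 128..191
      return is_stack ? rc_stack : rc_bad;  // flags regs must never be spilled
    }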
@@ -1735,6 +1738,58 @@
   if (src_lo == dst_lo && src_hi == dst_hi)
     return size;            // Self copy, no move.
 
+  if (bottom_type()->isa_vect() != NULL && ideal_reg() == Op_VecX) {
+    // Memory->Memory Spill.
+    if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
+      int src_offset = ra_->reg2offset(src_lo);
+      int dst_offset = ra_->reg2offset(dst_lo);
+      if (cbuf) {
+        MacroAssembler _masm(cbuf);
+        __ ld(R0, src_offset, R1_SP);
+        __ std(R0, dst_offset, R1_SP);
+        __ ld(R0, src_offset+8, R1_SP);
+        __ std(R0, dst_offset+8, R1_SP);
+      }
+      size += 16;
+    }
+    // VectorSRegister->Memory Spill.
+    else if (src_lo_rc == rc_vs && dst_lo_rc == rc_stack) {
+      VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
+      int dst_offset = ra_->reg2offset(dst_lo);
+      if (cbuf) {
+        MacroAssembler _masm(cbuf);
+        __ addi(R0, R1_SP, dst_offset);
+        __ stxvd2x(Rsrc, R0);
+      }
+      size += 8;
+    }
+    // Memory->VectorSRegister Spill.
+    else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vs) {
+      VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
+      int src_offset = ra_->reg2offset(src_lo);
+      if (cbuf) {
+        MacroAssembler _masm(cbuf);
+        __ addi(R0, R1_SP, src_offset);
+        __ lxvd2x(Rdst, R0);
+      }
+      size += 8;
+    }
+    // VectorSRegister->VectorSRegister.
+    else if (src_lo_rc == rc_vs && dst_lo_rc == rc_vs) {
+      VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
+      VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
+      if (cbuf) {
+        MacroAssembler _masm(cbuf);
+        __ xxlor(Rdst, Rsrc, Rsrc);
+      }
+      size += 4;
+    }
+    else {
+      ShouldNotReachHere(); // No VSR spill.
+    }
+    return size;
+  }
+
   // --------------------------------------
   // Memory->Memory Spill. Use R0 to hold the value.
   if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
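The new VecX branch covers all four spill directions, and the size bookkeeping has to stay exact because this method is also called with cbuf == NULL purely to measure code size. Every PPC64 instruction is 4 bytes, and lxvd2x/stxvd2x take their effective address from a GPR, hence the extra addi that materializes R1_SP + offset in R0. A standalone summary of the cost per direction (illustrative, reusing the RC sketch above):

    static int vecx_spill_size(RC src, RC dst) {
      if (src == rc_stack && dst == rc_stack) return 16;  // ld+std twice via R0 (two 8-byte halves)
      if (src == rc_vs    && dst == rc_stack) return  8;  // addi (address in R0) + stxvd2x
      if (src == rc_stack && dst == rc_vs)    return  8;  // addi (address in R0) + lxvd2x
      if (src == rc_vs    && dst == rc_vs)    return  4;  // xxlor Rdst, Rsrc, Rsrc (move idiom)
      return -1;                                          // mirrors ShouldNotReachHere()
    }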
@@ -3524,7 +3579,7 @@
     assert(loadConLNodes._last->bottom_type()->isa_long(), "must be long");
   %}
 
-  enc_class postalloc_expand_load_replF_constant_vsx(vecX dst, immF src, iRegLdst toc) %{
+  enc_class postalloc_expand_load_replF_constant_vsx(vecX dst, immF src, iRegLdst toc, iRegLdst tmp) %{
     // Create new nodes.
 
     // Make an operand with the bit pattern to load as float.
@@ -3533,8 +3588,8 @@
 
     loadConLReplicatedNodesTuple loadConLNodes =
       loadConLReplicatedNodesTuple_create(C, ra_, n_toc, op_repl, op_dst, op_zero,
-                                OptoReg::Name(R20_H_num), OptoReg::Name(R20_num),
-                                OptoReg::Name(VSR11_num), OptoReg::Name(VSR10_num));
+                                ra_->get_reg_second(n_tmp), ra_->get_reg_first(n_tmp),
+                                ra_->get_reg_second(this), ra_->get_reg_first(this));
 
     // Push new nodes.
     if (loadConLNodes._large_hi) { nodes->push(loadConLNodes._large_hi); }
@@ -14013,12 +14068,13 @@
   %}
 %}
 
-instruct repl4F_immF_Ex(vecX dst, immF src) %{
+instruct repl4F_immF_Ex(vecX dst, immF src, iRegLdst tmp) %{
   match(Set dst (ReplicateF src));
   predicate(n->as_Vector()->length() == 4);
+  effect(TEMP tmp);
   ins_cost(10 * DEFAULT_COST);
 
-  postalloc_expand( postalloc_expand_load_replF_constant_vsx(dst, src, constanttablebase) );
+  postalloc_expand( postalloc_expand_load_replF_constant_vsx(dst, src, constanttablebase, tmp) );
 %}
 
 instruct repl4F_immF0(vecX dst, immF_0 zero) %{
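The two repl4F hunks are one logical fix: the postalloc expand used to write into hard-coded scratch registers (R20 and VSR10/VSR11), which becomes unsafe once SuperwordUseVSX code actually runs, since those registers can hold live values at the expansion point. Declaring effect(TEMP tmp) makes the register allocator hand out a scratch register that is dead across the expand, and the enc_class then asks the allocator where the operands landed instead of assuming fixed names. The accessors used above (standard PhaseRegAlloc API):

    ra_->get_reg_first(n_tmp)   // low half of the allocator-chosen TEMP (iRegLdst is a long pair)
    ra_->get_reg_second(n_tmp)  // high half of the TEMP operand
    ra_->get_reg_first(this)    // register assigned to this node's result (dst)
    ra_->get_reg_second(this)   // second half of the result register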
--- a/src/hotspot/cpu/ppc/vm_version_ppc.cpp	Thu Jan 18 19:21:11 2018 +0100
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.cpp	Wed Jan 24 11:22:50 2018 +0100
@@ -109,8 +109,7 @@
 
   if (PowerArchitecturePPC64 >= 8) {
     if (FLAG_IS_DEFAULT(SuperwordUseVSX)) {
-      // TODO: Switch on when it works stable. Currently, MachSpillCopyNode::implementation code is missing.
-      //FLAG_SET_ERGO(bool, SuperwordUseVSX, true);
+      FLAG_SET_ERGO(bool, SuperwordUseVSX, true);
     }
   } else {
     if (SuperwordUseVSX) {
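With spill support in place, the TODO above no longer applies, so SuperwordUseVSX is enabled ergonomically on Power8 and newer; the else branch still handles the case where the flag is requested explicitly on older chips. The resulting default can be inspected with java -XX:+PrintFlagsFinal -version, and java -XX:-SuperwordUseVSX remains an explicit opt-out on Power8+.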