8185969: PPC64: Improve VSR support to use up to 64 registers
author mdoerr
Mon, 14 Aug 2017 16:48:44 +0200
changeset 46809 057f21a10f5f
parent 46808 76fb4ae9fd2f
child 46810 7dad333205cd
8185969: PPC64: Improve VSR support to use up to 64 registers
Reviewed-by: mdoerr, goetz
Contributed-by: Gustavo Serra Scalet <gustavo.scalet@eldorado.org.br>
hotspot/src/cpu/ppc/vm/assembler_ppc.hpp
hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp
hotspot/src/cpu/ppc/vm/register_ppc.cpp
hotspot/src/cpu/ppc/vm/register_ppc.hpp
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp	Sat Aug 12 01:21:21 2017 +0000
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp	Mon Aug 14 16:48:44 2017 +0200
@@ -510,8 +510,13 @@
     LXVD2X_OPCODE  = (31u << OPCODE_SHIFT |  844u << 1),
     STXVD2X_OPCODE = (31u << OPCODE_SHIFT |  972u << 1),
     MTVSRD_OPCODE  = (31u << OPCODE_SHIFT |  179u << 1),
+    MTVSRWZ_OPCODE = (31u << OPCODE_SHIFT |  243u << 1),
     MFVSRD_OPCODE  = (31u << OPCODE_SHIFT |   51u << 1),
     MTVSRWA_OPCODE = (31u << OPCODE_SHIFT |  211u << 1),
+    MFVSRWZ_OPCODE = (31u << OPCODE_SHIFT |  115u << 1),
+    XXPERMDI_OPCODE= (60u << OPCODE_SHIFT |   10u << 3),
+    XXMRGHW_OPCODE = (60u << OPCODE_SHIFT |   18u << 3),
+    XXMRGLW_OPCODE = (60u << OPCODE_SHIFT |   50u << 3),
 
     // Vector Permute and Formatting
     VPKPX_OPCODE   = (4u  << OPCODE_SHIFT |  782u     ),
@@ -561,6 +566,7 @@
     VADDUBM_OPCODE = (4u  << OPCODE_SHIFT |    0u     ),
     VADDUWM_OPCODE = (4u  << OPCODE_SHIFT |  128u     ),
     VADDUHM_OPCODE = (4u  << OPCODE_SHIFT |   64u     ),
+    VADDUDM_OPCODE = (4u  << OPCODE_SHIFT |  192u     ),
     VADDUBS_OPCODE = (4u  << OPCODE_SHIFT |  512u     ),
     VADDUWS_OPCODE = (4u  << OPCODE_SHIFT |  640u     ),
     VADDUHS_OPCODE = (4u  << OPCODE_SHIFT |  576u     ),
@@ -1099,16 +1105,19 @@
   static int vrs(   VectorRegister r)  { return  vrs(r->encoding());}
   static int vrt(   VectorRegister r)  { return  vrt(r->encoding());}
 
+  // Only used on SHA sigma instructions (VX-form)
+  static int vst(      int         x)  { return  opp_u_field(x,             16, 16); }
+  static int vsix(     int         x)  { return  opp_u_field(x,             20, 17); }
+
   // Support Vector-Scalar (VSX) instructions.
-  static int vsra(      int         x)  { return  opp_u_field(x,            15, 11); }
-  static int vsrb(      int         x)  { return  opp_u_field(x,            20, 16); }
-  static int vsrc(      int         x)  { return  opp_u_field(x,            25, 21); }
-  static int vsrs(      int         x)  { return  opp_u_field(x,            10,  6); }
-  static int vsrt(      int         x)  { return  opp_u_field(x,            10,  6); }
+  static int vsra(      int         x)  { return  opp_u_field(x & 0x1F,     15, 11) | opp_u_field((x & 0x20) >> 5, 29, 29); }
+  static int vsrb(      int         x)  { return  opp_u_field(x & 0x1F,     20, 16) | opp_u_field((x & 0x20) >> 5, 30, 30); }
+  static int vsrs(      int         x)  { return  opp_u_field(x & 0x1F,     10,  6) | opp_u_field((x & 0x20) >> 5, 31, 31); }
+  static int vsrt(      int         x)  { return  vsrs(x); }
+  static int vsdm(      int         x)  { return  opp_u_field(x,            23, 22); }
 
   static int vsra(   VectorSRegister r)  { return  vsra(r->encoding());}
   static int vsrb(   VectorSRegister r)  { return  vsrb(r->encoding());}
-  static int vsrc(   VectorSRegister r)  { return  vsrc(r->encoding());}
   static int vsrs(   VectorSRegister r)  { return  vsrs(r->encoding());}
   static int vsrt(   VectorSRegister r)  { return  vsrt(r->encoding());}
 
@@ -2038,6 +2047,7 @@
   inline void vaddubm(  VectorRegister d, VectorRegister a, VectorRegister b);
   inline void vadduwm(  VectorRegister d, VectorRegister a, VectorRegister b);
   inline void vadduhm(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vaddudm(  VectorRegister d, VectorRegister a, VectorRegister b);
   inline void vaddubs(  VectorRegister d, VectorRegister a, VectorRegister b);
   inline void vadduws(  VectorRegister d, VectorRegister a, VectorRegister b);
   inline void vadduhs(  VectorRegister d, VectorRegister a, VectorRegister b);
@@ -2113,6 +2123,7 @@
   inline void vandc(    VectorRegister d, VectorRegister a, VectorRegister b);
   inline void vnor(     VectorRegister d, VectorRegister a, VectorRegister b);
   inline void vor(      VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmr(      VectorRegister d, VectorRegister a);
   inline void vxor(     VectorRegister d, VectorRegister a, VectorRegister b);
   inline void vrld(     VectorRegister d, VectorRegister a, VectorRegister b);
   inline void vrlb(     VectorRegister d, VectorRegister a, VectorRegister b);
@@ -2136,8 +2147,19 @@
   inline void lxvd2x(   VectorSRegister d, Register a, Register b);
   inline void stxvd2x(  VectorSRegister d, Register a);
   inline void stxvd2x(  VectorSRegister d, Register a, Register b);
+  inline void mtvrwz(   VectorRegister  d, Register a);
+  inline void mfvrwz(   Register        a, VectorRegister d);
   inline void mtvrd(    VectorRegister  d, Register a);
   inline void mfvrd(    Register        a, VectorRegister d);
+  inline void xxpermdi( VectorSRegister d, VectorSRegister a, VectorSRegister b, int dm);
+  inline void xxmrghw(  VectorSRegister d, VectorSRegister a, VectorSRegister b);
+  inline void xxmrglw(  VectorSRegister d, VectorSRegister a, VectorSRegister b);
+
+  // VSX Extended Mnemonics
+  inline void xxspltd(  VectorSRegister d, VectorSRegister a, int x);
+  inline void xxmrghd(  VectorSRegister d, VectorSRegister a, VectorSRegister b);
+  inline void xxmrgld(  VectorSRegister d, VectorSRegister a, VectorSRegister b);
+  inline void xxswapd(  VectorSRegister d, VectorSRegister a);
 
   // Vector-Scalar (VSX) instructions.
   inline void mtfprd(   FloatRegister   d, Register a);
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Sat Aug 12 01:21:21 2017 +0000
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Mon Aug 14 16:48:44 2017 +0200
@@ -758,12 +758,23 @@
 inline void Assembler::lvsr(  VectorRegister d, Register s1, Register s2) { emit_int32( LVSR_OPCODE   | vrt(d) | ra0mem(s1) | rb(s2)); }
 
 // Vector-Scalar (VSX) instructions.
-inline void Assembler::lxvd2x (VectorSRegister d, Register s1) { emit_int32( LXVD2X_OPCODE  | vsrt(d) | ra(0) | rb(s1)); }
-inline void Assembler::lxvd2x (VectorSRegister d, Register s1, Register s2) { emit_int32( LXVD2X_OPCODE  | vsrt(d) | ra0mem(s1) | rb(s2)); }
-inline void Assembler::stxvd2x(VectorSRegister d, Register s1) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
-inline void Assembler::stxvd2x(VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
-inline void Assembler::mtvrd(  VectorRegister  d, Register a)               { emit_int32( MTVSRD_OPCODE  | vrt(d)  | ra(a)  | 1u); } // 1u: d is treated as Vector (VMX/Altivec).
-inline void Assembler::mfvrd(  Register        a, VectorRegister d)         { emit_int32( MFVSRD_OPCODE  | vrt(d)  | ra(a)  | 1u); } // 1u: d is treated as Vector (VMX/Altivec).
+inline void Assembler::lxvd2x(  VectorSRegister d, Register s1)              { emit_int32( LXVD2X_OPCODE  | vsrt(d) | ra(0) | rb(s1)); }
+inline void Assembler::lxvd2x(  VectorSRegister d, Register s1, Register s2) { emit_int32( LXVD2X_OPCODE  | vsrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::stxvd2x( VectorSRegister d, Register s1)              { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra(0) | rb(s1)); }
+inline void Assembler::stxvd2x( VectorSRegister d, Register s1, Register s2) { emit_int32( STXVD2X_OPCODE | vsrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::mtvrd(   VectorRegister  d, Register a)               { emit_int32( MTVSRD_OPCODE  | vsrt(d->to_vsr()) | ra(a)); }
+inline void Assembler::mfvrd(   Register        a, VectorRegister d)         { emit_int32( MFVSRD_OPCODE  | vsrt(d->to_vsr()) | ra(a)); }
+inline void Assembler::mtvrwz(  VectorRegister  d, Register a)               { emit_int32( MTVSRWZ_OPCODE | vsrt(d->to_vsr()) | ra(a)); }
+inline void Assembler::mfvrwz(  Register        a, VectorRegister d)         { emit_int32( MFVSRWZ_OPCODE | vsrt(d->to_vsr()) | ra(a)); }
+inline void Assembler::xxpermdi(VectorSRegister d, VectorSRegister a, VectorSRegister b, int dm) { emit_int32( XXPERMDI_OPCODE | vsrt(d) | vsra(a) | vsrb(b) | vsdm(dm)); }
+inline void Assembler::xxmrghw( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXMRGHW_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
+inline void Assembler::xxmrglw( VectorSRegister d, VectorSRegister a, VectorSRegister b) { emit_int32( XXMRGLW_OPCODE | vsrt(d) | vsra(a) | vsrb(b)); }
+
+// VSX Extended Mnemonics
+inline void Assembler::xxspltd( VectorSRegister d, VectorSRegister a, int x)             { xxpermdi(d, a, a, x ? 3 : 0); }
+inline void Assembler::xxmrghd( VectorSRegister d, VectorSRegister a, VectorSRegister b) { xxpermdi(d, a, b, 0); }
+inline void Assembler::xxmrgld( VectorSRegister d, VectorSRegister a, VectorSRegister b) { xxpermdi(d, a, b, 3); }
+inline void Assembler::xxswapd( VectorSRegister d, VectorSRegister a)                    { xxpermdi(d, a, a, 2); }
 
 // Vector-Scalar (VSX) instructions.
 inline void Assembler::mtfprd(  FloatRegister   d, Register a)      { emit_int32( MTVSRD_OPCODE  | frt(d)  | ra(a)); }
@@ -811,6 +822,7 @@
 inline void Assembler::vaddubm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUBM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vadduwm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUWM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vadduhm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUHM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vaddudm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUDM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vaddubs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vadduws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vadduhs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
@@ -887,6 +899,7 @@
 inline void Assembler::vandc(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VANDC_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vnor(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNOR_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vor(     VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VOR_OPCODE      | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmr(     VectorRegister d, VectorRegister a)                   { emit_int32( VOR_OPCODE      | vrt(d) | vra(a) | vrb(a)); }
 inline void Assembler::vxor(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VXOR_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vrld(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLD_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
 inline void Assembler::vrlb(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLB_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
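
For reference, the extended mnemonics added above are thin wrappers around xxpermdi with a fixed doubleword-select (DM) value. A minimal sketch of the selection semantics on two-doubleword values (not HotSpot code; DM is interpreted per the Power ISA, with DM bit 0 as the high-order bit of the 2-bit field):

// Minimal sketch, not HotSpot code: xxpermdi doubleword selection and the
// extended mnemonics defined above, modeled on two-doubleword values.
#include <array>
#include <cstdint>

using Vsr = std::array<uint64_t, 2>;  // [0] = doubleword 0, [1] = doubleword 1

static Vsr xxpermdi(Vsr a, Vsr b, int dm) {
  return { a[(dm >> 1) & 1],   // result dw0 comes from XA, selected by DM bit 0 (high)
           b[dm & 1] };        // result dw1 comes from XB, selected by DM bit 1 (low)
}

// The wrappers match the inline definitions in the patch:
static Vsr xxspltd(Vsr a, int x) { return xxpermdi(a, a, x ? 3 : 0); } // splat dw x
static Vsr xxmrghd(Vsr a, Vsr b) { return xxpermdi(a, b, 0); }         // {a0, b0}
static Vsr xxmrgld(Vsr a, Vsr b) { return xxpermdi(a, b, 3); }         // {a1, b1}
static Vsr xxswapd(Vsr a)        { return xxpermdi(a, a, 2); }         // {a1, a0}
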
--- a/hotspot/src/cpu/ppc/vm/register_ppc.cpp	Sat Aug 12 01:21:21 2017 +0000
+++ b/hotspot/src/cpu/ppc/vm/register_ppc.cpp	Mon Aug 14 16:48:44 2017 +0200
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,8 +81,17 @@
     "VSR0",  "VSR1",  "VSR2",  "VSR3",  "VSR4",  "VSR5",  "VSR6",  "VSR7",
     "VSR8",  "VSR9",  "VSR10", "VSR11", "VSR12", "VSR13", "VSR14", "VSR15",
     "VSR16", "VSR17", "VSR18", "VSR19", "VSR20", "VSR21", "VSR22", "VSR23",
-    "VSR24", "VSR25", "VSR26", "VSR27", "VSR28", "VSR29", "VSR30", "VSR31"
+    "VSR24", "VSR25", "VSR26", "VSR27", "VSR28", "VSR29", "VSR30", "VSR31",
+    "VSR32", "VSR33", "VSR34", "VSR35", "VSR36", "VSR37", "VSR38", "VSR39",
+    "VSR40", "VSR41", "VSR42", "VSR43", "VSR44", "VSR45", "VSR46", "VSR47",
+    "VSR48", "VSR49", "VSR50", "VSR51", "VSR52", "VSR53", "VSR54", "VSR55",
+    "VSR56", "VSR57", "VSR58", "VSR59", "VSR60", "VSR61", "VSR62", "VSR63"
   };
   return is_valid() ? names[encoding()] : "vsnoreg";
 }
 
+// Convert a VectorRegister (VR0..VR31) to the overlapping Vector-Scalar Register (VSR32..VSR63).
+VectorSRegister VectorRegisterImpl::to_vsr() const {
+  if (this == vnoreg) { return vsnoregi; }
+  return as_VectorSRegister(encoding() + 32);
+}
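
For reference, to_vsr() relies on the architectural overlap of the register files: the 32 VMX/Altivec registers VR0..VR31 form the upper half of the 64-entry VSX file, while VSR0..VSR31 overlap the FPRs. A minimal sketch of the encoding relationship (not HotSpot code; plain integer encodings stand in for the register types):

// Minimal sketch, not HotSpot code: the mapping used by VectorRegisterImpl::to_vsr().
// VRn aliases VSR(n + 32); FPRn aliases VSRn.
static int vr_to_vsr(int vr_encoding)   { return vr_encoding + 32; }  // e.g. VR1  -> VSR33
static int fpr_to_vsr(int fpr_encoding) { return fpr_encoding; }      // e.g. FPR1 -> VSR1
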
--- a/hotspot/src/cpu/ppc/vm/register_ppc.hpp	Sat Aug 12 01:21:21 2017 +0000
+++ b/hotspot/src/cpu/ppc/vm/register_ppc.hpp	Mon Aug 14 16:48:44 2017 +0200
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -398,6 +398,11 @@
   return (VectorRegister)(intptr_t)encoding;
 }
 
+// Forward declaration
+// Use VectorSRegister as a shortcut.
+class VectorSRegisterImpl;
+typedef VectorSRegisterImpl* VectorSRegister;
+
 // The implementation of vector registers for the Power architecture
 class VectorRegisterImpl: public AbstractRegisterImpl {
  public:
@@ -415,6 +420,9 @@
   bool is_valid()       const { return   0 <=  value()       &&  value() < number_of_registers; }
 
   const char* name() const;
+
+  // convert to VSR
+  VectorSRegister to_vsr() const;
 };
 
 // The Vector registers of the Power architecture
@@ -491,10 +499,6 @@
 #endif // DONT_USE_REGISTER_DEFINES
 
 
-// Use VectorSRegister as a shortcut.
-class VectorSRegisterImpl;
-typedef VectorSRegisterImpl* VectorSRegister;
-
 inline VectorSRegister as_VectorSRegister(int encoding) {
   return (VectorSRegister)(intptr_t)encoding;
 }
@@ -503,7 +507,7 @@
 class VectorSRegisterImpl: public AbstractRegisterImpl {
  public:
   enum {
-    number_of_registers = 32
+    number_of_registers = 64
   };
 
   // construction
@@ -554,6 +558,38 @@
 CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR29, (29));
 CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR30, (30));
 CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR31, (31));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR32, (32));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR33, (33));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR34, (34));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR35, (35));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR36, (36));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR37, (37));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR38, (38));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR39, (39));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR40, (40));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR41, (41));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR42, (42));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR43, (43));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR44, (44));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR45, (45));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR46, (46));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR47, (47));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR48, (48));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR49, (49));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR50, (50));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR51, (51));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR52, (52));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR53, (53));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR54, (54));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR55, (55));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR56, (56));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR57, (57));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR58, (58));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR59, (59));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR60, (60));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR61, (61));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR62, (62));
+CONSTANT_REGISTER_DECLARATION(VectorSRegister, VSR63, (63));
 
 #ifndef DONT_USE_REGISTER_DEFINES
 #define vsnoregi ((VectorSRegister)(vsnoreg_VectorSRegisterEnumValue))
@@ -589,6 +625,38 @@
 #define VSR29   ((VectorSRegister)(  VSR29_VectorSRegisterEnumValue))
 #define VSR30   ((VectorSRegister)(  VSR30_VectorSRegisterEnumValue))
 #define VSR31   ((VectorSRegister)(  VSR31_VectorSRegisterEnumValue))
+#define VSR32   ((VectorSRegister)(  VSR32_VectorSRegisterEnumValue))
+#define VSR33   ((VectorSRegister)(  VSR33_VectorSRegisterEnumValue))
+#define VSR34   ((VectorSRegister)(  VSR34_VectorSRegisterEnumValue))
+#define VSR35   ((VectorSRegister)(  VSR35_VectorSRegisterEnumValue))
+#define VSR36   ((VectorSRegister)(  VSR36_VectorSRegisterEnumValue))
+#define VSR37   ((VectorSRegister)(  VSR37_VectorSRegisterEnumValue))
+#define VSR38   ((VectorSRegister)(  VSR38_VectorSRegisterEnumValue))
+#define VSR39   ((VectorSRegister)(  VSR39_VectorSRegisterEnumValue))
+#define VSR40   ((VectorSRegister)(  VSR40_VectorSRegisterEnumValue))
+#define VSR41   ((VectorSRegister)(  VSR41_VectorSRegisterEnumValue))
+#define VSR42   ((VectorSRegister)(  VSR42_VectorSRegisterEnumValue))
+#define VSR43   ((VectorSRegister)(  VSR43_VectorSRegisterEnumValue))
+#define VSR44   ((VectorSRegister)(  VSR44_VectorSRegisterEnumValue))
+#define VSR45   ((VectorSRegister)(  VSR45_VectorSRegisterEnumValue))
+#define VSR46   ((VectorSRegister)(  VSR46_VectorSRegisterEnumValue))
+#define VSR47   ((VectorSRegister)(  VSR47_VectorSRegisterEnumValue))
+#define VSR48   ((VectorSRegister)(  VSR48_VectorSRegisterEnumValue))
+#define VSR49   ((VectorSRegister)(  VSR49_VectorSRegisterEnumValue))
+#define VSR50   ((VectorSRegister)(  VSR50_VectorSRegisterEnumValue))
+#define VSR51   ((VectorSRegister)(  VSR51_VectorSRegisterEnumValue))
+#define VSR52   ((VectorSRegister)(  VSR52_VectorSRegisterEnumValue))
+#define VSR53   ((VectorSRegister)(  VSR53_VectorSRegisterEnumValue))
+#define VSR54   ((VectorSRegister)(  VSR54_VectorSRegisterEnumValue))
+#define VSR55   ((VectorSRegister)(  VSR55_VectorSRegisterEnumValue))
+#define VSR56   ((VectorSRegister)(  VSR56_VectorSRegisterEnumValue))
+#define VSR57   ((VectorSRegister)(  VSR57_VectorSRegisterEnumValue))
+#define VSR58   ((VectorSRegister)(  VSR58_VectorSRegisterEnumValue))
+#define VSR59   ((VectorSRegister)(  VSR59_VectorSRegisterEnumValue))
+#define VSR60   ((VectorSRegister)(  VSR60_VectorSRegisterEnumValue))
+#define VSR61   ((VectorSRegister)(  VSR61_VectorSRegisterEnumValue))
+#define VSR62   ((VectorSRegister)(  VSR62_VectorSRegisterEnumValue))
+#define VSR63   ((VectorSRegister)(  VSR63_VectorSRegisterEnumValue))
 #endif // DONT_USE_REGISTER_DEFINES
 
 // Maximum number of incoming arguments that can be passed in i registers.
@@ -609,7 +677,7 @@
       * 2                                          // register halves
       + ConditionRegisterImpl::number_of_registers // condition code registers
       + SpecialRegisterImpl::number_of_registers   // special registers
-      + VectorRegisterImpl::number_of_registers    // vector registers
+      + VectorRegisterImpl::number_of_registers    // VSX registers
   };
 
   static const int max_gpr;