--- a/hotspot/src/share/vm/opto/matcher.cpp Thu May 07 15:34:45 2015 -0700
+++ b/hotspot/src/share/vm/opto/matcher.cpp Fri May 08 11:49:20 2015 -0700
@@ -83,6 +83,7 @@
idealreg2spillmask [Op_VecD] = NULL;
idealreg2spillmask [Op_VecX] = NULL;
idealreg2spillmask [Op_VecY] = NULL;
+ idealreg2spillmask [Op_VecZ] = NULL;

idealreg2debugmask [Op_RegI] = NULL;
idealreg2debugmask [Op_RegN] = NULL;
@@ -94,6 +95,7 @@
idealreg2debugmask [Op_VecD] = NULL;
idealreg2debugmask [Op_VecX] = NULL;
idealreg2debugmask [Op_VecY] = NULL;
+ idealreg2debugmask [Op_VecZ] = NULL;

idealreg2mhdebugmask[Op_RegI] = NULL;
idealreg2mhdebugmask[Op_RegN] = NULL;
@@ -105,6 +107,7 @@
idealreg2mhdebugmask[Op_VecD] = NULL;
idealreg2mhdebugmask[Op_VecX] = NULL;
idealreg2mhdebugmask[Op_VecY] = NULL;
+ idealreg2mhdebugmask[Op_VecZ] = NULL;

debug_only(_mem_node = NULL;) // Ideal memory node consumed by mach node
}
@@ -413,7 +416,7 @@
void Matcher::init_first_stack_mask() {
// Allocate storage for spill masks as masks for the appropriate load type.
- RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * (3*6+4));
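+ // 23 masks: 3 tables (spill, debug, mhdebug) x 6 scalar register kinds, plus 5 vector spill masks (VecS, VecD, VecX, VecY, VecZ).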
+ RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * (3*6+5));
idealreg2spillmask [Op_RegN] = &rms[0];
idealreg2spillmask [Op_RegI] = &rms[1];
@@ -440,6 +443,7 @@
idealreg2spillmask [Op_VecD] = &rms[19];
idealreg2spillmask [Op_VecX] = &rms[20];
idealreg2spillmask [Op_VecY] = &rms[21];
+ idealreg2spillmask [Op_VecZ] = &rms[22];

OptoReg::Name i;
@@ -524,6 +528,18 @@
  *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
  idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
}
+ if (Matcher::vector_size_supported(T_FLOAT,16)) {
+   // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
+   OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
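+   // Exclude the last input-argument slots so a 16-slot VecZ spill placed there
+   // cannot run past _in_arg_limit and stomp on stack slots in the caller frame.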
+   for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
+     aligned_stack_mask.Remove(in);
+     in = OptoReg::add(in, -1);
+   }
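+   // Keep only 16-slot (64-byte) aligned groups of stack slots as legal VecZ spill locations.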
+   aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
+   assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
+   *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
+   idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
+ }
if (UseFPUForSpilling) {
// This mask logic assumes that the spill operations are
// symmetric and that the registers involved are the same size.
@@ -862,6 +878,10 @@
  MachNode *spillVectY = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTY));
  idealreg2regmask[Op_VecY] = &spillVectY->out_RegMask();
}
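+ // Match an ideal 512-bit vector load; the machine node's output register mask
+ // becomes the mask of registers that can hold a VecZ value.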
+ if (Matcher::vector_size_supported(T_FLOAT,16)) {
+   MachNode *spillVectZ = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTZ));
+   idealreg2regmask[Op_VecZ] = &spillVectZ->out_RegMask();
+ }
}

#ifdef ASSERT