/*
 * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */


package org.graalvm.compiler.lir.amd64.vector;

import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isRegister;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.CONST;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
import static org.graalvm.compiler.lir.LIRValueUtil.asConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;

import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRMOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import org.graalvm.compiler.asm.amd64.AVXKind;
import org.graalvm.compiler.lir.LIRFrameState;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
import org.graalvm.compiler.lir.amd64.AMD64LIRInstruction;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;

import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Value;

public class AMD64VectorUnary {
|
|
52 |
|
59095
|
53 |
public static final class AVXUnaryOp extends AMD64VectorInstruction {
|
50609
|
54 |
public static final LIRInstructionClass<AVXUnaryOp> TYPE = LIRInstructionClass.create(AVXUnaryOp.class);
|
|
55 |
|
51436
|
56 |
@Opcode private final VexRMOp opcode;
|
50609
|
57 |
|
|
58 |
@Def({REG}) protected AllocatableValue result;
|
|
59 |
@Use({REG, STACK}) protected AllocatableValue input;
|
|
60 |
|
51436
|
61 |
public AVXUnaryOp(VexRMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue input) {
|
59095
|
62 |
super(TYPE, size);
|
50609
|
63 |
this.opcode = opcode;
|
|
64 |
this.result = result;
|
|
65 |
this.input = input;
|
|
66 |
}
|
|
67 |
|
|
68 |
@Override
|
51436
|
69 |
public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
|
50609
|
70 |
if (isRegister(input)) {
|
51436
|
71 |
opcode.emit(masm, size, asRegister(result), asRegister(input));
|
50609
|
72 |
} else {
|
51436
|
73 |
opcode.emit(masm, size, asRegister(result), (AMD64Address) crb.asAddress(input));
|
50609
|
74 |
}
|
|
75 |
}
|
|
76 |
}
|
|
77 |
|
59095
|
78 |
public static final class AVXUnaryMemoryOp extends AMD64VectorInstruction {
|
50609
|
79 |
public static final LIRInstructionClass<AVXUnaryMemoryOp> TYPE = LIRInstructionClass.create(AVXUnaryMemoryOp.class);
|
|
80 |
|
51436
|
81 |
@Opcode private final VexRMOp opcode;
|
50609
|
82 |
|
|
83 |
@Def({REG}) protected AllocatableValue result;
|
|
84 |
@Use({COMPOSITE}) protected AMD64AddressValue input;
|
|
85 |
@State protected LIRFrameState state;
|
|
86 |
|
51436
|
87 |
public AVXUnaryMemoryOp(VexRMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AMD64AddressValue input, LIRFrameState state) {
|
59095
|
88 |
super(TYPE, size);
|
50609
|
89 |
this.opcode = opcode;
|
|
90 |
this.result = result;
|
|
91 |
this.input = input;
|
|
92 |
this.state = state;
|
|
93 |
}
|
|
94 |
|
|
95 |
@Override
|
51436
|
96 |
public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
|
50609
|
97 |
if (state != null) {
|
51436
|
98 |
crb.recordImplicitException(masm.position(), state);
|
50609
|
99 |
}
|
51436
|
100 |
opcode.emit(masm, size, asRegister(result), input.toAddress());
|
50609
|
101 |
}
|
|
102 |
}
|
|
103 |
|
59095
|
104 |
public static final class AVXBroadcastOp extends AMD64VectorInstruction {
|
50609
|
105 |
public static final LIRInstructionClass<AVXBroadcastOp> TYPE = LIRInstructionClass.create(AVXBroadcastOp.class);
|
|
106 |
|
51436
|
107 |
@Opcode private final VexRMOp opcode;
|
50609
|
108 |
|
|
109 |
@Def({REG}) protected AllocatableValue result;
|
|
110 |
@Use({REG, STACK, CONST}) protected Value input;
|
|
111 |
|
51436
|
112 |
public AVXBroadcastOp(VexRMOp opcode, AVXKind.AVXSize size, AllocatableValue result, Value input) {
|
59095
|
113 |
super(TYPE, size);
|
50609
|
114 |
this.opcode = opcode;
|
|
115 |
this.result = result;
|
|
116 |
this.input = input;
|
|
117 |
}
|
|
118 |
|
|
119 |
@Override
|
51436
|
120 |
public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
|
50609
|
121 |
if (isRegister(input)) {
|
51436
|
122 |
opcode.emit(masm, size, asRegister(result), asRegister(input));
|
50609
|
123 |
} else if (isConstantValue(input)) {
|
|
124 |
int align = input.getPlatformKind().getSizeInBytes();
|
|
125 |
AMD64Address address = (AMD64Address) crb.recordDataReferenceInCode(asConstant(input), align);
|
51436
|
126 |
opcode.emit(masm, size, asRegister(result), address);
|
50609
|
127 |
} else {
|
51436
|
128 |
opcode.emit(masm, size, asRegister(result), (AMD64Address) crb.asAddress(input));
|
50609
|
129 |
}
|
|
130 |
}
|
|
131 |
}
|
|
132 |
|
59095
|
133 |
public static final class AVXConvertMemoryOp extends AMD64VectorInstruction {
|
50609
|
134 |
public static final LIRInstructionClass<AVXConvertMemoryOp> TYPE = LIRInstructionClass.create(AVXConvertMemoryOp.class);
|
|
135 |
|
51436
|
136 |
@Opcode private final VexRVMOp opcode;
|
50609
|
137 |
|
|
138 |
@Def({REG}) protected AllocatableValue result;
|
|
139 |
@Use({COMPOSITE}) protected AMD64AddressValue input;
|
|
140 |
@State protected LIRFrameState state;
|
|
141 |
|
51436
|
142 |
public AVXConvertMemoryOp(VexRVMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AMD64AddressValue input, LIRFrameState state) {
|
59095
|
143 |
super(TYPE, size);
|
50609
|
144 |
this.opcode = opcode;
|
|
145 |
this.result = result;
|
|
146 |
this.input = input;
|
|
147 |
this.state = state;
|
|
148 |
}
|
|
149 |
|
|
150 |
@Override
|
51436
|
151 |
public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
|
50609
|
152 |
if (state != null) {
|
51436
|
153 |
crb.recordImplicitException(masm.position(), state);
|
50609
|
154 |
}
|
51436
|
155 |
opcode.emit(masm, size, asRegister(result), asRegister(result), input.toAddress());
|
50609
|
156 |
}
|
|
157 |
}
|
|
158 |
|
51436
|
159 |
public static final class AVXConvertOp extends AMD64LIRInstruction {
|
50609
|
160 |
public static final LIRInstructionClass<AVXConvertOp> TYPE = LIRInstructionClass.create(AVXConvertOp.class);
|
|
161 |
|
51436
|
162 |
@Opcode private final VexRVMOp opcode;
|
50609
|
163 |
@Def({REG}) protected AllocatableValue result;
|
|
164 |
@Use({REG, STACK}) protected AllocatableValue input;
|
|
165 |
|
51436
|
166 |
public AVXConvertOp(VexRVMOp opcode, AllocatableValue result, AllocatableValue input) {
|
50609
|
167 |
super(TYPE);
|
|
168 |
this.opcode = opcode;
|
|
169 |
this.result = result;
|
|
170 |
this.input = input;
|
|
171 |
}
|
|
172 |
|
|
173 |
@Override
|
51436
|
174 |
public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
|
59095
|
175 |
// Note that we assume only XMM-size instructions are emitted here. Loosening this
|
|
176 |
// restriction would require informing AMD64HotSpotReturnOp when emitting vzeroupper.
|
50609
|
177 |
if (isRegister(input)) {
|
|
178 |
if (!asRegister(input).equals(asRegister(result))) {
|
|
179 |
// clear result register to avoid unnecessary dependency
|
51436
|
180 |
VexRVMOp.VXORPD.emit(masm, AVXKind.AVXSize.XMM, asRegister(result), asRegister(result), asRegister(result));
|
50609
|
181 |
}
|
51436
|
182 |
opcode.emit(masm, AVXKind.AVXSize.XMM, asRegister(result), asRegister(result), asRegister(input));
|
50609
|
183 |
} else {
|
51436
|
184 |
VexRVMOp.VXORPD.emit(masm, AVXKind.AVXSize.XMM, asRegister(result), asRegister(result), asRegister(result));
|
|
185 |
opcode.emit(masm, AVXKind.AVXSize.XMM, asRegister(result), asRegister(result), (AMD64Address) crb.asAddress(input));
|
50609
|
186 |
}
|
|
187 |
}
|
|
188 |
}
|
|
189 |
}
|