43972
|
1 |
/*
|
54084
|
2 |
* Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
|
43972
|
3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
4 |
*
|
|
5 |
* This code is free software; you can redistribute it and/or modify it
|
|
6 |
* under the terms of the GNU General Public License version 2 only, as
|
|
7 |
* published by the Free Software Foundation.
|
|
8 |
*
|
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that
|
|
13 |
* accompanied this code).
|
|
14 |
*
|
|
15 |
* You should have received a copy of the GNU General Public License version
|
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
18 |
*
|
|
19 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
20 |
* or visit www.oracle.com if you need additional information or have any
|
|
21 |
* questions.
|
|
22 |
*/
|
|
23 |
|
50858
|
24 |
|
|
25 |
|
43972
|
26 |
package org.graalvm.compiler.core.amd64;
|
|
27 |
|
50330
|
28 |
import static jdk.vm.ci.code.ValueUtil.asRegister;
|
46344
|
29 |
import static jdk.vm.ci.code.ValueUtil.isAllocatableValue;
|
50330
|
30 |
import static jdk.vm.ci.code.ValueUtil.isRegister;
|
43972
|
31 |
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP;
|
51436
|
32 |
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD;
|
|
33 |
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD;
|
|
34 |
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS;
|
|
35 |
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD;
|
43972
|
36 |
import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
|
50330
|
37 |
import static org.graalvm.compiler.lir.LIRValueUtil.asConstant;
|
43972
|
38 |
import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue;
|
|
39 |
import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
|
46344
|
40 |
import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;
|
47798
|
41 |
import static org.graalvm.compiler.lir.LIRValueUtil.isIntConstant;
|
43972
|
42 |
import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;
|
|
43 |
|
54084
|
44 |
import java.util.Optional;
|
|
45 |
|
47798
|
46 |
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
|
43972
|
47 |
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
|
|
48 |
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
|
|
49 |
import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
|
57537
|
50 |
import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
|
58299
|
51 |
import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRMOp;
|
51436
|
52 |
import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize;
|
58299
|
53 |
import org.graalvm.compiler.asm.amd64.AVXKind;
|
|
54 |
import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
|
43972
|
55 |
import org.graalvm.compiler.core.common.LIRKind;
|
47798
|
56 |
import org.graalvm.compiler.core.common.NumUtil;
|
43972
|
57 |
import org.graalvm.compiler.core.common.calc.Condition;
|
|
58 |
import org.graalvm.compiler.core.common.spi.ForeignCallLinkage;
|
|
59 |
import org.graalvm.compiler.core.common.spi.LIRKindTool;
|
|
60 |
import org.graalvm.compiler.debug.GraalError;
|
|
61 |
import org.graalvm.compiler.lir.ConstantValue;
|
|
62 |
import org.graalvm.compiler.lir.LIRFrameState;
|
|
63 |
import org.graalvm.compiler.lir.LIRInstruction;
|
|
64 |
import org.graalvm.compiler.lir.LIRValueUtil;
|
|
65 |
import org.graalvm.compiler.lir.LabelRef;
|
|
66 |
import org.graalvm.compiler.lir.StandardOp.JumpOp;
|
58299
|
67 |
import org.graalvm.compiler.lir.StandardOp.ZapRegistersOp;
|
43972
|
68 |
import org.graalvm.compiler.lir.SwitchStrategy;
|
|
69 |
import org.graalvm.compiler.lir.Variable;
|
|
70 |
import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
|
|
71 |
import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool;
|
49451
|
72 |
import org.graalvm.compiler.lir.amd64.AMD64ArrayCompareToOp;
|
43972
|
73 |
import org.graalvm.compiler.lir.amd64.AMD64ArrayEqualsOp;
|
51436
|
74 |
import org.graalvm.compiler.lir.amd64.AMD64ArrayIndexOfOp;
|
47798
|
75 |
import org.graalvm.compiler.lir.amd64.AMD64Binary;
|
43972
|
76 |
import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
|
|
77 |
import org.graalvm.compiler.lir.amd64.AMD64ByteSwapOp;
|
|
78 |
import org.graalvm.compiler.lir.amd64.AMD64Call;
|
47798
|
79 |
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow;
|
43972
|
80 |
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp;
|
|
81 |
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondMoveOp;
|
47798
|
82 |
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondSetOp;
|
43972
|
83 |
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatBranchOp;
|
|
84 |
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondMoveOp;
|
47798
|
85 |
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondSetOp;
|
57537
|
86 |
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.HashTableSwitchOp;
|
43972
|
87 |
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.ReturnOp;
|
|
88 |
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.StrategySwitchOp;
|
|
89 |
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TableSwitchOp;
|
49451
|
90 |
import org.graalvm.compiler.lir.amd64.AMD64LFenceOp;
|
43972
|
91 |
import org.graalvm.compiler.lir.amd64.AMD64Move;
|
|
92 |
import org.graalvm.compiler.lir.amd64.AMD64Move.CompareAndSwapOp;
|
|
93 |
import org.graalvm.compiler.lir.amd64.AMD64Move.MembarOp;
|
|
94 |
import org.graalvm.compiler.lir.amd64.AMD64Move.StackLeaOp;
|
|
95 |
import org.graalvm.compiler.lir.amd64.AMD64PauseOp;
|
52578
|
96 |
import org.graalvm.compiler.lir.amd64.AMD64StringLatin1InflateOp;
|
|
97 |
import org.graalvm.compiler.lir.amd64.AMD64StringUTF16CompressOp;
|
43972
|
98 |
import org.graalvm.compiler.lir.amd64.AMD64ZapRegistersOp;
|
|
99 |
import org.graalvm.compiler.lir.amd64.AMD64ZapStackOp;
|
58299
|
100 |
import org.graalvm.compiler.lir.amd64.AMD64ZeroMemoryOp;
|
|
101 |
import org.graalvm.compiler.lir.amd64.vector.AMD64VectorCompareOp;
|
43972
|
102 |
import org.graalvm.compiler.lir.gen.LIRGenerationResult;
|
|
103 |
import org.graalvm.compiler.lir.gen.LIRGenerator;
|
54084
|
104 |
import org.graalvm.compiler.lir.hashing.Hasher;
|
43972
|
105 |
import org.graalvm.compiler.phases.util.Providers;
|
|
106 |
|
|
107 |
import jdk.vm.ci.amd64.AMD64;
|
|
108 |
import jdk.vm.ci.amd64.AMD64Kind;
|
|
109 |
import jdk.vm.ci.code.CallingConvention;
|
|
110 |
import jdk.vm.ci.code.Register;
|
|
111 |
import jdk.vm.ci.code.RegisterValue;
|
|
112 |
import jdk.vm.ci.code.StackSlot;
|
|
113 |
import jdk.vm.ci.meta.AllocatableValue;
|
|
114 |
import jdk.vm.ci.meta.JavaConstant;
|
|
115 |
import jdk.vm.ci.meta.JavaKind;
|
|
116 |
import jdk.vm.ci.meta.PlatformKind;
|
|
117 |
import jdk.vm.ci.meta.VMConstant;
|
|
118 |
import jdk.vm.ci.meta.Value;
|
|
119 |
import jdk.vm.ci.meta.ValueKind;
|
|
120 |
|
|
121 |
/**
|
|
122 |
* This class implements the AMD64 specific portion of the LIR generator.
|
|
123 |
*/
|
|
124 |
public abstract class AMD64LIRGenerator extends LIRGenerator {
|
|
125 |
|
|
126 |
/**
 * Creates the AMD64-specific LIR generator, delegating all shared state to the
 * {@link LIRGenerator} superclass.
 *
 * @param lirKindTool tool for deriving {@link LIRKind}s
 * @param arithmeticLIRGen the AMD64 arithmetic LIR generator used for arithmetic ops
 * @param moveFactory factory for move operations
 * @param providers compiler providers (metadata, code cache, etc.)
 * @param lirGenRes the result object this generator appends LIR instructions to
 */
public AMD64LIRGenerator(LIRKindTool lirKindTool, AMD64ArithmeticLIRGenerator arithmeticLIRGen, MoveFactory moveFactory, Providers providers, LIRGenerationResult lirGenRes) {
    super(lirKindTool, arithmeticLIRGen, moveFactory, providers, lirGenRes);
}
|
|
129 |
|
|
130 |
/**
|
|
131 |
* Checks whether the supplied constant can be used without loading it into a register for store
|
|
132 |
* operations, i.e., on the right hand side of a memory access.
|
|
133 |
*
|
|
134 |
* @param c The constant to check.
|
|
135 |
* @return True if the constant can be used directly, false if the constant needs to be in a
|
|
136 |
* register.
|
|
137 |
*/
|
|
138 |
protected static final boolean canStoreConstant(JavaConstant c) {
|
|
139 |
// there is no immediate move of 64-bit constants on Intel
|
|
140 |
switch (c.getJavaKind()) {
|
46344
|
141 |
case Long:
|
|
142 |
return NumUtil.isInt(c.asLong());
|
43972
|
143 |
case Double:
|
|
144 |
return false;
|
|
145 |
case Object:
|
|
146 |
return c.isNull();
|
|
147 |
default:
|
|
148 |
return true;
|
|
149 |
}
|
|
150 |
}
|
|
151 |
|
|
152 |
@Override
|
|
153 |
protected JavaConstant zapValueForKind(PlatformKind kind) {
|
|
154 |
long dead = 0xDEADDEADDEADDEADL;
|
|
155 |
switch ((AMD64Kind) kind) {
|
|
156 |
case BYTE:
|
|
157 |
return JavaConstant.forByte((byte) dead);
|
|
158 |
case WORD:
|
|
159 |
return JavaConstant.forShort((short) dead);
|
|
160 |
case DWORD:
|
|
161 |
return JavaConstant.forInt((int) dead);
|
|
162 |
case QWORD:
|
|
163 |
return JavaConstant.forLong(dead);
|
|
164 |
case SINGLE:
|
|
165 |
return JavaConstant.forFloat(Float.intBitsToFloat((int) dead));
|
|
166 |
default:
|
|
167 |
// we don't support vector types, so just zap with double for all of them
|
|
168 |
return JavaConstant.forDouble(Double.longBitsToDouble(dead));
|
|
169 |
}
|
|
170 |
}
|
|
171 |
|
|
172 |
public AMD64AddressValue asAddressValue(Value address) {
|
|
173 |
if (address instanceof AMD64AddressValue) {
|
|
174 |
return (AMD64AddressValue) address;
|
|
175 |
} else {
|
|
176 |
if (address instanceof JavaConstant) {
|
|
177 |
long displacement = ((JavaConstant) address).asLong();
|
|
178 |
if (NumUtil.isInt(displacement)) {
|
|
179 |
return new AMD64AddressValue(address.getValueKind(), Value.ILLEGAL, (int) displacement);
|
|
180 |
}
|
|
181 |
}
|
|
182 |
return new AMD64AddressValue(address.getValueKind(), asAllocatable(address), 0);
|
|
183 |
}
|
|
184 |
}
|
|
185 |
|
|
186 |
@Override
|
|
187 |
public Variable emitAddress(AllocatableValue stackslot) {
|
|
188 |
Variable result = newVariable(LIRKind.value(target().arch.getWordKind()));
|
|
189 |
append(new StackLeaOp(result, stackslot));
|
|
190 |
return result;
|
|
191 |
}
|
|
192 |
|
|
193 |
/**
|
|
194 |
* The AMD64 backend only uses DWORD and QWORD values in registers because of a performance
|
|
195 |
* penalty when accessing WORD or BYTE registers. This function converts small integer kinds to
|
|
196 |
* DWORD.
|
|
197 |
*/
|
|
198 |
@Override
|
|
199 |
public <K extends ValueKind<K>> K toRegisterKind(K kind) {
|
|
200 |
switch ((AMD64Kind) kind.getPlatformKind()) {
|
|
201 |
case BYTE:
|
|
202 |
case WORD:
|
|
203 |
return kind.changeType(AMD64Kind.DWORD);
|
|
204 |
default:
|
|
205 |
return kind;
|
|
206 |
}
|
|
207 |
}
|
|
208 |
|
50330
|
209 |
private AllocatableValue asAllocatable(Value value, ValueKind<?> kind) {
|
|
210 |
if (value.getValueKind().equals(kind)) {
|
|
211 |
return asAllocatable(value);
|
|
212 |
} else if (isRegister(value)) {
|
|
213 |
return asRegister(value).asValue(kind);
|
|
214 |
} else if (isConstantValue(value)) {
|
|
215 |
return emitLoadConstant(kind, asConstant(value));
|
|
216 |
} else {
|
|
217 |
Variable variable = newVariable(kind);
|
|
218 |
emitMove(variable, value);
|
|
219 |
return variable;
|
|
220 |
}
|
|
221 |
}
|
|
222 |
|
|
223 |
/**
 * Emits a compare-and-swap on memory.
 *
 * <p>XMM (float/double) access kinds are handled by reinterpreting the operands as integers of
 * the same width, performing the integral CAS, and (for the value-returning variant)
 * reinterpreting the result back. The op uses {@code rax} as the combined expected-value input
 * and previous-value output register (see {@code aRes}).
 *
 * @param isLogic true to produce a boolean-style result selected from
 *            {@code trueValue}/{@code falseValue}; false to return the previous memory value
 * @param trueValue/falseValue only used when {@code isLogic} is true
 */
private Value emitCompareAndSwap(boolean isLogic, LIRKind accessKind, Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) {
    ValueKind<?> kind = newValue.getValueKind();
    assert kind.equals(expectedValue.getValueKind());

    AMD64AddressValue addressValue = asAddressValue(address);
    LIRKind integralAccessKind = accessKind;
    Value reinterpretedExpectedValue = expectedValue;
    Value reinterpretedNewValue = newValue;
    boolean isXmm = ((AMD64Kind) accessKind.getPlatformKind()).isXMM();
    if (isXmm) {
        // CAS works on integer registers; view SINGLE as Int and DOUBLE as Long
        if (accessKind.getPlatformKind().equals(AMD64Kind.SINGLE)) {
            integralAccessKind = LIRKind.fromJavaKind(target().arch, JavaKind.Int);
        } else {
            integralAccessKind = LIRKind.fromJavaKind(target().arch, JavaKind.Long);
        }
        reinterpretedExpectedValue = arithmeticLIRGen.emitReinterpret(integralAccessKind, expectedValue);
        reinterpretedNewValue = arithmeticLIRGen.emitReinterpret(integralAccessKind, newValue);
    }
    AMD64Kind memKind = (AMD64Kind) integralAccessKind.getPlatformKind();
    // rax serves as both the expected-value input and the previous-value output
    RegisterValue aRes = AMD64.rax.asValue(integralAccessKind);
    AllocatableValue allocatableNewValue = asAllocatable(reinterpretedNewValue, integralAccessKind);
    emitMove(aRes, reinterpretedExpectedValue);
    append(new CompareAndSwapOp(memKind, aRes, addressValue, aRes, allocatableNewValue));

    if (isLogic) {
        assert trueValue.getValueKind().equals(falseValue.getValueKind());
        // select trueValue/falseValue from the flags produced by the CAS op
        Variable result = newVariable(trueValue.getValueKind());
        append(new CondMoveOp(result, Condition.EQ, asAllocatable(trueValue), falseValue));
        return result;
    } else {
        if (isXmm) {
            // convert the integral previous value back to the original XMM kind
            return arithmeticLIRGen.emitReinterpret(accessKind, aRes);
        } else {
            Variable result = newVariable(kind);
            emitMove(result, aRes);
            return result;
        }
    }
}
|
|
262 |
|
|
263 |
/**
 * CAS returning whether the swap succeeded, expressed as a selection between
 * {@code trueValue} and {@code falseValue}. Delegates to {@link #emitCompareAndSwap}.
 */
@Override
public Variable emitLogicCompareAndSwap(LIRKind accessKind, Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) {
    return (Variable) emitCompareAndSwap(true, accessKind, address, expectedValue, newValue, trueValue, falseValue);
}
|
46344
|
267 |
|
50330
|
268 |
/**
 * CAS returning the previous memory value. Delegates to {@link #emitCompareAndSwap} with the
 * logic-result operands unused ({@code null}).
 */
@Override
public Value emitValueCompareAndSwap(LIRKind accessKind, Value address, Value expectedValue, Value newValue) {
    return emitCompareAndSwap(false, accessKind, address, expectedValue, newValue, null, null);
}
|
|
272 |
|
|
273 |
/**
 * Emits a compare-and-swap followed directly by a conditional branch on its outcome. Only
 * equality conditions are supported; the branch consumes the flags produced by the CAS op.
 * {@code rax} is used as the expected-value input / previous-value output register.
 */
public void emitCompareAndSwapBranch(ValueKind<?> kind, AMD64AddressValue address, Value expectedValue, Value newValue, Condition condition, LabelRef trueLabel, LabelRef falseLabel,
                double trueLabelProbability) {
    // the access kind may be narrower than the operand kinds, never wider
    assert kind.getPlatformKind().getSizeInBytes() <= expectedValue.getValueKind().getPlatformKind().getSizeInBytes();
    assert kind.getPlatformKind().getSizeInBytes() <= newValue.getValueKind().getPlatformKind().getSizeInBytes();
    assert condition == Condition.EQ || condition == Condition.NE;
    AMD64Kind memKind = (AMD64Kind) kind.getPlatformKind();
    RegisterValue raxValue = AMD64.rax.asValue(kind);
    emitMove(raxValue, expectedValue);
    append(new CompareAndSwapOp(memKind, raxValue, address, raxValue, asAllocatable(newValue)));
    append(new BranchOp(condition, trueLabel, falseLabel, trueLabelProbability));
}
|
|
284 |
|
|
285 |
@Override
|
50330
|
286 |
public Value emitAtomicReadAndAdd(Value address, ValueKind<?> kind, Value delta) {
|
43972
|
287 |
Variable result = newVariable(kind);
|
|
288 |
AMD64AddressValue addressValue = asAddressValue(address);
|
|
289 |
append(new AMD64Move.AtomicReadAndAddOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(delta)));
|
|
290 |
return result;
|
|
291 |
}
|
|
292 |
|
|
293 |
@Override
|
50330
|
294 |
public Value emitAtomicReadAndWrite(Value address, ValueKind<?> kind, Value newValue) {
|
43972
|
295 |
Variable result = newVariable(kind);
|
|
296 |
AMD64AddressValue addressValue = asAddressValue(address);
|
|
297 |
append(new AMD64Move.AtomicReadAndWriteOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(newValue)));
|
|
298 |
return result;
|
|
299 |
}
|
|
300 |
|
|
301 |
/**
 * Emits an explicit null check of {@code address}, recording {@code state} for deoptimization
 * metadata.
 */
@Override
public void emitNullCheck(Value address, LIRFrameState state) {
    append(new AMD64Move.NullCheckOp(asAddressValue(address), state));
}
|
|
305 |
|
|
306 |
/** Emits an unconditional jump to {@code label}. */
@Override
public void emitJump(LabelRef label) {
    assert label != null;
    append(new JumpOp(label));
}
|
|
311 |
|
|
312 |
@Override
|
|
313 |
public void emitCompareBranch(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel, double trueLabelProbability) {
|
47798
|
314 |
Condition finalCondition = emitCompare(cmpKind, left, right, cond);
|
43972
|
315 |
if (cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE) {
|
|
316 |
append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
|
|
317 |
} else {
|
|
318 |
append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
|
|
319 |
}
|
|
320 |
}
|
|
321 |
|
|
322 |
public void emitCompareBranchMemory(AMD64Kind cmpKind, Value left, AMD64AddressValue right, LIRFrameState state, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel,
|
|
323 |
double trueLabelProbability) {
|
|
324 |
boolean mirrored = emitCompareMemory(cmpKind, left, right, state);
|
|
325 |
Condition finalCondition = mirrored ? cond.mirror() : cond;
|
|
326 |
if (cmpKind.isXMM()) {
|
|
327 |
append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
|
|
328 |
} else {
|
|
329 |
append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
|
|
330 |
}
|
|
331 |
}
|
|
332 |
|
|
333 |
/**
 * Branches on the overflow flag left by the preceding arithmetic operation. Note that
 * {@code cmpLIRKind} is not consulted by this implementation.
 */
@Override
public void emitOverflowCheckBranch(LabelRef overflow, LabelRef noOverflow, LIRKind cmpLIRKind, double overflowProbability) {
    append(new BranchOp(ConditionFlag.Overflow, overflow, noOverflow, overflowProbability));
}
|
|
337 |
|
|
338 |
/**
 * Emits a TEST of {@code left} and {@code right} followed by a branch taken when the result is
 * zero (EQ).
 */
@Override
public void emitIntegerTestBranch(Value left, Value right, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
    emitIntegerTest(left, right);
    append(new BranchOp(Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
}
|
|
343 |
|
|
344 |
/**
 * Emits a conditional move (or conditional set for 0/1 constants) selecting between
 * {@code trueValue} and {@code falseValue} based on a comparison.
 *
 * <p>For float comparisons this tries to eliminate the extra parity (NaN) check by mirroring
 * the condition (swapping the compare operands) or negating it (swapping the selected values)
 * so that the unordered behavior of the plain condition already matches
 * {@code unorderedIsTrue}.
 */
@Override
public Variable emitConditionalMove(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, Value trueValue, Value falseValue) {
    boolean isFloatComparison = cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE;

    Condition finalCondition = cond;
    Value finalTrueValue = trueValue;
    Value finalFalseValue = falseValue;
    if (isFloatComparison) {
        // eliminate the parity check in case of a float comparison
        Value finalLeft = left;
        Value finalRight = right;
        if (unorderedIsTrue != AMD64ControlFlow.trueOnUnordered(finalCondition)) {
            if (unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.mirror())) {
                // mirroring the condition requires swapping the compare operands
                finalCondition = finalCondition.mirror();
                finalLeft = right;
                finalRight = left;
            } else if (finalCondition != Condition.EQ && finalCondition != Condition.NE) {
                // negating EQ and NE does not make any sense as we would need to negate
                // unorderedIsTrue as well (otherwise, we would no longer fulfill the Java
                // NaN semantics)
                assert unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.negate());
                finalCondition = finalCondition.negate();
                // negating the condition requires swapping the selected values
                finalTrueValue = falseValue;
                finalFalseValue = trueValue;
            }
        }
        emitRawCompare(cmpKind, finalLeft, finalRight);
    } else {
        finalCondition = emitCompare(cmpKind, left, right, cond);
    }

    boolean isParityCheckNecessary = isFloatComparison && unorderedIsTrue != AMD64ControlFlow.trueOnUnordered(finalCondition);
    Variable result = newVariable(finalTrueValue.getValueKind());
    if (!isParityCheckNecessary && isIntConstant(finalTrueValue, 1) && isIntConstant(finalFalseValue, 0)) {
        // selecting 1/0: use a conditional set instead of a conditional move
        if (isFloatComparison) {
            append(new FloatCondSetOp(result, finalCondition));
        } else {
            append(new CondSetOp(result, finalCondition));
        }
    } else if (!isParityCheckNecessary && isIntConstant(finalTrueValue, 0) && isIntConstant(finalFalseValue, 1)) {
        // selecting 0/1: set under the negated condition, or set then XOR with 1
        if (isFloatComparison) {
            if (unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.negate())) {
                append(new FloatCondSetOp(result, finalCondition.negate()));
            } else {
                append(new FloatCondSetOp(result, finalCondition));
                // negating here would break the unordered behavior; flip the result bit instead
                Variable negatedResult = newVariable(result.getValueKind());
                append(new AMD64Binary.ConstOp(AMD64BinaryArithmetic.XOR, OperandSize.get(result.getPlatformKind()), negatedResult, result, 1));
                result = negatedResult;
            }
        } else {
            append(new CondSetOp(result, finalCondition.negate()));
        }
    } else if (isFloatComparison) {
        append(new FloatCondMoveOp(result, finalCondition, unorderedIsTrue, load(finalTrueValue), load(finalFalseValue)));
    } else {
        append(new CondMoveOp(result, finalCondition, load(finalTrueValue), loadNonConst(finalFalseValue)));
    }
    return result;
}
|
|
403 |
|
|
404 |
@Override
|
|
405 |
public Variable emitIntegerTestMove(Value left, Value right, Value trueValue, Value falseValue) {
|
|
406 |
emitIntegerTest(left, right);
|
|
407 |
Variable result = newVariable(trueValue.getValueKind());
|
|
408 |
append(new CondMoveOp(result, Condition.EQ, load(trueValue), loadNonConst(falseValue)));
|
|
409 |
return result;
|
|
410 |
}
|
|
411 |
|
58299
|
412 |
private static AVXSize getRegisterSize(Value a) {
|
|
413 |
AMD64Kind kind = (AMD64Kind) a.getPlatformKind();
|
|
414 |
if (kind.isXMM()) {
|
|
415 |
return AVXKind.getRegisterSize(kind);
|
|
416 |
} else {
|
|
417 |
return AVXSize.XMM;
|
|
418 |
}
|
|
419 |
}
|
|
420 |
|
43972
|
421 |
/**
 * Emits a TEST (bitwise-AND that only sets flags) of {@code a} and {@code b}. Vector operands
 * use VPTEST; scalar operands pick the immediate, reg-mem, or reg-reg form depending on which
 * operand is a small constant or already allocatable.
 */
private void emitIntegerTest(Value a, Value b) {
    if (a.getPlatformKind().getVectorLength() > 1) {
        append(new AMD64VectorCompareOp(VexRMOp.VPTEST, getRegisterSize(a), asAllocatable(a), asAllocatable(b)));
    } else {
        assert ((AMD64Kind) a.getPlatformKind()).isInteger();
        OperandSize size = a.getPlatformKind() == AMD64Kind.QWORD ? QWORD : DWORD;
        if (isJavaConstant(b) && NumUtil.is32bit(asJavaConstant(b).asLong())) {
            // constant on the right: TEST reg, imm32
            append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(a), (int) asJavaConstant(b).asLong()));
        } else if (isJavaConstant(a) && NumUtil.is32bit(asJavaConstant(a).asLong())) {
            // constant on the left: TEST is commutative, so swap operands
            append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(b), (int) asJavaConstant(a).asLong()));
        } else if (isAllocatableValue(b)) {
            // prefer the already-allocatable operand as the first (reg) operand
            append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(b), asAllocatable(a)));
        } else {
            append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(a), asAllocatable(b)));
        }
    }
}
|
|
438 |
|
|
439 |
/**
|
|
440 |
* This method emits the compare against memory instruction, and may reorder the operands. It
|
|
441 |
* returns true if it did so.
|
|
442 |
*
|
|
443 |
* @param b the right operand of the comparison
|
|
444 |
* @return true if the left and right operands were switched, false otherwise
|
|
445 |
*/
|
|
446 |
/**
 * This method emits the compare against memory instruction, and may reorder the operands. It
 * returns true if it did so.
 *
 * @param cmpKind the platform kind governing the operand size of the comparison
 * @param a the left operand of the comparison
 * @param b the right operand of the comparison (memory)
 * @param state deoptimization state for an implicit exception on the memory access
 * @return true if the left and right operands were switched, false otherwise
 */
private boolean emitCompareMemory(AMD64Kind cmpKind, Value a, AMD64AddressValue b, LIRFrameState state) {
    OperandSize size;
    switch (cmpKind) {
        case BYTE:
            size = OperandSize.BYTE;
            break;
        case WORD:
            size = OperandSize.WORD;
            break;
        case DWORD:
            size = OperandSize.DWORD;
            break;
        case QWORD:
            size = OperandSize.QWORD;
            break;
        case SINGLE:
            // float compares go straight to UCOMIS; operands never reordered
            append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PS, asAllocatable(a), b, state));
            return false;
        case DOUBLE:
            append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PD, asAllocatable(a), b, state));
            return false;
        default:
            throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
    }

    if (isConstantValue(a)) {
        // constant-vs-memory may flip to memory-vs-constant (reported via return value)
        return emitCompareMemoryConOp(size, asConstantValue(a), b, state);
    } else {
        return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
    }
}
|
|
477 |
|
|
478 |
/**
 * Emits a compare of a constant against a memory operand. Where the constant fits an immediate
 * (null → 0, VM constants at DWORD, 32-bit values) the memory operand becomes the left operand
 * and the method returns true to signal the swap; otherwise the constant is loaded into a
 * register and a reg-memory compare is emitted (no swap).
 *
 * @return true if the operands were switched, false otherwise
 */
protected boolean emitCompareMemoryConOp(OperandSize size, ConstantValue a, AMD64AddressValue b, LIRFrameState state) {
    if (JavaConstant.isNull(a.getConstant())) {
        // null compares as immediate 0 against memory
        append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, 0, state));
        return true;
    } else if (a.getConstant() instanceof VMConstant && size == DWORD) {
        // VM constants are patched later; only the DWORD immediate form supports that
        VMConstant vc = (VMConstant) a.getConstant();
        append(new AMD64BinaryConsumer.MemoryVMConstOp(CMP.getMIOpcode(size, false), b, vc, state));
        return true;
    } else {
        long value = a.getJavaConstant().asLong();
        if (NumUtil.is32bit(value)) {
            append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, (int) value, state));
            return true;
        } else {
            // 64-bit immediates are not encodable; fall back to a register compare
            return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
        }
    }
}
|
|
496 |
|
|
497 |
private boolean emitCompareRegMemoryOp(OperandSize size, AllocatableValue a, AMD64AddressValue b, LIRFrameState state) {
|
|
498 |
AMD64RMOp op = CMP.getRMOpcode(size);
|
|
499 |
append(new AMD64BinaryConsumer.MemoryRMOp(op, size, a, b, state));
|
|
500 |
return false;
|
|
501 |
}
|
|
502 |
|
|
503 |
/**
|
|
504 |
* This method emits the compare instruction, and may reorder the operands. It returns true if
|
|
505 |
* it did so.
|
|
506 |
*
|
|
507 |
* @param a the left operand of the comparison
|
|
508 |
* @param b the right operand of the comparison
|
47798
|
509 |
* @param cond the condition of the comparison
|
43972
|
510 |
* @return true if the left and right operands were switched, false otherwise
|
|
511 |
*/
|
47798
|
512 |
private Condition emitCompare(PlatformKind cmpKind, Value a, Value b, Condition cond) {
|
43972
|
513 |
if (LIRValueUtil.isVariable(b)) {
|
47798
|
514 |
emitRawCompare(cmpKind, b, a);
|
|
515 |
return cond.mirror();
|
43972
|
516 |
} else {
|
47798
|
517 |
emitRawCompare(cmpKind, a, b);
|
|
518 |
return cond;
|
43972
|
519 |
}
|
47798
|
520 |
}
|
|
521 |
|
|
522 |
/**
 * Emits the actual compare instruction without any operand reordering; the left operand is
 * forced into a register, the right may stay a constant.
 */
private void emitRawCompare(PlatformKind cmpKind, Value left, Value right) {
    ((AMD64ArithmeticLIRGeneratorTool) arithmeticLIRGen).emitCompareOp((AMD64Kind) cmpKind, load(left), loadNonConst(right));
}
|
|
525 |
|
|
526 |
@Override
|
|
527 |
public void emitMembar(int barriers) {
|
|
528 |
int necessaryBarriers = target().arch.requiredBarriers(barriers);
|
|
529 |
if (target().isMP && necessaryBarriers != 0) {
|
|
530 |
append(new MembarOp(necessaryBarriers));
|
|
531 |
}
|
|
532 |
}
|
|
533 |
|
|
534 |
/**
 * Emits a direct call to native code at {@code address} using the given native calling
 * convention.
 *
 * @param numberOfFloatingPointArguments the number of floating-point arguments — presumably
 *            needed for the varargs register-count protocol of the native ABI; confirm against
 *            the concrete subclass implementation
 */
public abstract void emitCCall(long address, CallingConvention nativeCallingConvention, Value[] args, int numberOfFloatingPointArguments);
|
|
535 |
|
|
536 |
/**
 * Emits a foreign (runtime) call. A far call is used when the call target offset may not fit
 * in 32 bits and position-independent code is not being generated; otherwise a near call
 * suffices.
 */
@Override
protected void emitForeignCallOp(ForeignCallLinkage linkage, Value result, Value[] arguments, Value[] temps, LIRFrameState info) {
    long maxOffset = linkage.getMaxCallTargetOffset();
    // maxOffset != (int) maxOffset <=> the offset does not fit in a 32-bit displacement
    if (maxOffset != (int) maxOffset && !GeneratePIC.getValue(getResult().getLIR().getOptions())) {
        append(new AMD64Call.DirectFarForeignCallOp(linkage, result, arguments, temps, info));
    } else {
        append(new AMD64Call.DirectNearForeignCallOp(linkage, result, arguments, temps, info));
    }
}
|
|
545 |
|
|
546 |
@Override
|
|
547 |
public Variable emitByteSwap(Value input) {
|
|
548 |
Variable result = newVariable(LIRKind.combine(input));
|
|
549 |
append(new AMD64ByteSwapOp(result, input));
|
|
550 |
return result;
|
|
551 |
}
|
|
552 |
|
|
553 |
/**
 * Emits a lexicographic array comparison ({@code String.compareTo}-style). The op uses fixed
 * registers: result in {@code rax}, lengths in {@code rcx}/{@code rdx}; the result is copied
 * out of {@code rax} into a fresh variable.
 */
@Override
public Variable emitArrayCompareTo(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length1, Value length2) {
    LIRKind resultKind = LIRKind.value(AMD64Kind.DWORD);
    RegisterValue raxRes = AMD64.rax.asValue(resultKind);
    RegisterValue cnt1 = AMD64.rcx.asValue(length1.getValueKind());
    RegisterValue cnt2 = AMD64.rdx.asValue(length2.getValueKind());
    emitMove(cnt1, length1);
    emitMove(cnt2, length2);
    append(new AMD64ArrayCompareToOp(this, kind1, kind2, raxRes, array1, array2, cnt1, cnt2));
    Variable result = newVariable(resultKind);
    emitMove(result, raxRes);
    return result;
}
|
|
566 |
|
|
567 |
/**
 * Emits an element-wise equality check of two arrays of the same element kind, producing a
 * DWORD (boolean-valued) result.
 */
@Override
public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length, boolean directPointers) {
    Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
    append(new AMD64ArrayEqualsOp(this, kind, kind, result, array1, array2, length, directPointers, getMaxVectorSize()));
    return result;
}
|
|
573 |
|
|
574 |
/**
 * Emits an element-wise equality check of two arrays whose element kinds may differ
 * (e.g. byte vs char), producing a DWORD (boolean-valued) result.
 */
@Override
public Variable emitArrayEquals(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length, boolean directPointers) {
    Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
    append(new AMD64ArrayEqualsOp(this, kind1, kind2, result, array1, array2, length, directPointers, getMaxVectorSize()));
    return result;
}
|
|
580 |
|
46344
|
581 |
/**
|
52578
|
582 |
* Return the maximum size of vector registers used in SSE/AVX instructions.
|
|
583 |
*/
|
|
584 |
/**
 * Return the maximum size of vector registers used in SSE/AVX instructions. Subclasses may
 * override to impose a limit.
 */
protected int getMaxVectorSize() {
    // default for "unlimited"
    return -1;
}
|
|
588 |
|
46344
|
589 |
/**
 * Emits an indexOf-style search over an array region, producing the found index (or a
 * not-found sentinel, as defined by {@code AMD64ArrayIndexOfOp}) as a DWORD result.
 *
 * @param findTwoConsecutive whether to search for two consecutive values rather than one
 * @param searchValues the value(s) to search for
 */
@Override
public Variable emitArrayIndexOf(JavaKind arrayKind, JavaKind valueKind, boolean findTwoConsecutive, Value arrayPointer, Value arrayLength, Value fromIndex, Value... searchValues) {
    Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
    append(new AMD64ArrayIndexOfOp(arrayKind, valueKind, findTwoConsecutive, getMaxVectorSize(), this, result,
                    asAllocatable(arrayPointer), asAllocatable(arrayLength), asAllocatable(fromIndex), searchValues));
    return result;
}
|
|
596 |
|
43972
|
597 |
/**
 * Emits the Latin-1 → UTF-16 inflate intrinsic. The op requires its operands in fixed
 * registers: source in {@code rsi}, destination in {@code rdi}, length in {@code rdx}.
 */
@Override
public void emitStringLatin1Inflate(Value src, Value dst, Value len) {
    RegisterValue rsrc = AMD64.rsi.asValue(src.getValueKind());
    RegisterValue rdst = AMD64.rdi.asValue(dst.getValueKind());
    RegisterValue rlen = AMD64.rdx.asValue(len.getValueKind());

    emitMove(rsrc, src);
    emitMove(rdst, dst);
    emitMove(rlen, len);

    append(new AMD64StringLatin1InflateOp(this, rsrc, rdst, rlen));
}
|
|
609 |
|
|
610 |
/**
 * Emits the UTF-16 → Latin-1 compress intrinsic. The op requires fixed registers: source in
 * {@code rsi}, destination in {@code rdi}, length in {@code rdx}, result in {@code rax}; the
 * result is copied out of {@code rax} into a fresh variable.
 */
@Override
public Variable emitStringUTF16Compress(Value src, Value dst, Value len) {
    RegisterValue rsrc = AMD64.rsi.asValue(src.getValueKind());
    RegisterValue rdst = AMD64.rdi.asValue(dst.getValueKind());
    RegisterValue rlen = AMD64.rdx.asValue(len.getValueKind());

    emitMove(rsrc, src);
    emitMove(rdst, dst);
    emitMove(rlen, len);

    LIRKind reskind = LIRKind.value(AMD64Kind.DWORD);
    RegisterValue rres = AMD64.rax.asValue(reskind);

    append(new AMD64StringUTF16CompressOp(this, rres, rsrc, rdst, rlen));

    Variable res = newVariable(reskind);
    emitMove(res, rres);
    return res;
}
|
|
629 |
|
|
630 |
@Override
|
43972
|
631 |
public void emitReturn(JavaKind kind, Value input) {
|
|
632 |
AllocatableValue operand = Value.ILLEGAL;
|
|
633 |
if (input != null) {
|
|
634 |
operand = resultOperandFor(kind, input.getValueKind());
|
|
635 |
emitMove(operand, input);
|
|
636 |
}
|
|
637 |
append(new ReturnOp(operand));
|
|
638 |
}
|
|
639 |
|
|
640 |
/**
 * Factory hook for the strategy-switch LIR op; subclasses may override to substitute a
 * specialized op.
 */
protected StrategySwitchOp createStrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Variable key, AllocatableValue temp) {
    return new StrategySwitchOp(strategy, keyTargets, defaultTarget, key, temp);
}
|
|
643 |
|
|
644 |
@Override
|
|
645 |
public void emitStrategySwitch(SwitchStrategy strategy, Variable key, LabelRef[] keyTargets, LabelRef defaultTarget) {
|
|
646 |
// a temp is needed for loading object constants
|
|
647 |
boolean needsTemp = !LIRKind.isValue(key);
|
|
648 |
append(createStrategySwitchOp(strategy, keyTargets, defaultTarget, key, needsTemp ? newVariable(key.getValueKind()) : Value.ILLEGAL));
|
|
649 |
}
|
|
650 |
|
|
651 |
/**
 * Emits a jump-table switch over a dense key range starting at {@code lowKey}. Two scratch
 * variables are allocated: one word-sized for the table address computation and one of the
 * key's kind.
 */
@Override
protected void emitTableSwitch(int lowKey, LabelRef defaultTarget, LabelRef[] targets, Value key) {
    append(new TableSwitchOp(lowKey, defaultTarget, targets, key, newVariable(LIRKind.value(target().arch.getWordKind())), newVariable(key.getValueKind())));
}
|
|
655 |
|
|
656 |
/**
 * Looks for a perfect-hash function over the switch keys that meets the requested minimum
 * table density; empty if none is found.
 */
@Override
protected Optional<Hasher> hasherFor(JavaConstant[] keyConstants, double minDensity) {
    return Hasher.forKeys(keyConstants, minDensity);
}
|
|
660 |
|
|
661 |
@Override
|
|
662 |
protected void emitHashTableSwitch(Hasher hasher, JavaConstant[] keys, LabelRef defaultTarget, LabelRef[] targets, Value value) {
|
|
663 |
Value index = hasher.hash(value, arithmeticLIRGen);
|
|
664 |
Variable scratch = newVariable(LIRKind.value(target().arch.getWordKind()));
|
|
665 |
Variable entryScratch = newVariable(LIRKind.value(target().arch.getWordKind()));
|
|
666 |
append(new HashTableSwitchOp(keys, defaultTarget, targets, value, index, scratch, entryScratch));
|
|
667 |
}
|
|
668 |
|
|
669 |
/** Emits a PAUSE instruction (spin-wait hint). */
@Override
public void emitPause() {
    append(new AMD64PauseOp());
}
|
|
673 |
|
|
674 |
/** Creates the op that overwrites the given registers with the given zap values (debug aid). */
@Override
public ZapRegistersOp createZapRegisters(Register[] zappedRegisters, JavaConstant[] zapValues) {
    return new AMD64ZapRegistersOp(zappedRegisters, zapValues);
}
|
|
678 |
|
|
679 |
/** Creates the op that overwrites the given stack slots with the given zap values (debug aid). */
@Override
public LIRInstruction createZapArgumentSpace(StackSlot[] zappedStack, JavaConstant[] zapValues) {
    return new AMD64ZapStackOp(zappedStack, zapValues);
}
|
49451
|
683 |
|
52578
|
684 |
/** Emits an LFENCE, used as a speculative-execution barrier. */
@Override
public void emitSpeculationFence() {
    append(new AMD64LFenceOp());
}
|
58299
|
688 |
|
|
689 |
@Override
|
58533
|
690 |
public void emitZeroMemory(Value address, Value length, boolean isAligned) {
|
58299
|
691 |
RegisterValue lengthReg = AMD64.rcx.asValue(length.getValueKind());
|
|
692 |
emitMove(lengthReg, length);
|
|
693 |
append(new AMD64ZeroMemoryOp(asAddressValue(address), lengthReg));
|
|
694 |
}
|
43972
|
695 |
}
|