/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */


package org.graalvm.compiler.lir.amd64;

import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isRegister;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.CONST;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;

import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.asm.amd64.AMD64Address.Scale;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import org.graalvm.compiler.code.CompilationResult.JumpTable;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.StandardOp;
import org.graalvm.compiler.lir.StandardOp.BlockEndOp;
import org.graalvm.compiler.lir.SwitchStrategy;
import org.graalvm.compiler.lir.SwitchStrategy.BaseSwitchClosure;
import org.graalvm.compiler.lir.Variable;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64.CPUFeature;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.Value;

public class AMD64ControlFlow {

    public static final class ReturnOp extends AMD64BlockEndOp implements BlockEndOp {
        public static final LIRInstructionClass<ReturnOp> TYPE = LIRInstructionClass.create(ReturnOp.class);
        @Use({REG, ILLEGAL}) protected Value x;

        public ReturnOp(Value x) {
            super(TYPE);
            this.x = x;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            crb.frameContext.leave(crb);
            /*
             * We potentially return to the interpreter, and that's an AVX-SSE transition. The only
             * live value at this point should be the return value in either rax, or in xmm0 with
             * the upper half of the register unused, so we don't destroy any value here.
             */
            if (masm.supports(CPUFeature.AVX)) {
                masm.vzeroupper();
            }
            masm.ret(0);
        }
    }

    public static class BranchOp extends AMD64BlockEndOp implements StandardOp.BranchOp {
        public static final LIRInstructionClass<BranchOp> TYPE = LIRInstructionClass.create(BranchOp.class);
        protected final ConditionFlag condition;
        protected final LabelRef trueDestination;
        protected final LabelRef falseDestination;

        private final double trueDestinationProbability;

        public BranchOp(Condition condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            this(intCond(condition), trueDestination, falseDestination, trueDestinationProbability);
        }

        public BranchOp(ConditionFlag condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            this(TYPE, condition, trueDestination, falseDestination, trueDestinationProbability);
        }

        protected BranchOp(LIRInstructionClass<? extends BranchOp> c, ConditionFlag condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            super(c);
            this.condition = condition;
            this.trueDestination = trueDestination;
            this.falseDestination = falseDestination;
            this.trueDestinationProbability = trueDestinationProbability;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            boolean isNegated = false;
            int jccPos = masm.position();
            /*
             * The strategy for emitting jumps is: If either trueDestination or falseDestination is
             * the successor block, assume the block scheduler did the correct thing and jcc to the
             * other. Otherwise, we need a jcc followed by a jmp. Use the branch probability to make
             * sure it is more likely to branch on the jcc (= less likely to execute both the jcc
             * and the jmp instead of just the jcc). In the case of loops, that means the jcc is the
             * back-edge.
             */
            if (crb.isSuccessorEdge(trueDestination)) {
                jcc(masm, true, falseDestination);
                isNegated = true;
            } else if (crb.isSuccessorEdge(falseDestination)) {
                jcc(masm, false, trueDestination);
            } else if (trueDestinationProbability < 0.5) {
                jcc(masm, true, falseDestination);
                masm.jmp(trueDestination.label());
                isNegated = true;
            } else {
                jcc(masm, false, trueDestination);
                masm.jmp(falseDestination.label());
            }
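            // Record where the jcc was emitted and whether its condition was negated
            // relative to the LIR branch (i.e. whether it jumps to falseDestination).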
            crb.recordBranch(jccPos, isNegated);
        }

        protected void jcc(AMD64MacroAssembler masm, boolean negate, LabelRef target) {
            masm.jcc(negate ? condition.negate() : condition, target.label());
        }
    }

    public static final class FloatBranchOp extends BranchOp {
        public static final LIRInstructionClass<FloatBranchOp> TYPE = LIRInstructionClass.create(FloatBranchOp.class);
        protected boolean unorderedIsTrue;

        public FloatBranchOp(Condition condition, boolean unorderedIsTrue, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            super(TYPE, floatCond(condition), trueDestination, falseDestination, trueDestinationProbability);
            this.unorderedIsTrue = unorderedIsTrue;
        }

        @Override
        protected void jcc(AMD64MacroAssembler masm, boolean negate, LabelRef target) {
            floatJcc(masm, negate ? condition.negate() : condition, negate ? !unorderedIsTrue : unorderedIsTrue, target.label());
        }
    }

    public static class StrategySwitchOp extends AMD64BlockEndOp {
        public static final LIRInstructionClass<StrategySwitchOp> TYPE = LIRInstructionClass.create(StrategySwitchOp.class);
        protected final Constant[] keyConstants;
        private final LabelRef[] keyTargets;
        private LabelRef defaultTarget;
        @Alive({REG}) protected Value key;
        @Temp({REG, ILLEGAL}) protected Value scratch;
        protected final SwitchStrategy strategy;

        public StrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Value key, Value scratch) {
            this(TYPE, strategy, keyTargets, defaultTarget, key, scratch);
        }

        protected StrategySwitchOp(LIRInstructionClass<? extends StrategySwitchOp> c, SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Value key, Value scratch) {
            super(c);
            this.strategy = strategy;
            this.keyConstants = strategy.getKeyConstants();
            this.keyTargets = keyTargets;
            this.defaultTarget = defaultTarget;
            this.key = key;
            this.scratch = scratch;
            assert keyConstants.length == keyTargets.length;
            assert keyConstants.length == strategy.keyProbabilities.length;
        }

        @Override
        public void emitCode(final CompilationResultBuilder crb, final AMD64MacroAssembler masm) {
            strategy.run(new SwitchClosure(asRegister(key), crb, masm));
        }

        public class SwitchClosure extends BaseSwitchClosure {

            protected final Register keyRegister;
            protected final CompilationResultBuilder crb;
            protected final AMD64MacroAssembler masm;

            protected SwitchClosure(Register keyRegister, CompilationResultBuilder crb, AMD64MacroAssembler masm) {
                super(crb, masm, keyTargets, defaultTarget);
                this.keyRegister = keyRegister;
                this.crb = crb;
                this.masm = masm;
            }

            protected void emitComparison(Constant c) {
                JavaConstant jc = (JavaConstant) c;
                switch (jc.getJavaKind()) {
                    case Int:
                        long lc = jc.asLong();
                        assert NumUtil.isInt(lc);
                        masm.cmpl(keyRegister, (int) lc);
                        break;
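                    // CMP has no 64-bit immediate form, so the long key is compared
                    // against its copy in the constant data section.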
                    case Long:
                        masm.cmpq(keyRegister, (AMD64Address) crb.asLongConstRef(jc));
                        break;
                    case Object:
                        AMD64Move.const2reg(crb, masm, asRegister(scratch), jc, AMD64Kind.QWORD);
                        masm.cmpptr(keyRegister, asRegister(scratch));
                        break;
                    default:
                        throw new GraalError("switch only supported for int, long and object");
                }
            }

            @Override
            protected void conditionalJump(int index, Condition condition, Label target) {
                emitComparison(keyConstants[index]);
                masm.jcc(intCond(condition), target);
            }
        }
    }

    public static final class TableSwitchOp extends AMD64BlockEndOp {
        public static final LIRInstructionClass<TableSwitchOp> TYPE = LIRInstructionClass.create(TableSwitchOp.class);
        private final int lowKey;
        private final LabelRef defaultTarget;
        private final LabelRef[] targets;
        @Use protected Value index;
        @Temp({REG, HINT}) protected Value idxScratch;
        @Temp protected Value scratch;

        public TableSwitchOp(final int lowKey, final LabelRef defaultTarget, final LabelRef[] targets, Value index, Variable scratch, Variable idxScratch) {
            super(TYPE);
            this.lowKey = lowKey;
            this.defaultTarget = defaultTarget;
            this.targets = targets;
            this.index = index;
            this.scratch = scratch;
            this.idxScratch = idxScratch;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            Register indexReg = asRegister(index, AMD64Kind.DWORD);
            Register idxScratchReg = asRegister(idxScratch, AMD64Kind.DWORD);
            Register scratchReg = asRegister(scratch, AMD64Kind.QWORD);

            if (!indexReg.equals(idxScratchReg)) {
                masm.movl(idxScratchReg, indexReg);
            }

            // Compare index against jump table bounds
            int highKey = lowKey + targets.length - 1;
            if (lowKey != 0) {
                // subtract the low value from the switch value
                masm.subl(idxScratchReg, lowKey);
                masm.cmpl(idxScratchReg, highKey - lowKey);
            } else {
                masm.cmpl(idxScratchReg, highKey);
            }

            // Jump to default target if index is not within the jump table
            if (defaultTarget != null) {
                masm.jcc(ConditionFlag.Above, defaultTarget.label());
            }

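            // The dispatch sequence emitted below is, in outline:
            //   lea    scratch, [rip + disp]              ; disp patched to point at the table
            //   movslq idxScratch, [scratch + idxScratch*4]
            //   add    scratch, idxScratch
            //   jmp    scratch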
            // Set scratch to address of jump table
            masm.leaq(scratchReg, new AMD64Address(AMD64.rip, 0));
            final int afterLea = masm.position();

            // Load jump table entry into scratch and jump to it
            masm.movslq(idxScratchReg, new AMD64Address(scratchReg, idxScratchReg, Scale.Times4, 0));
            masm.addq(scratchReg, idxScratchReg);
            masm.jmp(scratchReg);

            // Inserting padding so that jump table address is 4-byte aligned
            masm.align(4);

            // Patch LEA instruction above now that we know the position of the jump table
            // this is ugly but there is no better way to do this given the assembler API
            final int jumpTablePos = masm.position();
            final int leaDisplacementPosition = afterLea - 4;
            masm.emitInt(jumpTablePos - afterLea, leaDisplacementPosition);

            // Emit jump table entries
            for (LabelRef target : targets) {
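                // Each entry is a 32-bit offset relative to the start of the table; entries
                // for labels that are not yet bound get a 4-byte placeholder that the label's
                // patching machinery overwrites once the label is bound.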
                Label label = target.label();
                int offsetToJumpTableBase = masm.position() - jumpTablePos;
                if (label.isBound()) {
                    int imm32 = label.position() - jumpTablePos;
                    masm.emitInt(imm32);
                } else {
                    label.addPatchAt(masm.position(), masm);

                    masm.emitByte(0); // pseudo-opcode for jump table entry
                    masm.emitShort(offsetToJumpTableBase);
                    masm.emitByte(0); // padding to make jump table entry 4 bytes wide
                }
            }

            JumpTable jt = new JumpTable(jumpTablePos, lowKey, highKey, 4);
            crb.compilationResult.addAnnotation(jt);
        }
    }
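
    // Hash-based switch dispatch: the precomputed hash of the key selects a jump table
    // entry. When a default target exists, each entry also stores the original key so a
    // hash collision with an unknown key falls through to the default target.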
    public static final class HashTableSwitchOp extends AMD64BlockEndOp {
        public static final LIRInstructionClass<HashTableSwitchOp> TYPE = LIRInstructionClass.create(HashTableSwitchOp.class);
        private final JavaConstant[] keys;
        private final LabelRef defaultTarget;
        private final LabelRef[] targets;
        @Alive protected Value value;
        @Alive protected Value hash;
        @Temp({REG}) protected Value entryScratch;
        @Temp({REG}) protected Value scratch;

        public HashTableSwitchOp(final JavaConstant[] keys, final LabelRef defaultTarget, LabelRef[] targets, Value value, Value hash, Variable scratch, Variable entryScratch) {
            super(TYPE);
            this.keys = keys;
            this.defaultTarget = defaultTarget;
            this.targets = targets;
            this.value = value;
            this.hash = hash;
            this.scratch = scratch;
            this.entryScratch = entryScratch;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            Register valueReg = asRegister(value, AMD64Kind.DWORD);
            Register indexReg = asRegister(hash, AMD64Kind.DWORD);
            Register scratchReg = asRegister(scratch, AMD64Kind.QWORD);
            Register entryScratchReg = asRegister(entryScratch, AMD64Kind.QWORD);

            // Set scratch to address of jump table
            masm.leaq(scratchReg, new AMD64Address(AMD64.rip, 0));
            final int afterLea = masm.position();

            // When the default target is set, the jump table contains entries with two DWORDS:
            // the original key before hashing and the label jump address
            if (defaultTarget != null) {

                // Move the table entry (two DWORDs) into a QWORD
                masm.movq(entryScratchReg, new AMD64Address(scratchReg, indexReg, Scale.Times8, 0));

                // Jump to the default target if the first DWORD (original key) doesn't match the
                // current key. Accounts for hash collisions with unknown keys
                masm.cmpl(entryScratchReg, valueReg);
                masm.jcc(ConditionFlag.NotEqual, defaultTarget.label());

                // Shift to the second DWORD
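                // (the arithmetic shift preserves the sign of the 32-bit jump offset,
                // which is negative when the target label precedes the table)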
                masm.sarq(entryScratchReg, 32);
            } else {

                // The jump table has a single DWORD with the label address if there's no
                // default target
                masm.movslq(entryScratchReg, new AMD64Address(scratchReg, indexReg, Scale.Times4, 0));
            }
            masm.addq(scratchReg, entryScratchReg);
            masm.jmp(scratchReg);

            // Inserting padding so that the jump table address is aligned
            if (defaultTarget != null) {
                masm.align(8);
            } else {
                masm.align(4);
            }

            // Patch LEA instruction above now that we know the position of the jump table
            // this is ugly but there is no better way to do this given the assembler API
            final int jumpTablePos = masm.position();
            final int leaDisplacementPosition = afterLea - 4;
            masm.emitInt(jumpTablePos - afterLea, leaDisplacementPosition);

            // Emit jump table entries
            for (int i = 0; i < targets.length; i++) {

                Label label = targets[i].label();

                if (defaultTarget != null) {
                    masm.emitInt(keys[i].asInt());
                }
                if (label.isBound()) {
                    int imm32 = label.position() - jumpTablePos;
                    masm.emitInt(imm32);
                } else {
                    int offsetToJumpTableBase = masm.position() - jumpTablePos;
                    label.addPatchAt(masm.position(), masm);
                    masm.emitByte(0); // pseudo-opcode for jump table entry
                    masm.emitShort(offsetToJumpTableBase);
                    masm.emitByte(0); // padding to make jump table entry 4 bytes wide
                }
            }

            JumpTable jt = new JumpTable(jumpTablePos, keys[0].asInt(), keys[keys.length - 1].asInt(), 4);
            crb.compilationResult.addAnnotation(jt);
        }
    }

    @Opcode("SETcc")
    public static final class CondSetOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CondSetOp> TYPE = LIRInstructionClass.create(CondSetOp.class);
        @Def({REG, HINT}) protected Value result;
        private final ConditionFlag condition;

        public CondSetOp(Variable result, Condition condition) {
            super(TYPE);
            this.result = result;
            this.condition = intCond(condition);
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            setcc(masm, result, condition);
        }
    }

    @Opcode("SETcc")
    public static final class FloatCondSetOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<FloatCondSetOp> TYPE = LIRInstructionClass.create(FloatCondSetOp.class);
        @Def({REG, HINT}) protected Value result;
        private final ConditionFlag condition;

        public FloatCondSetOp(Variable result, Condition condition) {
            super(TYPE);
            this.result = result;
            this.condition = floatCond(condition);
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            setcc(masm, result, condition);
        }
    }

    @Opcode("CMOVE")
    public static final class CondMoveOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CondMoveOp> TYPE = LIRInstructionClass.create(CondMoveOp.class);
        @Def({REG, HINT}) protected Value result;
        @Alive({REG}) protected Value trueValue;
        @Use({REG, STACK, CONST}) protected Value falseValue;
        private final ConditionFlag condition;

        public CondMoveOp(Variable result, Condition condition, AllocatableValue trueValue, Value falseValue) {
            super(TYPE);
            this.result = result;
            this.condition = intCond(condition);
            this.trueValue = trueValue;
            this.falseValue = falseValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            cmove(crb, masm, result, false, condition, false, trueValue, falseValue);
        }
    }

    @Opcode("CMOVE")
    public static final class FloatCondMoveOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<FloatCondMoveOp> TYPE = LIRInstructionClass.create(FloatCondMoveOp.class);
        @Def({REG}) protected Value result;
        @Alive({REG}) protected Value trueValue;
        @Alive({REG}) protected Value falseValue;
        private final ConditionFlag condition;
        private final boolean unorderedIsTrue;

        public FloatCondMoveOp(Variable result, Condition condition, boolean unorderedIsTrue, Variable trueValue, Variable falseValue) {
            super(TYPE);
            this.result = result;
            this.condition = floatCond(condition);
            this.unorderedIsTrue = unorderedIsTrue;
            this.trueValue = trueValue;
            this.falseValue = falseValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            cmove(crb, masm, result, true, condition, unorderedIsTrue, trueValue, falseValue);
        }
    }

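    // Float compares (ucomiss/ucomisd) report an unordered result (a NaN operand) as
    // ZF = PF = CF = 1. The extra branch on ConditionFlag.Parity below routes the
    // unordered case to whichever destination unorderedIsTrue selects.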
    private static void floatJcc(AMD64MacroAssembler masm, ConditionFlag condition, boolean unorderedIsTrue, Label label) {
        Label endLabel = new Label();
        if (unorderedIsTrue && !trueOnUnordered(condition)) {
            masm.jcc(ConditionFlag.Parity, label);
        } else if (!unorderedIsTrue && trueOnUnordered(condition)) {
            masm.jccb(ConditionFlag.Parity, endLabel);
        }
        masm.jcc(condition, label);
        masm.bind(endLabel);
    }

    private static void cmove(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, boolean isFloat, ConditionFlag condition, boolean unorderedIsTrue, Value trueValue,
                    Value falseValue) {
        // check that we don't overwrite an input operand before it is used.
        assert !result.equals(trueValue);

        AMD64Move.move(crb, masm, result, falseValue);
        cmove(crb, masm, result, condition, trueValue);
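
        // For float compares the main cmov may have picked the wrong value when the
        // comparison was unordered (PF set); a second cmov on Parity corrects it.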
        if (isFloat) {
            if (unorderedIsTrue && !trueOnUnordered(condition)) {
                cmove(crb, masm, result, ConditionFlag.Parity, trueValue);
            } else if (!unorderedIsTrue && trueOnUnordered(condition)) {
                cmove(crb, masm, result, ConditionFlag.Parity, falseValue);
            }
        }
    }

    private static void cmove(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, ConditionFlag cond, Value other) {
        if (isRegister(other)) {
            assert !asRegister(other).equals(asRegister(result)) : "other already overwritten by previous move";
            switch ((AMD64Kind) other.getPlatformKind()) {
                case BYTE:
                case WORD:
                case DWORD:
                    masm.cmovl(cond, asRegister(result), asRegister(other));
                    break;
                case QWORD:
                    masm.cmovq(cond, asRegister(result), asRegister(other));
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        } else {
            AMD64Address addr = (AMD64Address) crb.asAddress(other);
            switch ((AMD64Kind) other.getPlatformKind()) {
                case BYTE:
                case WORD:
                case DWORD:
                    masm.cmovl(cond, asRegister(result), addr);
                    break;
                case QWORD:
                    masm.cmovq(cond, asRegister(result), addr);
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }

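    // Materializes the condition as 0 or 1 in the result register.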
    private static void setcc(AMD64MacroAssembler masm, Value result, ConditionFlag cond) {
        switch ((AMD64Kind) result.getPlatformKind()) {
            case BYTE:
            case WORD:
            case DWORD:
                masm.setl(cond, asRegister(result));
                break;
            case QWORD:
                masm.setq(cond, asRegister(result));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    private static ConditionFlag intCond(Condition cond) {
        switch (cond) {
            case EQ:
                return ConditionFlag.Equal;
            case NE:
                return ConditionFlag.NotEqual;
            case LT:
                return ConditionFlag.Less;
            case LE:
                return ConditionFlag.LessEqual;
            case GE:
                return ConditionFlag.GreaterEqual;
            case GT:
                return ConditionFlag.Greater;
            case BE:
                return ConditionFlag.BelowEqual;
            case AE:
                return ConditionFlag.AboveEqual;
            case AT:
                return ConditionFlag.Above;
            case BT:
                return ConditionFlag.Below;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

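    // ucomiss/ucomisd set the flags like an unsigned compare (CF/ZF), so the signed
    // conditions LT/LE/GE/GT map to the unsigned flags Below/BelowEqual/AboveEqual/Above.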
    private static ConditionFlag floatCond(Condition cond) {
        switch (cond) {
            case EQ:
                return ConditionFlag.Equal;
            case NE:
                return ConditionFlag.NotEqual;
            case LT:
                return ConditionFlag.Below;
            case LE:
                return ConditionFlag.BelowEqual;
            case GE:
                return ConditionFlag.AboveEqual;
            case GT:
                return ConditionFlag.Above;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    public static boolean trueOnUnordered(Condition condition) {
        return trueOnUnordered(floatCond(condition));
    }

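    // An unordered float compare leaves ZF = PF = CF = 1 and clears OF and SF, so each
    // flag condition is classified by whether it holds for that flag combination.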
    private static boolean trueOnUnordered(ConditionFlag condition) {
        switch (condition) {
            case AboveEqual:
            case NotEqual:
            case Above:
            case Less:
            case Overflow:
                return false;
            case Equal:
            case BelowEqual:
            case Below:
            case GreaterEqual:
            case NoOverflow:
                return true;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }
}