/*
 * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package org.graalvm.compiler.core.amd64;

import static jdk.vm.ci.code.ValueUtil.isAllocatableValue;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.DWORD;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.PD;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.PS;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.QWORD;
import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue;
import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;
import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;

import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.core.common.spi.ForeignCallLinkage;
import org.graalvm.compiler.core.common.spi.LIRKindTool;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.ConstantValue;
import org.graalvm.compiler.lir.LIRFrameState;
import org.graalvm.compiler.lir.LIRInstruction;
import org.graalvm.compiler.lir.LIRValueUtil;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.StandardOp.JumpOp;
import org.graalvm.compiler.lir.StandardOp.SaveRegistersOp;
import org.graalvm.compiler.lir.SwitchStrategy;
import org.graalvm.compiler.lir.Variable;
import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool;
import org.graalvm.compiler.lir.amd64.AMD64ArrayEqualsOp;
import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
import org.graalvm.compiler.lir.amd64.AMD64ByteSwapOp;
import org.graalvm.compiler.lir.amd64.AMD64Call;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondMoveOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondMoveOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.ReturnOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.StrategySwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TableSwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64Move;
import org.graalvm.compiler.lir.amd64.AMD64Move.CompareAndSwapOp;
import org.graalvm.compiler.lir.amd64.AMD64Move.MembarOp;
import org.graalvm.compiler.lir.amd64.AMD64Move.StackLeaOp;
import org.graalvm.compiler.lir.amd64.AMD64PauseOp;
import org.graalvm.compiler.lir.amd64.AMD64StringIndexOfOp;
import org.graalvm.compiler.lir.amd64.AMD64ZapRegistersOp;
import org.graalvm.compiler.lir.amd64.AMD64ZapStackOp;
import org.graalvm.compiler.lir.gen.LIRGenerationResult;
import org.graalvm.compiler.lir.gen.LIRGenerator;
import org.graalvm.compiler.phases.util.Providers;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.CallingConvention;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterValue;
import jdk.vm.ci.code.StackSlot;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.meta.VMConstant;
import jdk.vm.ci.meta.Value;
import jdk.vm.ci.meta.ValueKind;

/**
 * This class implements the AMD64 specific portion of the LIR generator.
 */
public abstract class AMD64LIRGenerator extends LIRGenerator {

    public AMD64LIRGenerator(LIRKindTool lirKindTool, AMD64ArithmeticLIRGenerator arithmeticLIRGen, MoveFactory moveFactory, Providers providers, LIRGenerationResult lirGenRes) {
        super(lirKindTool, arithmeticLIRGen, moveFactory, providers, lirGenRes);
    }

    /**
     * Checks whether the supplied constant can be used without loading it into a register for store
     * operations, i.e., on the right hand side of a memory access.
     *
     * @param c The constant to check.
     * @return True if the constant can be used directly, false if the constant needs to be in a
     *         register.
     */
    protected static final boolean canStoreConstant(JavaConstant c) {
        // there is no immediate move of 64-bit constants on Intel
        switch (c.getJavaKind()) {
            case Long:
                // a long is only storable directly if it fits in a 32-bit immediate
                return NumUtil.isInt(c.asLong());
            case Double:
                // 64-bit floating point bit patterns never fit in an immediate
                return false;
            case Object:
                // only the null constant has an immediate encoding (0)
                return c.isNull();
            default:
                return true;
        }
    }

    /**
     * Returns a kind-sized slice of the {@code 0xDEADDEADDEADDEAD} bit pattern used to zap
     * (poison) registers and stack slots so that use of stale values is easy to spot.
     */
    @Override
    protected JavaConstant zapValueForKind(PlatformKind kind) {
        long dead = 0xDEADDEADDEADDEADL;
        switch ((AMD64Kind) kind) {
            case BYTE:
                return JavaConstant.forByte((byte) dead);
            case WORD:
                return JavaConstant.forShort((short) dead);
            case DWORD:
                return JavaConstant.forInt((int) dead);
            case QWORD:
                return JavaConstant.forLong(dead);
            case SINGLE:
                return JavaConstant.forFloat(Float.intBitsToFloat((int) dead));
            default:
                // we don't support vector types, so just zap with double for all of them
                return JavaConstant.forDouble(Double.longBitsToDouble(dead));
        }
    }

    /**
     * Wraps {@code address} in an {@link AMD64AddressValue}. A constant address that fits in a
     * signed 32-bit displacement is encoded as a pure displacement with no base register;
     * otherwise the address is forced into a register and used as the base with displacement 0.
     */
    public AMD64AddressValue asAddressValue(Value address) {
        if (address instanceof AMD64AddressValue) {
            return (AMD64AddressValue) address;
        } else {
            if (address instanceof JavaConstant) {
                long displacement = ((JavaConstant) address).asLong();
                if (NumUtil.isInt(displacement)) {
                    return new AMD64AddressValue(address.getValueKind(), Value.ILLEGAL, (int) displacement);
                }
            }
            return new AMD64AddressValue(address.getValueKind(), asAllocatable(address), 0);
        }
    }

    /**
     * Computes the address of {@code stackslot} into a fresh word-sized variable (emitted as a
     * stack LEA).
     */
    @Override
    public Variable emitAddress(AllocatableValue stackslot) {
        Variable result = newVariable(LIRKind.value(target().arch.getWordKind()));
        append(new StackLeaOp(result, stackslot));
        return result;
    }

    /**
     * The AMD64 backend only uses DWORD and QWORD values in registers because of a performance
     * penalty when accessing WORD or BYTE registers. This function converts small integer kinds to
     * DWORD.
     */
    @Override
    public <K extends ValueKind<K>> K toRegisterKind(K kind) {
        switch ((AMD64Kind) kind.getPlatformKind()) {
            case BYTE:
            case WORD:
                return kind.changeType(AMD64Kind.DWORD);
            default:
                return kind;
        }
    }

    /**
     * Emits a compare-and-swap whose result is a boolean-style selection: {@code trueValue} if the
     * swap succeeded, {@code falseValue} otherwise. The expected value is moved into {@code rax}
     * first, as required by the CMPXCHG convention used by {@link CompareAndSwapOp}, and the
     * success case is selected with an EQ conditional move on the flags left by the swap.
     */
    @Override
    public Variable emitLogicCompareAndSwap(Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) {
        ValueKind<?> kind = newValue.getValueKind();
        assert kind.equals(expectedValue.getValueKind());
        AMD64Kind memKind = (AMD64Kind) kind.getPlatformKind();

        AMD64AddressValue addressValue = asAddressValue(address);
        RegisterValue raxRes = AMD64.rax.asValue(kind);
        emitMove(raxRes, expectedValue);
        append(new CompareAndSwapOp(memKind, raxRes, addressValue, raxRes, asAllocatable(newValue)));

        assert trueValue.getValueKind().equals(falseValue.getValueKind());
        Variable result = newVariable(trueValue.getValueKind());
        append(new CondMoveOp(result, Condition.EQ, asAllocatable(trueValue), falseValue));
        return result;
    }

    /**
     * Emits a compare-and-swap and returns the value that was read from memory (left in
     * {@code rax} by the swap), regardless of whether the swap succeeded.
     */
    @Override
    public Value emitValueCompareAndSwap(Value address, Value expectedValue, Value newValue) {
        ValueKind<?> kind = newValue.getValueKind();
        assert kind.equals(expectedValue.getValueKind());
        AMD64Kind memKind = (AMD64Kind) kind.getPlatformKind();

        AMD64AddressValue addressValue = asAddressValue(address);
        RegisterValue raxRes = AMD64.rax.asValue(kind);
        emitMove(raxRes, expectedValue);
        append(new CompareAndSwapOp(memKind, raxRes, addressValue, raxRes, asAllocatable(newValue)));
        Variable result = newVariable(kind);
        emitMove(result, raxRes);
        return result;
    }

    /**
     * Emits a compare-and-swap immediately followed by a conditional branch on its outcome. Only
     * EQ (swap succeeded) and NE (swap failed) conditions are meaningful here.
     */
    public void emitCompareAndSwapBranch(ValueKind<?> kind, AMD64AddressValue address, Value expectedValue, Value newValue, Condition condition, LabelRef trueLabel, LabelRef falseLabel,
                    double trueLabelProbability) {
        assert kind.equals(expectedValue.getValueKind());
        assert kind.equals(newValue.getValueKind());
        assert condition == Condition.EQ || condition == Condition.NE;
        AMD64Kind memKind = (AMD64Kind) kind.getPlatformKind();
        RegisterValue raxValue = AMD64.rax.asValue(kind);
        emitMove(raxValue, expectedValue);
        append(new CompareAndSwapOp(memKind, raxValue, address, raxValue, asAllocatable(newValue)));
        // branch directly on the flags produced by the compare-and-swap
        append(new BranchOp(condition, trueLabel, falseLabel, trueLabelProbability));
    }

    /**
     * Atomically adds {@code delta} to the memory location and returns the value previously stored
     * there.
     */
    @Override
    public Value emitAtomicReadAndAdd(Value address, Value delta) {
        ValueKind<?> kind = delta.getValueKind();
        Variable result = newVariable(kind);
        AMD64AddressValue addressValue = asAddressValue(address);
        append(new AMD64Move.AtomicReadAndAddOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(delta)));
        return result;
    }

    /**
     * Atomically exchanges the memory location with {@code newValue} and returns the value
     * previously stored there.
     */
    @Override
    public Value emitAtomicReadAndWrite(Value address, Value newValue) {
        ValueKind<?> kind = newValue.getValueKind();
        Variable result = newVariable(kind);
        AMD64AddressValue addressValue = asAddressValue(address);
        append(new AMD64Move.AtomicReadAndWriteOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(newValue)));
        return result;
    }

    /**
     * Emits a null check of {@code address}; {@code state} describes the deoptimization frame to
     * use if the check traps.
     */
    @Override
    public void emitNullCheck(Value address, LIRFrameState state) {
        append(new AMD64Move.NullCheckOp(asAddressValue(address), state));
    }

    @Override
    public void emitJump(LabelRef label) {
        assert label != null;
        append(new JumpOp(label));
    }

    /**
     * Emits a compare followed by a conditional branch. Floating point comparisons use
     * {@link FloatBranchOp} so that the {@code unorderedIsTrue} semantics (NaN handling) are
     * honored. If {@link #emitCompare} swapped the operands, the condition is mirrored to match.
     */
    @Override
    public void emitCompareBranch(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel, double trueLabelProbability) {
        boolean mirrored = emitCompare(cmpKind, left, right);
        Condition finalCondition = mirrored ? cond.mirror() : cond;
        if (cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE) {
            append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
        } else {
            append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
        }
    }

    /**
     * Emits a compare against a memory operand followed by a conditional branch, mirroring the
     * condition if the compare had to switch its operands.
     */
    public void emitCompareBranchMemory(AMD64Kind cmpKind, Value left, AMD64AddressValue right, LIRFrameState state, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel,
                    double trueLabelProbability) {
        boolean mirrored = emitCompareMemory(cmpKind, left, right, state);
        Condition finalCondition = mirrored ? cond.mirror() : cond;
        if (cmpKind.isXMM()) {
            append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
        } else {
            append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
        }
    }

    /**
     * Branches on the overflow flag left by the most recently emitted arithmetic operation.
     * {@code cmpLIRKind} is unused on AMD64 because the flag already encodes the result width.
     */
    @Override
    public void emitOverflowCheckBranch(LabelRef overflow, LabelRef noOverflow, LIRKind cmpLIRKind, double overflowProbability) {
        append(new BranchOp(ConditionFlag.Overflow, overflow, noOverflow, overflowProbability));
    }

    /**
     * Emits {@code TEST left, right} and branches to {@code trueDestination} when the bitwise AND
     * is zero (EQ).
     */
    @Override
    public void emitIntegerTestBranch(Value left, Value right, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
        emitIntegerTest(left, right);
        append(new BranchOp(Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
    }

    /**
     * Emits a compare followed by a conditional move selecting between {@code trueValue} and
     * {@code falseValue}. Floating point comparisons use {@link FloatCondMoveOp} for correct
     * unordered (NaN) handling; note that for the float case both inputs must be in registers.
     */
    @Override
    public Variable emitConditionalMove(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, Value trueValue, Value falseValue) {
        boolean mirrored = emitCompare(cmpKind, left, right);
        Condition finalCondition = mirrored ? cond.mirror() : cond;

        Variable result = newVariable(trueValue.getValueKind());
        if (cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE) {
            append(new FloatCondMoveOp(result, finalCondition, unorderedIsTrue, load(trueValue), load(falseValue)));
        } else {
            append(new CondMoveOp(result, finalCondition, load(trueValue), loadNonConst(falseValue)));
        }
        return result;
    }

    /**
     * Emits {@code TEST left, right} and conditionally moves {@code trueValue} into the result
     * when the bitwise AND is zero (EQ), {@code falseValue} otherwise.
     */
    @Override
    public Variable emitIntegerTestMove(Value left, Value right, Value trueValue, Value falseValue) {
        emitIntegerTest(left, right);
        Variable result = newVariable(trueValue.getValueKind());
        append(new CondMoveOp(result, Condition.EQ, load(trueValue), loadNonConst(falseValue)));
        return result;
    }

    /**
     * Emits a TEST instruction for {@code a & b}, setting the flags only. Uses the immediate form
     * when either operand is a constant fitting in 32 bits (TEST is commutative, so the constant
     * may come from either side); otherwise prefers an already-allocatable {@code b} as the
     * register/memory operand.
     */
    private void emitIntegerTest(Value a, Value b) {
        assert ((AMD64Kind) a.getPlatformKind()).isInteger();
        OperandSize size = a.getPlatformKind() == AMD64Kind.QWORD ? QWORD : DWORD;
        if (isJavaConstant(b) && NumUtil.is32bit(asJavaConstant(b).asLong())) {
            append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(a), (int) asJavaConstant(b).asLong()));
        } else if (isJavaConstant(a) && NumUtil.is32bit(asJavaConstant(a).asLong())) {
            append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(b), (int) asJavaConstant(a).asLong()));
        } else if (isAllocatableValue(b)) {
            append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(b), asAllocatable(a)));
        } else {
            append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(a), asAllocatable(b)));
        }
    }

    /**
     * This method emits the compare against memory instruction, and may reorder the operands. It
     * returns true if it did so.
     *
     * @param b the right operand of the comparison
     * @return true if the left and right operands were switched, false otherwise
     */
    private boolean emitCompareMemory(AMD64Kind cmpKind, Value a, AMD64AddressValue b, LIRFrameState state) {
        OperandSize size;
        switch (cmpKind) {
            case BYTE:
                size = OperandSize.BYTE;
                break;
            case WORD:
                size = OperandSize.WORD;
                break;
            case DWORD:
                size = OperandSize.DWORD;
                break;
            case QWORD:
                size = OperandSize.QWORD;
                break;
            case SINGLE:
                // float compares go straight to UCOMIS; operands are never switched
                append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PS, asAllocatable(a), b, state));
                return false;
            case DOUBLE:
                append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PD, asAllocatable(a), b, state));
                return false;
            default:
                throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
        }

        if (isConstantValue(a)) {
            return emitCompareMemoryConOp(size, asConstantValue(a), b, state);
        } else {
            return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
        }
    }

    /**
     * Emits a compare of a constant {@code a} against the memory operand {@code b}. CMP only
     * accepts an immediate on the right, so when the constant can be encoded as an immediate the
     * operands are effectively switched and {@code true} is returned (the caller must mirror its
     * condition); a constant too wide for an immediate is loaded into a register instead.
     */
    protected boolean emitCompareMemoryConOp(OperandSize size, ConstantValue a, AMD64AddressValue b, LIRFrameState state) {
        if (JavaConstant.isNull(a.getConstant())) {
            // null compares as the immediate 0
            append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, 0, state));
            return true;
        } else if (a.getConstant() instanceof VMConstant && size == DWORD) {
            VMConstant vc = (VMConstant) a.getConstant();
            append(new AMD64BinaryConsumer.MemoryVMConstOp(CMP.getMIOpcode(size, false), b, vc, state));
            return true;
        } else {
            long value = a.getJavaConstant().asLong();
            if (NumUtil.is32bit(value)) {
                append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, (int) value, state));
                return true;
            } else {
                return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
            }
        }
    }

    /**
     * Emits {@code CMP a, [b]} with the register on the left; operands are never switched, so this
     * always returns {@code false}.
     */
    private boolean emitCompareRegMemoryOp(OperandSize size, AllocatableValue a, AMD64AddressValue b, LIRFrameState state) {
        AMD64RMOp op = CMP.getRMOpcode(size);
        append(new AMD64BinaryConsumer.MemoryRMOp(op, size, a, b, state));
        return false;
    }

    /**
     * This method emits the compare instruction, and may reorder the operands. It returns true if
     * it did so.
     *
     * @param a the left operand of the comparison
     * @param b the right operand of the comparison
     * @return true if the left and right operands were switched, false otherwise
     */
    private boolean emitCompare(PlatformKind cmpKind, Value a, Value b) {
        Variable left;
        Value right;
        boolean mirrored;
        if (LIRValueUtil.isVariable(b)) {
            // prefer keeping an existing variable on the left to avoid an extra load
            left = load(b);
            right = loadNonConst(a);
            mirrored = true;
        } else {
            left = load(a);
            right = loadNonConst(b);
            mirrored = false;
        }
        ((AMD64ArithmeticLIRGeneratorTool) arithmeticLIRGen).emitCompareOp((AMD64Kind) cmpKind, left, right);
        return mirrored;
    }

    /**
     * Emits a memory barrier for the given barrier bits, but only on multiprocessor targets and
     * only for the barriers the architecture actually requires.
     */
    @Override
    public void emitMembar(int barriers) {
        int necessaryBarriers = target().arch.requiredBarriers(barriers);
        if (target().isMP && necessaryBarriers != 0) {
            append(new MembarOp(necessaryBarriers));
        }
    }

    public abstract void emitCCall(long address, CallingConvention nativeCallingConvention, Value[] args, int numberOfFloatingPointArguments);

    /**
     * Emits a direct foreign call. A far call is used when the call target may be further than a
     * signed 32-bit displacement away; with {@code GeneratePIC} a near call is emitted even then
     * (presumably because PIC code is relocated/patched later — confirm against the PIC support).
     */
    @Override
    protected void emitForeignCallOp(ForeignCallLinkage linkage, Value result, Value[] arguments, Value[] temps, LIRFrameState info) {
        long maxOffset = linkage.getMaxCallTargetOffset();
        if (maxOffset != (int) maxOffset && !GeneratePIC.getValue(getResult().getLIR().getOptions())) {
            append(new AMD64Call.DirectFarForeignCallOp(linkage, result, arguments, temps, info));
        } else {
            append(new AMD64Call.DirectNearForeignCallOp(linkage, result, arguments, temps, info));
        }
    }

    /**
     * Emits a byte-order reversal of {@code input} into a fresh variable of the same kind.
     */
    @Override
    public Variable emitByteSwap(Value input) {
        Variable result = newVariable(LIRKind.combine(input));
        append(new AMD64ByteSwapOp(result, input));
        return result;
    }

    /**
     * Emits an array-equality comparison of {@code length} elements of kind {@code kind}; the
     * DWORD result encodes the outcome (see {@link AMD64ArrayEqualsOp}).
     */
    @Override
    public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayEqualsOp(this, kind, result, array1, array2, asAllocatable(length)));
        return result;
    }

    /**
     * Return a conservative estimate of the page size for use by the String.indexOf intrinsic.
     */
    protected int getVMPageSize() {
        return 4096;
    }

    /**
     * Emits the String.indexOf intrinsic. The counts are pinned to the fixed registers required by
     * {@link AMD64StringIndexOfOp} ({@code rdx} for the source count, {@code rax} for the target
     * count); {@code rcx} and {@code xmm0} are handed over as scratch values.
     */
    @Override
    public Variable emitStringIndexOf(Value source, Value sourceCount, Value target, Value targetCount, int constantTargetCount) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        RegisterValue cnt1 = AMD64.rdx.asValue(sourceCount.getValueKind());
        emitMove(cnt1, sourceCount);
        RegisterValue cnt2 = AMD64.rax.asValue(targetCount.getValueKind());
        emitMove(cnt2, targetCount);
        append(new AMD64StringIndexOfOp(this, result, source, target, cnt1, cnt2, AMD64.rcx.asValue(), AMD64.xmm0.asValue(), constantTargetCount, getVMPageSize()));
        return result;
    }

    /**
     * Emits a return, first moving {@code input} (if any) into the calling convention's result
     * operand; a {@code null} input produces a void return ({@code Value.ILLEGAL} operand).
     */
    @Override
    public void emitReturn(JavaKind kind, Value input) {
        AllocatableValue operand = Value.ILLEGAL;
        if (input != null) {
            operand = resultOperandFor(kind, input.getValueKind());
            emitMove(operand, input);
        }
        append(new ReturnOp(operand));
    }

    // Hook so subclasses can substitute their own strategy-switch LIR op.
    protected StrategySwitchOp createStrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Variable key, AllocatableValue temp) {
        return new StrategySwitchOp(strategy, keyTargets, defaultTarget, key, temp);
    }

    @Override
    public void emitStrategySwitch(SwitchStrategy strategy, Variable key, LabelRef[] keyTargets, LabelRef defaultTarget) {
        // a temp is needed for loading object constants
        boolean needsTemp = !LIRKind.isValue(key);
        append(createStrategySwitchOp(strategy, keyTargets, defaultTarget, key, needsTemp ? newVariable(key.getValueKind()) : Value.ILLEGAL));
    }

    /**
     * Emits a jump-table switch; the two fresh variables serve as scratch (a word-sized index
     * temp and a temp of the key's kind) for the table dispatch.
     */
    @Override
    protected void emitTableSwitch(int lowKey, LabelRef defaultTarget, LabelRef[] targets, Value key) {
        append(new TableSwitchOp(lowKey, defaultTarget, targets, key, newVariable(LIRKind.value(target().arch.getWordKind())), newVariable(key.getValueKind())));
    }

    @Override
    public void emitPause() {
        append(new AMD64PauseOp());
    }

    @Override
    public SaveRegistersOp createZapRegisters(Register[] zappedRegisters, JavaConstant[] zapValues) {
        return new AMD64ZapRegistersOp(zappedRegisters, zapValues);
    }

    @Override
    public LIRInstruction createZapArgumentSpace(StackSlot[] zappedStack, JavaConstant[] zapValues) {
        return new AMD64ZapStackOp(zappedStack, zapValues);
    }
}
|