/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"

void MacroAssembler::breakpoint(AsmCondition cond) {
  if (cond == al) {
    emit_int32(0xe7f001f0);
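    // 0xe7f001f0 is presumably the permanently-undefined ARM encoding that
    // the Linux kernel turns into a breakpoint trap; a conditional breakpoint
    // falls back to os::breakpoint() below instead.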
  } else {
    call(CAST_FROM_FN_PTR(address, os::breakpoint), relocInfo::runtime_call_type, cond);
  }
}

// atomic_cas_bool
//
// Perform an atomic compare and exchange and return bool result
//
// inputs:
//         oldval value to compare to
//         newval value to store if *(base+offset) == oldval
//         base   base address of storage location
//         offset offset added to base to form dest address
// output:
//         Z flag is set on success

void MacroAssembler::atomic_cas_bool(Register oldval, Register newval, Register base, int offset, Register tmpreg) {
  if (VM_Version::supports_ldrex()) {
    Register tmp_reg;
    if (tmpreg == noreg) {
      push(LR);
      tmp_reg = LR;
    } else {
      tmp_reg = tmpreg;
    }
    assert_different_registers(tmp_reg, oldval, newval, base);
    Label loop;
    bind(loop);
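    // ldrex/strex retry loop: strex writes 0 to tmp_reg on success and 1 if
    // the exclusive reservation was lost, in which case we retry. The final
    // cmp(tmp_reg, 0) below leaves Z set exactly when the exchange succeeded.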
    ldrex(tmp_reg, Address(base, offset));
    subs(tmp_reg, tmp_reg, oldval);
    strex(tmp_reg, newval, Address(base, offset), eq);
    cmp(tmp_reg, 1, eq);
    b(loop, eq);
    cmp(tmp_reg, 0);
    if (tmpreg == noreg) {
      pop(tmp_reg);
    }
  } else if (VM_Version::supports_kuser_cmpxchg32()) {
    // On armv5 platforms we must use the Linux kernel helper
    // function for atomic cas operations since ldrex/strex is
    // not supported.
    //
    // This is a special routine at a fixed address 0xffff0fc0
    // with these arguments and results
    //
    // input:
    //  r0 = oldval, r1 = newval, r2 = ptr, lr = return address
    // output:
    //  r0 = 0 carry set on success
    //  r0 != 0 carry clear on failure
    //
    // r3, ip and flags are clobbered
    //

    Label loop;

    push(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(LR));

    Register tmp_reg = LR; // ignore the argument
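    // LR is safe to use as scratch here: it was saved by the push above and
    // is restored by the matching pop at the end of this branch.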

    assert_different_registers(tmp_reg, oldval, newval, base);

    // Shuffle registers for kernel call
    if (oldval != R0) {
      if (newval == R0) {
        mov(tmp_reg, newval);
        newval = tmp_reg;
      }
      if (base == R0) {
        mov(tmp_reg, base);
        base = tmp_reg;
      }
      mov(R0, oldval);
    }
    if (newval != R1) {
      if (base == R1) {
        if (newval == R2) {
          mov(tmp_reg, base);
          base = tmp_reg;
        } else {
          mov(R2, base);
          base = R2;
        }
      }
      mov(R1, newval);
    }
    if (base != R2)
      mov(R2, base);

    if (offset != 0)
      add(R2, R2, offset);
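
    // Call the kuser helper __kernel_cmpxchg: mvn(R3, 0xf000) gives
    // R3 = 0xffff0fff, and subtracting 0x3f yields the fixed entry point
    // 0xffff0fc0. mov(LR, PC) reads PC as "this instruction + 8", so LR ends
    // up pointing at the instruction after the sub, where the helper returns.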
    mvn(R3, 0xf000);
    mov(LR, PC);
    sub(PC, R3, 0x3f);
    cmp(R0, 0);

    pop(RegisterSet(R0, R3) | RegisterSet(R12) | RegisterSet(LR));
  } else {
    // Should never run on a platform so old that it does not have kernel helper
    stop("Atomic cmpxchg32 unsupported on this platform");
  }
}

// atomic_cas
//
// Perform an atomic compare and exchange and return previous value
//
// inputs:
//         prev temporary register (destroyed)
//         oldval value to compare to
//         newval value to store if *(base+offset) == oldval
//         base   base address of storage location
//         offset offset added to base to form dest address
// output:
//         returns previous value from *(base+offset) in R0

void MacroAssembler::atomic_cas(Register temp1, Register temp2, Register oldval, Register newval, Register base, int offset) {
  if (temp1 != R0) {
    // try to read the previous value directly in R0
    if (temp2 == R0) {
      // R0 declared free
      temp2 = temp1;
      temp1 = R0;
    } else if ((oldval != R0) && (newval != R0) && (base != R0)) {
      // free, and scratched on return
      temp1 = R0;
    }
  }
  if (VM_Version::supports_ldrex()) {
    Label loop;
    assert_different_registers(temp1, temp2, oldval, newval, base);

    bind(loop);
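    // Same ldrex/strex retry pattern as in atomic_cas_bool, except the loaded
    // value is kept in temp1 so the previous contents can be returned in R0.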
    ldrex(temp1, Address(base, offset));
    cmp(temp1, oldval);
    strex(temp2, newval, Address(base, offset), eq);
    cmp(temp2, 1, eq);
    b(loop, eq);
    if (temp1 != R0) {
      mov(R0, temp1);
    }
  } else if (VM_Version::supports_kuser_cmpxchg32()) {
    // On armv5 platforms we must use the Linux kernel helper
    // function for atomic cas operations since ldrex/strex is
    // not supported.
    //
    // This is a special routine at a fixed address 0xffff0fc0
    //
    // input:
    //  r0 = oldval, r1 = newval, r2 = ptr, lr = return address
    // output:
    //  r0 = 0 carry set on success
    //  r0 != 0 carry clear on failure
    //
    // r3, ip and flags are clobbered
    //
    Label done;
    Label loop;

    push(RegisterSet(R1, R4) | RegisterSet(R12) | RegisterSet(LR));

    if (oldval != R0 || newval != R1 || base != R2) {
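      // Move oldval/newval/base into R0/R1/R2 by pushing all three and
      // popping in reverse order, which stays correct even if the source
      // registers overlap with R0-R2.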
      push(oldval);
      push(newval);
      push(base);
      pop(R2);
      pop(R1);
      pop(R0);
    }

    if (offset != 0) {
      add(R2, R2, offset);
    }

    mov(R4, R0);
    bind(loop);
    ldr(R0, Address(R2));
    cmp(R0, R4);
    b(done, ne);
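    // Current value matches oldval: call the kuser helper __kernel_cmpxchg at
    // 0xffff0fc0 (R12 = 0xffff0fff, minus 0x3f); mov(LR, PC) makes LR point at
    // the instruction after the sub. Carry clear on return means the word
    // changed underneath us, so reload and compare again.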
    mvn(R12, 0xf000);
    mov(LR, PC);
    sub(PC, R12, 0x3f);
    b(loop, cc);
    mov(R0, R4);
    bind(done);

    pop(RegisterSet(R1, R4) | RegisterSet(R12) | RegisterSet(LR));
  } else {
    // Should never run on a platform so old that it does not have kernel helper
    stop("Atomic cmpxchg32 unsupported on this platform");
  }
}

// atomic_cas64
//
// Perform a 64 bit atomic compare and exchange and return previous value
// as well as returning status in 'result' register
//
// inputs:
//         oldval_lo, oldval_hi value to compare to
//         newval_lo, newval_hi value to store if *(base+offset) == oldval
//         base   base address of storage location
//         offset offset added to base to form dest address
// output:
//         memval_lo, memval_hi, result
//         returns previous value from *(base+offset) in memval_lo/hi
//         returns status in result, 1==success, 0==failure
//         C1 just uses status result
//         VM code uses previous value returned in memval_lo/hi

void MacroAssembler::atomic_cas64(Register memval_lo, Register memval_hi, Register result, Register oldval_lo, Register oldval_hi, Register newval_lo, Register newval_hi, Register base, int offset) {
  if (VM_Version::supports_ldrexd()) {
    Label loop;
    assert_different_registers(memval_lo, memval_hi, result, oldval_lo,
                               oldval_hi, newval_lo, newval_hi, base);
    assert(memval_hi == memval_lo + 1 && memval_lo < R9, "cmpxchg_long: illegal registers");
    assert(oldval_hi == oldval_lo + 1 && oldval_lo < R9, "cmpxchg_long: illegal registers");
    assert(newval_hi == newval_lo + 1 && newval_lo < R9, "cmpxchg_long: illegal registers");
    assert(result != R10, "cmpxchg_long: illegal registers");
    assert(base != R10, "cmpxchg_long: illegal registers");

    mov(result, 0);
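    // ldrexd/strexd retry loop. strexd writes 0 to result on success; the
    // conditional rsbs then gives result = 1 with Z clear, so the loop exits.
    // A lost reservation gives result = 0 with Z set and the loop retries.
    // If the compare itself fails, result keeps the 0 set above.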
    bind(loop);
    ldrexd(memval_lo, Address(base, offset));
    cmp(memval_lo, oldval_lo);
    cmp(memval_hi, oldval_hi, eq);
    strexd(result, newval_lo, Address(base, offset), eq);
    rsbs(result, result, 1, eq);
    b(loop, eq);
  } else if (VM_Version::supports_kuser_cmpxchg64()) {
    // On armv5 platforms we must use the Linux kernel helper
    // function for atomic cas64 operations since ldrexd/strexd is
    // not supported.
    //
    // This is a special routine at a fixed address 0xffff0f60
    //
    // input:
    //  r0 = (long long *)oldval, r1 = (long long *)newval,
    //  r2 = ptr, lr = return address
    // output:
    //  r0 = 0 carry set on success
    //  r0 != 0 carry clear on failure
    //
    // r3, and flags are clobbered
    //
    Label done;
    Label loop;

    if (result != R12) {
      push(R12);
    }
    push(RegisterSet(R10) | RegisterSet(LR));
    mov(R10, SP); // Save SP

    bic(SP, SP, StackAlignmentInBytes - 1); // align stack
    push(RegisterSet(oldval_lo, oldval_hi));
    push(RegisterSet(newval_lo, newval_hi));

    if ((offset != 0) || (base != R12)) {
      add(R12, base, offset);
    }
    push(RegisterSet(R0, R3));
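    // Stack layout from here on: saved R0-R3 at SP+0..15, newval_lo/hi at
    // SP+16, oldval_lo/hi at SP+24. The SP-relative loads below and the
    // &oldval / &newval arguments passed to the helper rely on these offsets.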
    bind(loop);
    ldrd(memval_lo, Address(R12)); // current
    ldrd(oldval_lo, Address(SP, 24));
    cmp(memval_lo, oldval_lo);
    cmp(memval_hi, oldval_hi, eq);
    pop(RegisterSet(R0, R3), ne);
    mov(result, 0, ne);
    b(done, ne);
    // Setup for kernel call
    mov(R2, R12);
    add(R0, SP, 24);  // R0 == &oldval_lo
    add(R1, SP, 16);  // R1 == &newval_lo
    mvn(R3, 0xf000);  // call kernel helper at 0xffff0f60
    mov(LR, PC);
    sub(PC, R3, 0x9f);
    b(loop, cc);      // if Carry clear then oldval != current,
                      // try again. Otherwise, return oldval.
    // Here on success
    pop(RegisterSet(R0, R3));
    mov(result, 1);
    ldrd(memval_lo, Address(SP, 8));
    bind(done);
    pop(RegisterSet(newval_lo, newval_hi));
    pop(RegisterSet(oldval_lo, oldval_hi));
    mov(SP, R10); // restore SP
    pop(RegisterSet(R10) | RegisterSet(LR));
    if (result != R12) {
      pop(R12);
    }
  } else {
    stop("Atomic cmpxchg64 unsupported on this platform");
  }
}