/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2019, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#ifdef COMPILER2
#include "opto/compile.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/matcher.hpp"
#endif
#include "prims/methodHandles.hpp"
#include "registerSaver_s390.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"

#include <ucontext.h>

#define BLOCK_COMMENT(str) block_comment(str)
#define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")

// Move 32-bit register if destination and source are different.
void MacroAssembler::lr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_lr(rd, rs); }
}

// Move register if destination and source are different.
void MacroAssembler::lgr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_lgr(rd, rs); }
}

// Zero-extend 32-bit register into 64-bit register if destination and source are different.
void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_llgfr(rd, rs); }
}

// Move float register if destination and source are different.
void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {
  if (rs != rd) { z_ldr(rd, rs); }
}

// Move integer register if destination and source are different.
// It is assumed that shorter-than-int types are already
// appropriately sign-extended.
void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,
                                        BasicType src_type) {
  assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");
  assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");

  if (dst_type == src_type) {
    lgr_if_needed(dst, src); // Just move all 64 bits.
    return;
  }

  switch (dst_type) {
    // Do not support these types for now.
    //  case T_BOOLEAN:
    case T_BYTE:  // signed byte
      switch (src_type) {
        case T_INT:
          z_lgbr(dst, src);
          break;
        default:
          ShouldNotReachHere();
      }
      return;

    case T_CHAR:
    case T_SHORT:
      switch (src_type) {
        case T_INT:
          if (dst_type == T_CHAR) {
            z_llghr(dst, src);
          } else {
            z_lghr(dst, src);
          }
          break;
        default:
          ShouldNotReachHere();
      }
      return;

    case T_INT:
      switch (src_type) {
        case T_BOOLEAN:
        case T_BYTE:
        case T_CHAR:
        case T_SHORT:
        case T_INT:
        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lr_if_needed(dst, src);
          // llgfr_if_needed(dst, src);  // zero-extend (in case we need to find a bug).
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
    case T_LONG:
      switch (src_type) {
        case T_BOOLEAN:
        case T_BYTE:
        case T_CHAR:
        case T_SHORT:
        case T_INT:
          z_lgfr(dst, src); // sign extension
          return;

        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lgr_if_needed(dst, src);
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
      return;
    case T_OBJECT:
    case T_ARRAY:
    case T_VOID:
    case T_ADDRESS:
      switch (src_type) {
        // These types don't make sense to be converted to pointers:
        //      case T_BOOLEAN:
        //      case T_BYTE:
        //      case T_CHAR:
        //      case T_SHORT:

        case T_INT:
          z_llgfr(dst, src); // zero extension
          return;

        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lgr_if_needed(dst, src);
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
      return;
    default:
      assert(false, "non-integer dst type");
      return;
  }
}
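// Illustrative example (added for this annotated listing, not part of the upstream file):
// move_reg_if_needed(Z_R3, T_LONG, Z_R2, T_INT) takes the T_LONG/T_INT branch and emits
// z_lgfr(Z_R3, Z_R2), a sign-extending 32->64 bit move. When dst_type == src_type the call
// degenerates to lgr_if_needed, which emits nothing at all if both arguments name the
// same register.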

// Move float register if destination and source are different.
void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,
                                         FloatRegister src, BasicType src_type) {
  assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");
  assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");
  if (dst_type == src_type) {
    ldr_if_needed(dst, src); // Just move all 64 bits.
  } else {
    switch (dst_type) {
      case T_FLOAT:
        assert(src_type == T_DOUBLE, "invalid float type combination");
        z_ledbr(dst, src);
        return;
      case T_DOUBLE:
        assert(src_type == T_FLOAT, "invalid float type combination");
        z_ldebr(dst, src);
        return;
      default:
        assert(false, "non-float dst type");
        return;
    }
  }
}

// Optimized emitter for reg to mem operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::freg2mem_opt(FloatRegister reg,
                                  int64_t       disp,
                                  Register      index,
                                  Register      base,
                                  void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                                  void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                                  Register      scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) { // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp);  // Restore base.
            }
          }
        } else {   // scratch == Z_R0
          z_lgr(scratch, base);
          add2reg(base, disp);
          (this->*classic)(reg, 0, index, base);
          z_lgr(base, scratch);  // Restore base.
        }
      }
    }
  }
}

void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {
  if (is_double) {
    freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));
  } else {
    freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));
  }
}
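// Illustrative note (added for this annotated listing, not part of the upstream file):
// for a double store with a displacement of 0x800 (fits the 12-bit unsigned range) the
// emitter above picks the classic z_std form; a displacement of 0x12345 still fits the
// 20-bit signed range and selects the modern z_stdy form; a displacement such as 0x100000
// takes the scratch-register path, which pre-computes base+disp before the classic
// instruction and restores the base register afterwards.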

// Optimized emitter for mem to reg operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::mem2freg_opt(FloatRegister reg,
                                  int64_t       disp,
                                  Register      index,
                                  Register      base,
                                  void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                                  void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                                  Register      scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) { // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp);  // Restore base.
            }
          }
        } else {   // scratch == Z_R0
          z_lgr(scratch, base);
          add2reg(base, disp);
          (this->*classic)(reg, 0, index, base);
          z_lgr(base, scratch);  // Restore base.
        }
      }
    }
  }
}

void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {
  if (is_double) {
    mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));
  } else {
    mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));
  }
}

// Optimized emitter for reg to mem operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register
// (Z_R0 by default)
// CAUTION! passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::reg2mem_opt(Register reg,
                                 int64_t  disp,
                                 Register index,
                                 Register base,
                                 void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                                 void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
                                 Register scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) { // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp);  // Restore base.
            }
          }
        } else {   // scratch == Z_R0
          if ((scratch == reg) || (scratch == base) || (reg == base)) {
            (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
          } else {
            z_lgr(scratch, base);
            add2reg(base, disp);
            (this->*classic)(reg, 0, index, base);
            z_lgr(base, scratch);  // Restore base.
          }
        }
      }
    }
  }
}

int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {
  int store_offset = offset();
  if (is_double) {
    reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));
  } else {
    reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));
  }
  return store_offset;
}

// Optimized emitter for mem to reg operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) will be used as work register where possible.
void MacroAssembler::mem2reg_opt(Register reg,
                                 int64_t  disp,
                                 Register index,
                                 Register base,
                                 void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                                 void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if ((reg == index) && (reg == base)) {
        z_sllg(reg, reg, 1);
        add2reg(reg, disp);
        (this->*classic)(reg, 0, noreg, reg);
      } else if ((reg == index) && (reg != Z_R0)) {
        add2reg(reg, disp);
        (this->*classic)(reg, 0, reg, base);
      } else if (reg == base) {
        add2reg(reg, disp);
        (this->*classic)(reg, 0, index, reg);
      } else if (reg != Z_R0) {
        add2reg(reg, disp, base);
        (this->*classic)(reg, 0, index, reg);
      } else { // reg == Z_R0 && reg != base here
        add2reg(base, disp);
        (this->*classic)(reg, 0, index, base);
        add2reg(base, -disp);
      }
    }
  }
}

void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {
  if (is_double) {
    z_lg(reg, a);
  } else {
    mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));
  }
}

void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {
  mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));
}

void MacroAssembler::and_imm(Register r, long mask,
                             Register tmp /* = Z_R0 */,
                             bool wide    /* = false */) {
  assert(wide || Immediate::is_simm32(mask), "mask value too large");

  if (!wide) {
    z_nilf(r, mask);
    return;
  }

  assert(r != tmp, " need a different temporary register !");
  load_const_optimized(tmp, mask);
  z_ngr(r, tmp);
}

// Calculate the 1's complement.
// Note: The condition code is neither preserved nor correctly set by this code!!!
// Note: (wide == false) does not protect the high order half of the target register
//       from alteration. It only serves as optimization hint for 32-bit results.
void MacroAssembler::not_(Register r1, Register r2, bool wide) {

  if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.
    z_xilf(r1, -1);
    if (wide) {
      z_xihf(r1, -1);
    }
  } else { // Distinct src and dst registers.
    load_const_optimized(r1, -1);
    z_xgr(r1, r2);
  }
}
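// Illustrative example (added for this annotated listing, not part of the upstream file):
// not_(r, noreg, true) complements all 64 bits of r in place via z_xilf(r, -1) followed by
// z_xihf(r, -1); with wide == false only z_xilf is emitted in the in-place case, i.e. only
// the low 32 bits are complemented.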

unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {
  assert(lBitPos >= 0,       "zero is leftmost bit position");
  assert(rBitPos <= 63,      "63 is rightmost bit position");
  assert(lBitPos <= rBitPos, "inverted selection interval");
  return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));
}
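// Illustrative example (added for this annotated listing, not part of the upstream file):
// bit positions follow IBM numbering with bit 0 as the most significant bit, so
// create_mask(8, 15) == 0x00ff000000000000UL and create_mask(48, 63) == 0x000000000000ffffUL.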

// Helper function for the "Rotate_then_<logicalOP>" emitters.
// Rotate src, then mask register contents such that only bits in range survive.
// For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.
// For oneBits == true,  all bits not in range are set to 1. Useful for preserving all bits outside range.
// The caller must ensure that the selected range only contains bits with defined value.
void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
                                      int nRotate, bool src32bit, bool dst32bit, bool oneBits) {
  assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");
  bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).
  bool srl4rll = (nRotate < 0) && (-nRotate <= lBitPos);      // Substitute SRL(G) for RLL(G).
  // Pre-determine which parts of dst will be zero after shift/rotate.
  bool llZero  =  sll4rll && (nRotate >= 16);
  bool lhZero  = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));
  bool lfZero  = llZero && lhZero;
  bool hlZero  = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));
  bool hhZero  =                                 (srl4rll && (nRotate <= -16));
  bool hfZero  = hlZero && hhZero;

  // rotate then mask src operand.
  // if oneBits == true,  all bits outside selected range are 1s.
  // if oneBits == false, all bits outside selected range are 0s.
  if (src32bit) {   // There might be garbage in the upper 32 bits which will get masked away.
    if (dst32bit) {
      z_rll(dst, src, nRotate);   // Copy and rotate, upper half of reg remains undisturbed.
    } else {
      if      (sll4rll) { z_sllg(dst, src,  nRotate); }
      else if (srl4rll) { z_srlg(dst, src, -nRotate); }
      else              { z_rllg(dst, src,  nRotate); }
    }
  } else {
    if      (sll4rll) { z_sllg(dst, src,  nRotate); }
    else if (srl4rll) { z_srlg(dst, src, -nRotate); }
    else              { z_rllg(dst, src,  nRotate); }
  }

  unsigned long  range_mask    = create_mask(lBitPos, rBitPos);
  unsigned int   range_mask_h  = (unsigned int)(range_mask >> 32);
  unsigned int   range_mask_l  = (unsigned int)range_mask;
  unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);
  unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);
  unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);
  unsigned short range_mask_ll = (unsigned short)range_mask;
  // Works for z9 and newer H/W.
  if (oneBits) {
    if ((~range_mask_l) != 0)                { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.
    if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }
  } else {
    // All bits outside range become 0s
    if (((~range_mask_l) != 0) && !lfZero) {
      z_nilf(dst, range_mask_l);
    }
    if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {
      z_nihf(dst, range_mask_h);
    }
  }
}

// Rotate src, then insert selected range from rotated src into dst.
// Clear dst before, if requested.
void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,
                                        int nRotate, bool clear_dst) {
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest.
}
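// Illustrative example (added for this annotated listing, not part of the upstream file):
// rotate_then_insert(dst, src, 48, 63, 0, true) emits a single risbg that copies bits 48..63
// of src into dst and clears the remaining bits, i.e. it zero-extends the low 16 bits of src.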

// Rotate src, then and selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,
                                     int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
}

// Rotate src, then or selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_or(Register dst, Register src, int lBitPos, int rBitPos,
                                    int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
}

// Rotate src, then xor selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_xor(Register dst, Register src, int lBitPos, int rBitPos,
                                     int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
}

void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {
  if (inc.is_register()) {
    z_agr(r1, inc.as_register());
  } else { // constant
    intptr_t imm = inc.as_constant();
    add2reg(r1, imm);
  }
}

// Helper function to multiply the 64bit contents of a register by a 16bit constant.
// The optimization tries to avoid the mghi instruction, since it uses the FPU for
// calculation and is thus rather slow.
//
// There is no handling for special cases, e.g. cval==0 or cval==1.
//
// Returns len of generated code block.
unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {
  int block_start = offset();

  bool sign_flip = cval < 0;
  cval = sign_flip ? -cval : cval;

  BLOCK_COMMENT("Reg64*Con16 {");

  int bit1 = cval & -cval;
  if (bit1 == cval) {
    z_sllg(rval, rval, exact_log2(bit1));
    if (sign_flip) { z_lcgr(rval, rval); }
  } else {
    int bit2 = (cval-bit1) & -(cval-bit1);
    if ((bit1+bit2) == cval) {
      z_sllg(work, rval, exact_log2(bit1));
      z_sllg(rval, rval, exact_log2(bit2));
      z_agr(rval, work);
      if (sign_flip) { z_lcgr(rval, rval); }
    } else {
      if (sign_flip) { z_mghi(rval, -cval); }
      else           { z_mghi(rval,  cval); }
    }
  }
  BLOCK_COMMENT("} Reg64*Con16");

  int block_end = offset();
  return block_end - block_start;
}
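// Illustrative example (added for this annotated listing, not part of the upstream file):
// for cval == 10 the emitter decomposes the constant into 2 + 8 and produces
//   z_sllg(work, rval, 1);  z_sllg(rval, rval, 3);  z_agr(rval, work);
// i.e. rval*10 == (rval<<1) + (rval<<3), avoiding the slower mghi instruction.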

// Generic operation r1 := r2 + imm.
//
// Should produce the best code for each supported CPU version.
// r2 == noreg yields r1 := r1 + imm
// imm == 0 emits either no instruction or r1 := r2 !
// NOTES: 1) Don't use this function where fixed sized
//           instruction sequences are required!!!
//        2) Don't use this function if condition code
//           setting is required!
//        3) Despite being declared as int64_t, the parameter imm
//           must be a simm_32 value (= signed 32-bit integer).
void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {
  assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");

  if (r2 == noreg) { r2 = r1; }

  // Handle special case imm == 0.
  if (imm == 0) {
    lgr_if_needed(r1, r2);
    // Nothing else to do.
    return;
  }

  if (!PreferLAoverADD || (r2 == Z_R0)) {
    bool distinctOpnds = VM_Version::has_DistinctOpnds();

    // Can we encode imm in 16 bits signed?
    if (Immediate::is_simm16(imm)) {
      if (r1 == r2) {
        z_aghi(r1, imm);
        return;
      }
      if (distinctOpnds) {
        z_aghik(r1, r2, imm);
        return;
      }
      z_lgr(r1, r2);
      z_aghi(r1, imm);
      return;
    }
  } else {
    // Can we encode imm in 12 bits unsigned?
    if (Displacement::is_shortDisp(imm)) {
      z_la(r1, imm, r2);
      return;
    }
    // Can we encode imm in 20 bits signed?
    if (Displacement::is_validDisp(imm)) {
      // Always use LAY instruction, so we don't need the tmp register.
      z_lay(r1, imm, r2);
      return;
    }

  }

  // Can handle it (all possible values) with long immediates.
  lgr_if_needed(r1, r2);
  z_agfi(r1, imm);
}
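// Illustrative example (added for this annotated listing, not part of the upstream file):
// with PreferLAoverADD enabled and r2 != Z_R0, add2reg(r1, 8, r2) becomes z_la(r1, 8, r2)
// (12-bit unsigned displacement), add2reg(r1, -8, r2) becomes z_lay(r1, -8, r2)
// (20-bit signed displacement), and an immediate outside that range falls back to
// lgr_if_needed followed by z_agfi.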

// Generic operation r := b + x + d
//
// Addition of several operands with address generation semantics - sort of:
//  - no restriction on the registers. Any register will do for any operand.
//  - x == noreg: operand will be disregarded.
//  - b == noreg: will use (contents of) result reg as operand (r := r + d).
//  - x == Z_R0:  just disregard
//  - b == Z_R0:  use as operand. This is not address generation semantics!!!
//
// The same restrictions as on add2reg() are valid!!!
void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {
  assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");

  if (x == noreg) { x = Z_R0; }
  if (b == noreg) { b = r; }

  // Handle special case x == R0.
  if (x == Z_R0) {
    // Can simply add the immediate value to the base register.
    add2reg(r, d, b);
    return;
  }

  if (!PreferLAoverADD || (b == Z_R0)) {
    bool distinctOpnds = VM_Version::has_DistinctOpnds();
    // Handle special case d == 0.
    if (d == 0) {
      if (b == x)        { z_sllg(r, b, 1); return; }
      if (r == x)        { z_agr(r, b);     return; }
      if (r == b)        { z_agr(r, x);     return; }
      if (distinctOpnds) { z_agrk(r, x, b); return; }
      z_lgr(r, b);
      z_agr(r, x);
    } else {
      if (x == b)             { z_sllg(r, x, 1); }
      else if (r == x)        { z_agr(r, b); }
      else if (r == b)        { z_agr(r, x); }
      else if (distinctOpnds) { z_agrk(r, x, b); }
      else {
        z_lgr(r, b);
        z_agr(r, x);
      }
      add2reg(r, d);
    }
  } else {
    // Can we encode imm in 12 bits unsigned?
    if (Displacement::is_shortDisp(d)) {
      z_la(r, d, x, b);
      return;
    }
    // Can we encode imm in 20 bits signed?
    if (Displacement::is_validDisp(d)) {
      z_lay(r, d, x, b);
      return;
    }
    z_la(r, 0, x, b);
    add2reg(r, d);
  }
}
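// Illustrative note (added for this annotated listing, not part of the upstream file):
// the b == x shortcut above relies on b + x == 2*b, which is why a single
// z_sllg(r, b, 1) (shift left by one) replaces the explicit addition in that case.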

// Generic emitter (32bit) for direct memory increment.
// For optimal code, do not specify Z_R0 as temp register.
void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {
  if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
    z_asi(a, imm);
  } else {
    z_lgf(tmp, a);
    add2reg(tmp, imm);
    z_st(tmp, a);
  }
}

void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {
  if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
    z_agsi(a, imm);
  } else {
    z_lg(tmp, a);
    add2reg(tmp, imm);
    z_stg(tmp, a);
  }
}

void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
    case  8: z_lg(dst, src); break;
    case  4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;
    case  2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;
    case  1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;
    default: ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
  switch (size_in_bytes) {
    case  8: z_stg(src, dst); break;
    case  4: z_st(src, dst); break;
    case  2: z_sth(src, dst); break;
    case  1: z_stc(src, dst); break;
    default: ShouldNotReachHere();
  }
}

// Split a si20 offset (20bit, signed) into an ui12 offset (12bit, unsigned) and
// a high-order summand in register tmp.
//
// return value: < 0: No split required, si20 actually has property uimm12.
//               >= 0: Split performed. Use return value as uimm12 displacement and
//                     tmp as index register.
int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {
  assert(Immediate::is_simm20(si20_offset), "sanity");
  int lg_off = (int)si20_offset &  0x0fff; // Punch out low-order 12 bits, always positive.
  int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.
  assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||
         !Displacement::is_shortDisp(si20_offset), "unexpected offset values");
  assert((lg_off+ll_off) == si20_offset, "offset splitup error");

  Register work = accumulate? Z_R0 : tmp;

  if (fixed_codelen) {          // Len of code = 10 = 4 + 6.
    z_lghi(work, ll_off>>12);   // Implicit sign extension.
    z_slag(work, work, 12);
  } else {                      // Len of code = 0..10.
    if (ll_off == 0) { return -1; }
    // ll_off has 8 significant bits (at most) plus sign.
    if ((ll_off & 0x0000f000) == 0) {    // Non-zero bits only in upper halfbyte.
      z_llilh(work, ll_off >> 16);
      if (ll_off < 0) {                  // Sign-extension required.
        z_lgfr(work, work);
      }
    } else {
      if ((ll_off & 0x000f0000) == 0) {  // Non-zero bits only in lower halfbyte.
        z_llill(work, ll_off);
      } else {                           // Non-zero bits in both halfbytes.
        z_lghi(work, ll_off>>12);        // Implicit sign extension.
        z_slag(work, work, 12);
      }
    }
  }
  if (accumulate) { z_algr(tmp, work); } // len of code += 4
  return lg_off;
}
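// Illustrative example (added for this annotated listing, not part of the upstream file):
// si20_offset == 0x12345 is split into ll_off == 0x12000 (materialized in the work register)
// and lg_off == 0x345, which the caller then uses as a 12-bit unsigned displacement.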

void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
  if (Displacement::is_validDisp(si20)) {
    z_ley(t, si20, a);
  } else {
    // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset
    // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
    // pool loads).
    bool accumulate    = true;
    bool fixed_codelen = true;
    Register work;

    if (fixed_codelen) {
      z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
    } else {
      accumulate = (a == tmp);
    }
    work = tmp;

    int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
    if (disp12 < 0) {
      z_le(t, si20, work);
    } else {
      if (accumulate) {
        z_le(t, disp12, work);
      } else {
        z_le(t, disp12, work, a);
      }
    }
  }
}

void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
  if (Displacement::is_validDisp(si20)) {
    z_ldy(t, si20, a);
  } else {
    // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset
    // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
    // pool loads).
    bool accumulate    = true;
    bool fixed_codelen = true;
    Register work;

    if (fixed_codelen) {
      z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
    } else {
      accumulate = (a == tmp);
    }
    work = tmp;

    int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
    if (disp12 < 0) {
      z_ld(t, si20, work);
    } else {
      if (accumulate) {
        z_ld(t, disp12, work);
      } else {
        z_ld(t, disp12, work, a);
      }
    }
  }
}

// PCrelative TOC access.
// Returns distance (in bytes) from current position to start of consts section.
// Returns 0 (zero) if no consts section exists or if it has size zero.
long MacroAssembler::toc_distance() {
  CodeSection* cs = code()->consts();
  return (long)((cs != NULL) ? cs->start()-pc() : 0);
}

// Implementation on x86/sparc assumes that constant and instruction section are
// adjacent, but this doesn't hold. Two special situations may occur, that we must
// be able to handle:
//   1. const section may be located apart from the inst section.
//   2. const section may be empty
// In both cases, we use the const section's start address to compute the "TOC",
// this seems to occur only temporarily; in the final step we always seem to end up
// with the pc-relative variant.
//
// PC-relative offset could be +/-2**32 -> use long for disp
// Furthermore: makes no sense to have special code for
// adjacent const and inst sections.
void MacroAssembler::load_toc(Register Rtoc) {
  // Simply use distance from start of const section (should be patched in the end).
  long disp = toc_distance();

  RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);
  relocate(rspec);
  z_larl(Rtoc, RelAddr::pcrel_off32(disp)); // Offset is in halfwords.
}

// PCrelative TOC access.
// Load from anywhere pcrelative (with relocation of load instr)
void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
  address          pc             = this->pc();
  ptrdiff_t        total_distance = dataLocation - pc;
  RelocationHolder rspec          = internal_word_Relocation::spec(dataLocation);

  assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
  assert(total_distance != 0, "sanity");

  // Some extra safety net.
  if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
  }

  (this)->relocate(rspec, relocInfo::pcrel_addr_format);
  z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
}


// PCrelative TOC access.
// Load from anywhere pcrelative (with relocation of load instr)
// loaded addr has to be relocated when added to constant pool.
void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
  address          pc             = this->pc();
  ptrdiff_t        total_distance = addrLocation - pc;
  RelocationHolder rspec          = internal_word_Relocation::spec(addrLocation);

  assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");

  // Some extra safety net.
  if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
  }

  (this)->relocate(rspec, relocInfo::pcrel_addr_format);
  z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
}

// Generic operation: load a value from memory and test.
// CondCode indicates the sign (<0, ==0, >0) of the loaded value.
void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {
  z_lb(dst, a);
  z_ltr(dst, dst);
}

void MacroAssembler::load_and_test_short(Register dst, const Address &a) {
  int64_t disp = a.disp20();
  if (Displacement::is_shortDisp(disp)) {
    z_lh(dst, a);
  } else if (Displacement::is_longDisp(disp)) {
    z_lhy(dst, a);
  } else {
    guarantee(false, "displacement out of range");
  }
  z_ltr(dst, dst);
}

void MacroAssembler::load_and_test_int(Register dst, const Address &a) {
  z_lt(dst, a);
}

void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {
  z_ltgf(dst, a);
}

void MacroAssembler::load_and_test_long(Register dst, const Address &a) {
  z_ltg(dst, a);
}

// Test a bit in memory.
void MacroAssembler::testbit(const Address &a, unsigned int bit) {
  assert(a.index() == noreg, "no index reg allowed in testbit");
  if (bit <= 7) {
    z_tm(a.disp() + 3, a.base(), 1 << bit);
  } else if (bit <= 15) {
    z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));
  } else if (bit <= 23) {
    z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));
  } else if (bit <= 31) {
    z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));
  } else {
    ShouldNotReachHere();
  }
}
1005 |
// Test a bit in a register. Result is reflected in CC. |
|
1006 |
void MacroAssembler::testbit(Register r, unsigned int bitPos) { |
|
1007 |
if (bitPos < 16) { |
|
1008 |
z_tmll(r, 1U<<bitPos); |
|
1009 |
} else if (bitPos < 32) { |
|
1010 |
z_tmlh(r, 1U<<(bitPos-16)); |
|
1011 |
} else if (bitPos < 48) { |
|
1012 |
z_tmhl(r, 1U<<(bitPos-32)); |
|
1013 |
} else if (bitPos < 64) { |
|
1014 |
z_tmhh(r, 1U<<(bitPos-48)); |
|
1015 |
} else { |
|
1016 |
ShouldNotReachHere(); |
|
1017 |
} |
|
1018 |
} |
|
1019 |
||
48094
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
1020 |
void MacroAssembler::prefetch_read(Address a) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
1021 |
z_pfd(1, a.disp20(), a.indexOrR0(), a.base()); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
1022 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
1023 |
void MacroAssembler::prefetch_update(Address a) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
1024 |
z_pfd(2, a.disp20(), a.indexOrR0(), a.base()); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
1025 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
1026 |
|
42065 | 1027 |
// Clear a register, i.e. load const zero into reg. |
1028 |
// Return len (in bytes) of generated instruction(s). |
|
1029 |
// whole_reg: Clear 64 bits if true, 32 bits otherwise. |
|
1030 |
// set_cc: Use instruction that sets the condition code, if true. |
|
1031 |
int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) { |
|
1032 |
unsigned int start_off = offset(); |
|
1033 |
if (whole_reg) { |
|
1034 |
set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0); |
|
1035 |
} else { // Only 32bit register. |
|
1036 |
set_cc ? z_xr(r, r) : z_lhi(r, 0); |
|
1037 |
} |
|
1038 |
return offset() - start_off; |
|
1039 |
} |
|
1040 |
||
1041 |
#ifdef ASSERT |
|
1042 |
int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) { |
|
1043 |
switch (pattern_len) { |
|
1044 |
case 1: |
|
1045 |
pattern = (pattern & 0x000000ff) | ((pattern & 0x000000ff)<<8); |
|
1046 |
case 2: |
|
1047 |
pattern = (pattern & 0x0000ffff) | ((pattern & 0x0000ffff)<<16); |
|
1048 |
case 4: |
|
1049 |
pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32); |
|
1050 |
case 8: |
|
1051 |
return load_const_optimized_rtn_len(r, pattern, true); |
|
1052 |
break; |
|
1053 |
default: |
|
1054 |
guarantee(false, "preset_reg: bad len"); |
|
1055 |
} |
|
1056 |
return 0; |
|
1057 |
} |
|
1058 |
#endif |
|
1059 |
||
1060 |
// addr: Address descriptor of memory to clear index register will not be used ! |
|
1061 |
// size: Number of bytes to clear. |
|
1062 |
// !!! DO NOT USE THEM FOR ATOMIC MEMORY CLEARING !!! |
|
1063 |
// !!! Use store_const() instead !!! |
|
1064 |
void MacroAssembler::clear_mem(const Address& addr, unsigned size) { |
|
1065 |
guarantee(size <= 256, "MacroAssembler::clear_mem: size too large"); |
|
1066 |
||
1067 |
if (size == 1) { |
|
1068 |
z_mvi(addr, 0); |
|
1069 |
return; |
|
1070 |
} |
|
1071 |
||
1072 |
switch (size) { |
|
1073 |
case 2: z_mvhhi(addr, 0); |
|
1074 |
return; |
|
1075 |
case 4: z_mvhi(addr, 0); |
|
1076 |
return; |
|
1077 |
case 8: z_mvghi(addr, 0); |
|
1078 |
return; |
|
1079 |
default: ; // Fallthru to xc. |
|
1080 |
} |
|
1081 |
||
1082 |
z_xc(addr, size, addr); |
|
1083 |
} |
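// Illustrative example (added for this annotated listing, not part of the upstream file):
// clear_mem(Address(Z_SP, 16), 8) emits a single z_mvghi(addr, 0), while a size such as 24
// falls through to z_xc(addr, 24, addr), which zeroes the area by XORing it with itself.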

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) z_nop();
}

// Special version for non-relocateable code if required alignment
// is larger than CodeEntryAlignment.
void MacroAssembler::align_address(int modulus) {
  while ((uintptr_t)pc() % modulus != 0) z_nop();
}

Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         Register           temp_reg,
                                         int64_t            extra_slot_offset) {
  // On Z, we can have index and disp in an Address. So don't call argument_offset,
  // which issues an unnecessary add instruction.
  int stackElementSize = Interpreter::stackElementSize;
  int64_t offset = extra_slot_offset * stackElementSize;
  const Register argbase = Z_esp;
  if (arg_slot.is_constant()) {
    offset += arg_slot.as_constant() * stackElementSize;
    return Address(argbase, offset);
  }
  // else
  assert(temp_reg != noreg, "must specify");
  assert(temp_reg != Z_ARG1, "base and index are conflicting");
  z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3
  return Address(argbase, temp_reg, offset);
}
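// Illustrative example (added for this annotated listing, not part of the upstream file):
// for a constant arg_slot of 2 and extra_slot_offset of 0 the function returns
// Address(Z_esp, 2 * Interpreter::stackElementSize); for a register slot it shifts the slot
// number left by exact_log2(stackElementSize) into temp_reg and uses that as index register.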


//===================================================================
//===   START   C O N S T A N T S   I N   C O D E   S T R E A M   ===
//===================================================================
//===            P A T C H A B L E   C O N S T A N T S            ===
//===================================================================


//---------------------------------------------------
//  Load (patchable) constant into register
//---------------------------------------------------


// Load absolute address (and try to optimize).
//   Note: This method is usable only for position-fixed code,
//         referring to a position-fixed target location.
//         If not so, relocations and patching must be used.
void MacroAssembler::load_absolute_address(Register d, address addr) {
  assert(addr != NULL, "should not happen");
  BLOCK_COMMENT("load_absolute_address:");
  if (addr == NULL) {
    z_larl(d, pc()); // Dummy emit for size calc.
    return;
  }

  if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {
    z_larl(d, addr);
    return;
  }

  load_const_optimized(d, (long)addr);
}

// Load a 64bit constant.
// Patchable code sequence, but not atomically patchable.
// Make sure to keep code size constant -> no value-dependent optimizations.
// Do not kill condition code.
void MacroAssembler::load_const(Register t, long x) {
  // Note: Right shift is only cleanly defined for unsigned types
  //       or for signed types with nonnegative values.
  Assembler::z_iihf(t, (long)((unsigned long)x >> 32));
  Assembler::z_iilf(t, (long)((unsigned long)x & 0xffffffffUL));
}
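// Illustrative example (added for this annotated listing, not part of the upstream file):
// load_const(t, 0x123456789abcdef0L) emits z_iihf(t, 0x12345678) followed by
// z_iilf(t, 0x9abcdef0); the sequence is always two 6-byte instructions, which is what
// patch_const() below relies on when it rewrites the two 32-bit immediates in place.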
1157 |
||
1158 |
// Load a 32bit constant into a 64bit register, sign-extend or zero-extend. |
|
1159 |
// Patchable code sequence, but not atomically patchable. |
|
1160 |
// Make sure to keep code size constant -> no value-dependent optimizations. |
|
1161 |
// Do not kill condition code. |
|
1162 |
void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) { |
|
1163 |
if (sign_extend) { Assembler::z_lgfi(t, x); } |
|
1164 |
else { Assembler::z_llilf(t, x); } |
|
1165 |
} |
|
1166 |
||
1167 |
// Load narrow oop constant, no decompression. |
|
1168 |
void MacroAssembler::load_narrow_oop(Register t, narrowOop a) { |
|
1169 |
assert(UseCompressedOops, "must be on to call this method"); |
|
1170 |
load_const_32to64(t, a, false /*sign_extend*/); |
|
1171 |
} |
|
1172 |
||
1173 |
// Load narrow klass constant, compression required. |
|
1174 |
void MacroAssembler::load_narrow_klass(Register t, Klass* k) { |
|
1175 |
assert(UseCompressedClassPointers, "must be on to call this method"); |
|
54780
f8d182aedc92
8223136: Move compressed oops functions to CompressedOops class
stefank
parents:
54542
diff
changeset
|
1176 |
narrowKlass encoded_k = CompressedKlassPointers::encode(k); |
42065 | 1177 |
load_const_32to64(t, encoded_k, false /*sign_extend*/); |
1178 |
} |
|
1179 |
||
1180 |
//------------------------------------------------------ |
|
1181 |
// Compare (patchable) constant with register. |
|
1182 |
//------------------------------------------------------ |
|
1183 |
||
1184 |
// Compare narrow oop in reg with narrow oop constant, no decompression. |
|
1185 |
void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) { |
|
1186 |
assert(UseCompressedOops, "must be on to call this method"); |
|
1187 |
||
1188 |
Assembler::z_clfi(oop1, oop2); |
|
1189 |
} |
|
1190 |
||
1191 |
// Compare narrow oop in reg with narrow oop constant, no decompression. |
|
1192 |
void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) { |
|
1193 |
assert(UseCompressedClassPointers, "must be on to call this method"); |
|
54780
f8d182aedc92
8223136: Move compressed oops functions to CompressedOops class
stefank
parents:
54542
diff
changeset
|
1194 |
narrowKlass encoded_k = CompressedKlassPointers::encode(klass2); |
42065 | 1195 |
|
1196 |
Assembler::z_clfi(klass1, encoded_k); |
|
1197 |
} |
|
1198 |
||
1199 |
//---------------------------------------------------------- |
|
1200 |
// Check which kind of load_constant we have here. |
|
1201 |
//---------------------------------------------------------- |
|
1202 |
||
1203 |
// Detection of CPU version dependent load_const sequence. |
|
1204 |
// The detection is valid only for code sequences generated by load_const, |
|
1205 |
// not load_const_optimized. |
|
1206 |
bool MacroAssembler::is_load_const(address a) { |
|
1207 |
unsigned long inst1, inst2; |
|
1208 |
unsigned int len1, len2; |
|
1209 |
||
1210 |
len1 = get_instruction(a, &inst1); |
|
1211 |
len2 = get_instruction(a + len1, &inst2); |
|
1212 |
||
1213 |
return is_z_iihf(inst1) && is_z_iilf(inst2); |
|
1214 |
} |

// Detection of CPU version dependent load_const_32to64 sequence.
// Mostly used for narrow oops and narrow Klass pointers.
// The detection is valid only for code sequences generated by load_const_32to64.
bool MacroAssembler::is_load_const_32to64(address pos) {
  unsigned long inst1, inst2;
  unsigned int  len1;

  len1 = get_instruction(pos, &inst1);
  return is_z_llilf(inst1);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
bool MacroAssembler::is_compare_immediate32(address pos) {
  return is_equal(pos, CLFI_ZOPC, RIL_MASK);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {
  return is_compare_immediate32(pos);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_klass.
bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {
  return is_compare_immediate32(pos);
}

//-----------------------------------
//  patch the load_constant
//-----------------------------------

// CPU-version dependent patching of load_const.
void MacroAssembler::patch_const(address a, long x) {
  assert(is_load_const(a), "not a load of a constant");
  // Note: Right shift is only cleanly defined for unsigned types
  //       or for signed types with nonnegative values.
  set_imm32((address)a, (long)((unsigned long)x >> 32));
  set_imm32((address)(a + 6), (long)((unsigned long)x & 0xffffffffUL));
}
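
// Usage sketch (illustrative, not from the original source; `insn' and
// `new_value' are hypothetical): re-target a 64-bit constant that was
// materialized with load_const, e.g. when an embedded pointer changes.
//
//   address insn = ...;                    // start of an iihf/iilf pair
//   assert(is_load_const(insn), "sanity");
//   patch_const(insn, (long)new_value);    // high word -> iihf, low word -> iilf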

// Patching the value of CPU version dependent load_const_32to64 sequence.
// The passed ptr MUST be in compressed format!
int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {
  assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");

  set_imm32(pos, np);
  return 6;
}

// Patching the value of CPU version dependent compare_immediate_narrow sequence.
// The passed ptr MUST be in compressed format!
int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
  assert(is_compare_immediate32(pos), "not a compressed ptr compare");

  set_imm32(pos, np);
  return 6;
}

// Patching the immediate value of CPU version dependent load_narrow_oop sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
  assert(UseCompressedOops, "Can only patch compressed oops");

  narrowOop no = CompressedOops::encode(o);
  return patch_load_const_32to64(pos, no);
}

// Patching the immediate value of CPU version dependent load_narrow_klass sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
  assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");

  narrowKlass nk = CompressedKlassPointers::encode(k);
  return patch_load_const_32to64(pos, nk);
}

// Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
  assert(UseCompressedOops, "Can only patch compressed oops");

  narrowOop no = CompressedOops::encode(o);
  return patch_compare_immediate_32(pos, no);
}

// Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
  assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");

  narrowKlass nk = CompressedKlassPointers::encode(k);
  return patch_compare_immediate_32(pos, nk);
}

//------------------------------------------------------------------------
//  Extract the constant from a load_constant instruction stream.
//------------------------------------------------------------------------

// Get constant from a load_const sequence.
long MacroAssembler::get_const(address a) {
  assert(is_load_const(a), "not a load of a constant");
  unsigned long x;
  x =  (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);
  x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));
  return (long) x;
}

//--------------------------------------
//  Store a constant in memory.
//--------------------------------------

// General emitter to move a constant to memory.
// The store is atomic.
//  o Address must be given in RS format (no index register)
//  o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported.
//  o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.
//  o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.
//  o Memory slot must be at least as wide as constant, will assert otherwise.
//  o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.
int MacroAssembler::store_const(const Address &dest, long imm,
                                unsigned int lm, unsigned int lc,
                                Register scratch) {
  int64_t  disp = dest.disp();
  Register base = dest.base();
  assert(!dest.has_index(), "not supported");
  assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory length not supported");
  assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported");
  assert(lm>=lc, "memory slot too small");
  assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range");
  assert(Displacement::is_validDisp(disp), "displacement out of range");

  bool is_shortDisp = Displacement::is_shortDisp(disp);
  int store_offset = -1;

  // For target len == 1 it's easy.
  if (lm == 1) {
    store_offset = offset();
    if (is_shortDisp) {
      z_mvi(disp, base, imm);
      return store_offset;
    } else {
      z_mviy(disp, base, imm);
      return store_offset;
    }
  }

  // All the "good stuff" takes an unsigned displacement.
  if (is_shortDisp) {
    // NOTE: Cannot use clear_mem for imm==0, because it is not atomic.

    store_offset = offset();
    switch (lm) {
      case 2:  // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.
        z_mvhhi(disp, base, imm);
        return store_offset;
      case 4:
        if (Immediate::is_simm16(imm)) {
          z_mvhi(disp, base, imm);
          return store_offset;
        }
        break;
      case 8:
        if (Immediate::is_simm16(imm)) {
          z_mvghi(disp, base, imm);
          return store_offset;
        }
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }

  // Can't optimize, so load value and store it.
  guarantee(scratch != noreg, " need a scratch register here !");
  if (imm != 0) {
    load_const_optimized(scratch, imm);  // Preserves CC anyway.
  } else {
    // Leave CC alone!!
    (void) clear_reg(scratch, true, false);  // Indicate unused result.
  }

  store_offset = offset();
  if (is_shortDisp) {
    switch (lm) {
      case 2:
        z_sth(scratch, disp, Z_R0, base);
        return store_offset;
      case 4:
        z_st(scratch, disp, Z_R0, base);
        return store_offset;
      case 8:
        z_stg(scratch, disp, Z_R0, base);
        return store_offset;
      default:
        ShouldNotReachHere();
        break;
    }
  } else {
    switch (lm) {
      case 2:
        z_sthy(scratch, disp, Z_R0, base);
        return store_offset;
      case 4:
        z_sty(scratch, disp, Z_R0, base);
        return store_offset;
      case 8:
        z_stg(scratch, disp, Z_R0, base);
        return store_offset;
      default:
        ShouldNotReachHere();
        break;
    }
  }
  return -1; // should not reach here
}
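
// Usage sketch (illustrative only; the displacement and registers are made up),
// written as it could appear inside another MacroAssembler emitter: atomically
// store a zero into an 8-byte stack slot, with Z_R1 as scratch for the fallback.
//
//   store_const(Address(Z_SP, 160), 0, /*lm=*/8, /*lc=*/8, Z_R1);
//
// With a short displacement and a 16-bit immediate this folds into a single
// MVGHI; otherwise the constant is built in the scratch register and stored.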

//===================================================================
//===      N O T   P A T C H A B L E   C O N S T A N T S         ===
//===================================================================

// Load constant x into register t with a fast instruction sequence
// depending on the bits in x. Preserves CC under all circumstances.
int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {
  if (x == 0) {
    int len;
    if (emit) {
      len = clear_reg(t, true, false);
    } else {
      len = 4;
    }
    return len;
  }

  if (Immediate::is_simm16(x)) {
    if (emit) { z_lghi(t, x); }
    return 4;
  }

  // 64 bit value: | part1 | part2 | part3 | part4 |
  // At least one part is not zero!
  // Note: Right shift is only cleanly defined for unsigned types
  //       or for signed types with nonnegative values.
  int part1 = (int)((unsigned long)x >> 48) & 0x0000ffff;
  int part2 = (int)((unsigned long)x >> 32) & 0x0000ffff;
  int part3 = (int)((unsigned long)x >> 16) & 0x0000ffff;
  int part4 = (int)x & 0x0000ffff;
  int part12 = (int)((unsigned long)x >> 32);
  int part34 = (int)x;

  // Lower word only (unsigned).
  if (part12 == 0) {
    if (part3 == 0) {
      if (emit) z_llill(t, part4);
      return 4;
    }
    if (part4 == 0) {
      if (emit) z_llilh(t, part3);
      return 4;
    }
    if (emit) z_llilf(t, part34);
    return 6;
  }

  // Upper word only.
  if (part34 == 0) {
    if (part1 == 0) {
      if (emit) z_llihl(t, part2);
      return 4;
    }
    if (part2 == 0) {
      if (emit) z_llihh(t, part1);
      return 4;
    }
    if (emit) z_llihf(t, part12);
    return 6;
  }

  // Lower word only (signed).
  if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {
    if (emit) z_lgfi(t, part34);
    return 6;
  }

  int len = 0;

  if ((part1 == 0) || (part2 == 0)) {
    if (part1 == 0) {
      if (emit) z_llihl(t, part2);
      len += 4;
    } else {
      if (emit) z_llihh(t, part1);
      len += 4;
    }
  } else {
    if (emit) z_llihf(t, part12);
    len += 6;
  }

  if ((part3 == 0) || (part4 == 0)) {
    if (part3 == 0) {
      if (emit) z_iill(t, part4);
      len += 4;
    } else {
      if (emit) z_iilh(t, part3);
      len += 4;
    }
  } else {
    if (emit) z_iilf(t, part34);
    len += 6;
  }
  return len;
}
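
// Worked example (illustrative, not from the original source): for
// x = 0x000000000000ffff only part4 is non-zero, so a single LLILL is emitted
// and the function returns 4; for x = 0x123456789abcdef0 no halfword is zero
// and the signed-32-bit shortcut does not apply, so LLIHF + IILF are emitted
// and the function returns 6 + 6 = 12.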

//=====================================================================
//===     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
//=====================================================================

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
}

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
}

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
}

void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
}

// Generate an optimal branch to the branch target.
// Optimal means that a relative branch (brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Used registers:
//   Z_R1 - work reg. Holds branch target address.
//          Used in fallback case only.
//
// This version of branch_optimized is good for cases where the target address is known
// and constant, i.e. is never changed (no relocation, no patching).
void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) {
  address branch_origin = pc();

  if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
    z_brc(cond, branch_addr);
  } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) {
    z_brcl(cond, branch_addr);
  } else {
    load_const_optimized(Z_R1, branch_addr);  // CC must not get killed by load_const_optimized.
    z_bcr(cond, Z_R1);
  }
}
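
// Usage sketch (illustrative; the target is just an example address already
// referenced elsewhere in this file), as emitted from another MacroAssembler routine:
//
//   address target = StubRoutines::forward_exception_entry();
//   branch_optimized(bcondAlways, target);
//
// Depending on the distance at emission time this becomes a 4-byte BRC, a
// 6-byte BRCL, or a load of the address into Z_R1 followed by BCR.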

// This version of branch_optimized is good for cases where the target address
// is potentially not yet known at the time the code is emitted.
//
// One very common case is a branch to an unbound label which is handled here.
// The caller might know (or hope) that the branch distance is short enough
// to be encoded in a 16bit relative address. In this case he will pass a
// NearLabel branch_target.
// Care must be taken with unbound labels. Each call to target(label) creates
// an entry in the patch queue for that label to patch all references of the label
// once it gets bound. Those recorded patch locations must be patchable. Otherwise,
// an assertion fires at patch time.
void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) {
  if (branch_target.is_bound()) {
    address branch_addr = target(branch_target);
    branch_optimized(cond, branch_addr);
  } else if (branch_target.is_near()) {
    z_brc(cond, branch_target);  // Caller assures that the target will be in range for z_brc.
  } else {
    z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
  }
}

// Generate an optimal compare and branch to the branch target.
// Optimal means that a relative branch (clgrj, brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Input:
//   r1 - left compare operand
//   r2 - right compare operand
void MacroAssembler::compare_and_branch_optimized(Register r1,
                                                  Register r2,
                                                  Assembler::branch_condition cond,
                                                  address  branch_addr,
                                                  bool     len64,
                                                  bool     has_sign) {
  unsigned int casenum = (len64?2:0)+(has_sign?0:1);

  address branch_origin = pc();
  if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
    switch (casenum) {
      case 0: z_crj( r1, r2, cond, branch_addr); break;
      case 1: z_clrj(r1, r2, cond, branch_addr); break;
      case 2: z_cgrj(r1, r2, cond, branch_addr); break;
      case 3: z_clgrj(r1, r2, cond, branch_addr); break;
      default: ShouldNotReachHere(); break;
    }
  } else {
    switch (casenum) {
      case 0: z_cr( r1, r2); break;
      case 1: z_clr(r1, r2); break;
      case 2: z_cgr(r1, r2); break;
      case 3: z_clgr(r1, r2); break;
      default: ShouldNotReachHere(); break;
    }
    branch_optimized(cond, branch_addr);
  }
}

// Generate an optimal compare and branch to the branch target.
// Optimal means that a relative branch (clgij, brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Input:
//   r1 - left compare operand (in register)
//   x2 - right compare operand (immediate)
void MacroAssembler::compare_and_branch_optimized(Register r1,
                                                  jlong    x2,
                                                  Assembler::branch_condition cond,
                                                  Label&   branch_target,
                                                  bool     len64,
                                                  bool     has_sign) {
  address      branch_origin = pc();
  bool         x2_imm8       = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
  bool         is_RelAddr16  = branch_target.is_near() ||
                               (branch_target.is_bound() &&
                                RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
  unsigned int casenum       = (len64?2:0)+(has_sign?0:1);

  if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) {
    switch (casenum) {
      case 0: z_cij( r1, x2, cond, branch_target); break;
      case 1: z_clij(r1, x2, cond, branch_target); break;
      case 2: z_cgij(r1, x2, cond, branch_target); break;
      case 3: z_clgij(r1, x2, cond, branch_target); break;
      default: ShouldNotReachHere(); break;
    }
    return;
  }

  if (x2 == 0) {
    switch (casenum) {
      case 0: z_ltr(r1, r1); break;
      case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
      case 2: z_ltgr(r1, r1); break;
      case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
      default: ShouldNotReachHere(); break;
    }
  } else {
    if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) {
      switch (casenum) {
        case 0: z_chi(r1, x2); break;
        case 1: z_chi(r1, x2); break; // positive immediate < 2**15
        case 2: z_cghi(r1, x2); break;
        case 3: z_cghi(r1, x2); break; // positive immediate < 2**15
        default: break;
      }
    } else if ( (has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2)) ) {
      switch (casenum) {
        case 0: z_cfi( r1, x2); break;
        case 1: z_clfi(r1, x2); break;
        case 2: z_cgfi(r1, x2); break;
        case 3: z_clgfi(r1, x2); break;
        default: ShouldNotReachHere(); break;
      }
    } else {
      // No instruction with immediate operand possible, so load into register.
      Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1;
      load_const_optimized(scratch, x2);
      switch (casenum) {
        case 0: z_cr( r1, scratch); break;
        case 1: z_clr(r1, scratch); break;
        case 2: z_cgr(r1, scratch); break;
        case 3: z_clgr(r1, scratch); break;
        default: ShouldNotReachHere(); break;
      }
    }
  }
  branch_optimized(cond, branch_target);
}
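
// Usage sketch (illustrative; register, immediate and label are made up):
//
//   NearLabel is_small;
//   // Signed 32-bit compare of Z_R2 against 16, branch if less than:
//   compare_and_branch_optimized(Z_R2, (jlong)16, bcondLow, is_small,
//                                /*len64=*/false, /*has_sign=*/true);
//
// Because 16 fits into a signed 8-bit immediate and the label is near, machines
// with the compare-and-branch facility can encode this as a single CIJ.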

// Generate an optimal compare and branch to the branch target.
// Optimal means that a relative branch (clgrj, brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Input:
//   r1 - left compare operand
//   r2 - right compare operand
void MacroAssembler::compare_and_branch_optimized(Register r1,
                                                  Register r2,
                                                  Assembler::branch_condition cond,
                                                  Label&   branch_target,
                                                  bool     len64,
                                                  bool     has_sign) {
  unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);

  if (branch_target.is_bound()) {
    address branch_addr = target(branch_target);
    compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
  } else {
    if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
      switch (casenum) {
        case 0: z_crj(  r1, r2, cond, branch_target); break;
        case 1: z_clrj( r1, r2, cond, branch_target); break;
        case 2: z_cgrj( r1, r2, cond, branch_target); break;
        case 3: z_clgrj(r1, r2, cond, branch_target); break;
        default: ShouldNotReachHere(); break;
      }
    } else {
      switch (casenum) {
        case 0: z_cr( r1, r2); break;
        case 1: z_clr(r1, r2); break;
        case 2: z_cgr(r1, r2); break;
        case 3: z_clgr(r1, r2); break;
        default: ShouldNotReachHere(); break;
      }
      branch_optimized(cond, branch_target);
    }
  }
}

//===========================================================================
//===  END   H I G H E R   L E V E L   B R A N C H   E M I T T E R S    ===
//===========================================================================

AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_oop_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

// NOTE: destroys r
void MacroAssembler::c2bool(Register r, Register t) {
  z_lcr(t, r);   // t = -r
  z_or(r, t);    // r = -r OR r
  z_srl(r, 31);  // Yields 0 if r was 0, 1 otherwise.
}

RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp,
                                                      int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0) {
    return RegisterOrConstant(value + offset);
  }

  BLOCK_COMMENT("delayed_value {");
  // Load indirectly to solve generation ordering problem.
  load_absolute_address(tmp, (address) delayed_value_addr); // tmp = a;
  z_lg(tmp, 0, tmp);                 // tmp = *tmp;

#ifdef ASSERT
  NearLabel L;
  compare64_and_branch(tmp, (intptr_t)0L, Assembler::bcondNotEqual, L);
  z_illtrap();
  bind(L);
#endif

  if (offset != 0) {
    z_agfi(tmp, offset);             // tmp = tmp + offset;
  }

  BLOCK_COMMENT("} delayed_value");
  return RegisterOrConstant(tmp);
}

// Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos'
// and return the resulting instruction.
// Dest_pos and inst_pos are 32 bit only. These parms can only designate
// relative positions.
// Use correct argument types. Do not pre-calculate distance.
unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) {
  int c = 0;
  unsigned long patched_inst = 0;
  if (is_call_pcrelative_short(inst) ||
      is_branch_pcrelative_short(inst) ||
      is_branchoncount_pcrelative_short(inst) ||
      is_branchonindex32_pcrelative_short(inst)) {
    c = 1;
    int m = fmask(15, 0);    // simm16(-1, 16, 32);
    int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32);
    patched_inst = (inst & ~m) | v;
  } else if (is_compareandbranch_pcrelative_short(inst)) {
    c = 2;
    long m = fmask(31, 16);  // simm16(-1, 16, 48);
    long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
    patched_inst = (inst & ~m) | v;
  } else if (is_branchonindex64_pcrelative_short(inst)) {
    c = 3;
    long m = fmask(31, 16);  // simm16(-1, 16, 48);
    long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
    patched_inst = (inst & ~m) | v;
  } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) {
    c = 4;
    long m = fmask(31, 0);  // simm32(-1, 16, 48);
    long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
    patched_inst = (inst & ~m) | v;
  } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions.
    c = 5;
    long m = fmask(31, 0);  // simm32(-1, 16, 48);
    long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
    patched_inst = (inst & ~m) | v;
  } else {
    print_dbg_msg(tty, inst, "not a relative branch", 0);
    dump_code_range(tty, inst_pos, 32, "not a pcrelative branch");
    ShouldNotReachHere();
  }

  long new_off = get_pcrel_offset(patched_inst);
  if (new_off != (dest_pos-inst_pos)) {
    tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off);
    print_dbg_msg(tty, inst,         "<- original instruction: branch patching error", 0);
    print_dbg_msg(tty, patched_inst, "<- patched instruction: branch patching error", 0);
#ifdef LUCY_DBG
    VM_Version::z_SIGSEGV();
#endif
    ShouldNotReachHere();
  }
  return patched_inst;
}
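
// Worked example (illustrative): for a 6-byte BRCL the 32-bit relative offset
// sits in bits 16..47 of the (right-aligned) instruction word. patched_branch()
// therefore clears fmask(31, 0), inserts
// simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48) -- the distance
// (dest_pos - inst_pos) expressed in halfwords -- and then re-extracts the
// offset as a self-check before returning the patched instruction.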

// Only called when binding labels (share/vm/asm/assembler.cpp)
// Pass arguments as intended. Do not pre-calculate distance.
void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
  unsigned long stub_inst;
  int inst_len = get_instruction(branch, &stub_inst);

  set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len);
}


// Extract relative address (aka offset).
// inv_simm16 works for 4-byte instructions only.
// compare and branch instructions are 6-byte and have a 16bit offset "in the middle".
long MacroAssembler::get_pcrel_offset(unsigned long inst) {

  if (MacroAssembler::is_pcrelative_short(inst)) {
    if (((inst&0xFFFFffff00000000UL) == 0) && ((inst&0x00000000FFFF0000UL) != 0)) {
      return RelAddr::inv_pcrel_off16(inv_simm16(inst));
    } else {
      return RelAddr::inv_pcrel_off16(inv_simm16_48(inst));
    }
  }

  if (MacroAssembler::is_pcrelative_long(inst)) {
    return RelAddr::inv_pcrel_off32(inv_simm32(inst));
  }

  print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);
#ifdef LUCY_DBG
  VM_Version::z_SIGSEGV();
#else
  ShouldNotReachHere();
#endif
  return -1;
}

long MacroAssembler::get_pcrel_offset(address pc) {
  unsigned long inst;
  unsigned int  len = get_instruction(pc, &inst);

#ifdef ASSERT
  long offset;
  if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {
    offset = get_pcrel_offset(inst);
  } else {
    offset = -1;
  }

  if (offset == -1) {
    dump_code_range(tty, pc, 32, "not a pcrelative instruction");
#ifdef LUCY_DBG
    VM_Version::z_SIGSEGV();
#else
    ShouldNotReachHere();
#endif
  }
  return offset;
#else
  return get_pcrel_offset(inst);
#endif // ASSERT
}

// Get target address from pc-relative instructions.
address MacroAssembler::get_target_addr_pcrel(address pc) {
  assert(is_pcrelative_long(pc), "not a pcrelative instruction");
  return pc + get_pcrel_offset(pc);
}

// Patch pc relative load address.
void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {
  unsigned long inst;
  // Offset is +/- 2**32 -> use long.
  ptrdiff_t distance = con - pc;

  get_instruction(pc, &inst);

  if (is_pcrelative_short(inst)) {
    *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc);  // Instructions are at least 2-byte aligned, no test required.

    // Some extra safety net.
    if (!RelAddr::is_in_range_of_RelAddr16(distance)) {
      print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);
      dump_code_range(tty, pc, 32, "distance out of range (16bit)");
      guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16");
    }
    return;
  }

  if (is_pcrelative_long(inst)) {
    *(int *)(pc+2) = RelAddr::pcrel_off32(con, pc);

    // Some extra safety net.
    if (!RelAddr::is_in_range_of_RelAddr32(distance)) {
      print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);
      dump_code_range(tty, pc, 32, "distance out of range (32bit)");
      guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32");
    }
    return;
  }

  guarantee(false, "not a pcrelative instruction to patch!");
}

// "Current PC" here means the address just behind the basr instruction.
address MacroAssembler::get_PC(Register result) {
  z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
  return pc();
}

// Get current PC + offset.
// Offset given in bytes, must be even!
// "Current PC" here means the address of the larl instruction plus the given offset.
address MacroAssembler::get_PC(Register result, int64_t offset) {
  address here = pc();
  z_larl(result, offset/2); // Save target instruction address in result.
  return here + offset;
}
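
// Usage sketch (illustrative; the offset is hypothetical): load the address two
// 6-byte instructions past the larl into a register, e.g. to use as a return PC,
// assuming the instruction following the larl is itself 6 bytes long.
//
//   address ret_addr = get_PC(Z_R14, 6 + 6);  // larl itself plus one 6-byte instruction
//
// The larl operand is offset/2 because z/Architecture relative addresses are
// counted in halfwords, which is also why the offset must be even.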

void MacroAssembler::instr_size(Register size, Register pc) {
  // Extract 2 most significant bits of current instruction.
  z_llgc(size, Address(pc));
  z_srl(size, 6);
  // Compute (x+3)&6 which translates 0->2, 1->4, 2->4, 3->6.
  z_ahi(size, 3);
  z_nill(size, 6);
}
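
// Worked example (illustrative): the two most significant bits of the first
// instruction byte encode the instruction length on z/Architecture. After the
// shift, size holds 0..3, and (size + 3) & 6 maps 0 -> 2, 1 -> 4, 2 -> 4,
// 3 -> 6, i.e. exactly the 2/4/4/6-byte format lengths.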

// Resize_frame with SP(new) = SP(old) - [offset].
void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp)
{
  assert_different_registers(offset, fp, Z_SP);
  if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }

  z_sgr(Z_SP, offset);
  z_stg(fp, _z_abi(callers_sp), Z_SP);
}

// Resize_frame with SP(new) = [newSP] + offset.
// This emitter is useful if we already have calculated a pointer
// into the to-be-allocated stack space, e.g. with special alignment properties,
// but need some additional space, e.g. for spilling.
// newSP    is the pre-calculated pointer. It must not be modified.
// fp       holds, or is filled with, the frame pointer.
// offset   is the additional increment which is added to addr to form the new SP.
//          Note: specify a negative value to reserve more space!
// load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
//                  It does not guarantee that fp contains the frame pointer at the end.
void MacroAssembler::resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp) {
  assert_different_registers(newSP, fp, Z_SP);

  if (load_fp) {
    z_lg(fp, _z_abi(callers_sp), Z_SP);
  }

  add2reg(Z_SP, offset, newSP);
  z_stg(fp, _z_abi(callers_sp), Z_SP);
}

// Resize_frame with SP(new) = [newSP].
// load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
//                  It does not guarantee that fp contains the frame pointer at the end.
void MacroAssembler::resize_frame_absolute(Register newSP, Register fp, bool load_fp) {
  assert_different_registers(newSP, fp, Z_SP);

  if (load_fp) {
    z_lg(fp, _z_abi(callers_sp), Z_SP); // need to use load/store.
  }

  z_lgr(Z_SP, newSP);
  if (newSP != Z_R0) { // make sure we generate correct code, no matter what register newSP uses.
    z_stg(fp, _z_abi(callers_sp), newSP);
  } else {
    z_stg(fp, _z_abi(callers_sp), Z_SP);
  }
}

// Resize_frame with SP(new) = SP(old) + offset.
void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) {
  assert_different_registers(fp, Z_SP);

  if (load_fp) {
    z_lg(fp, _z_abi(callers_sp), Z_SP);
  }
  add64(Z_SP, offset);
  z_stg(fp, _z_abi(callers_sp), Z_SP);
}

void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) {
#ifdef ASSERT
  assert_different_registers(bytes, old_sp, Z_SP);
  if (!copy_sp) {
    z_cgr(old_sp, Z_SP);
    asm_assert_eq("[old_sp]!=[Z_SP]", 0x211);
  }
#endif
  if (copy_sp) { z_lgr(old_sp, Z_SP); }
  if (bytes_with_inverted_sign) {
    z_agr(Z_SP, bytes);
  } else {
    z_sgr(Z_SP, bytes); // Z_sgfr sufficient, but probably not faster.
  }
  z_stg(old_sp, _z_abi(callers_sp), Z_SP);
}

unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) {
  long offset = Assembler::align(bytes, frame::alignment_in_bytes);
  assert(offset > 0, "should push a frame with positive size, size = %ld.", offset);
  assert(Displacement::is_validDisp(-offset), "frame size out of range, size = %ld", offset);

  // We must not write outside the current stack bounds (given by Z_SP).
  // Thus, we have to first update Z_SP and then store the previous SP as stack linkage.
  // We rely on Z_R0 by default to be available as scratch.
  z_lgr(scratch, Z_SP);
  add2reg(Z_SP, -offset);
  z_stg(scratch, _z_abi(callers_sp), Z_SP);
#ifdef ASSERT
  // Just make sure nobody uses the value in the default scratch register.
  // When another register is used, the caller might rely on it containing the frame pointer.
  if (scratch == Z_R0) {
    z_iihf(scratch, 0xbaadbabe);
    z_iilf(scratch, 0xdeadbeef);
  }
#endif
  return offset;
}
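
// Usage sketch (illustrative; the extra byte count is made up), written as it
// could appear inside another MacroAssembler emitter: push an ABI-conforming
// C frame with some spill space, do the work, and pop it again.
//
//   unsigned int frame_size = push_frame(frame::z_abi_160_size + 64, Z_R1);
//   ...                                  // call out, spill, etc.
//   pop_frame();                         // reload Z_SP from the saved back chain
//
// Note the ordering above: Z_SP is lowered first and only then is the old SP
// stored as back chain, so no write ever lands outside the current stack bounds.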

// Push a frame of size `bytes' plus abi160 on top.
unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) {
  BLOCK_COMMENT("push_frame_abi160 {");
  unsigned int res = push_frame(bytes + frame::z_abi_160_size);
  BLOCK_COMMENT("} push_frame_abi160");
  return res;
}

// Pop current C frame.
void MacroAssembler::pop_frame() {
  BLOCK_COMMENT("pop_frame:");
  Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP);
}

// Pop current C frame and restore return PC register (Z_R14).
void MacroAssembler::pop_frame_restore_retPC(int frame_size_in_bytes) {
  BLOCK_COMMENT("pop_frame_restore_retPC:");
  int retPC_offset = _z_abi16(return_pc) + frame_size_in_bytes;
  // If possible, pop frame by add instead of load (a penny saved is a penny got :-).
  if (Displacement::is_validDisp(retPC_offset)) {
    z_lg(Z_R14, retPC_offset, Z_SP);
    add2reg(Z_SP, frame_size_in_bytes);
  } else {
    add2reg(Z_SP, frame_size_in_bytes);
    restore_return_pc();
  }
}

void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
  if (allow_relocation) {
    call_c(entry_point);
  } else {
    call_c_static(entry_point);
  }
}

void MacroAssembler::call_VM_leaf_base(address entry_point) {
  bool allow_relocation = true;
  call_VM_leaf_base(entry_point, allow_relocation);
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register last_java_sp,
                                  address  entry_point,
                                  bool     allow_relocation,
                                  bool     check_exceptions) { // Defaults to true.
  // Allow_relocation indicates, if true, that the generated code shall
  // be fit for code relocation or referenced data relocation. In other
  // words: all addresses must be considered variable. PC-relative addressing
  // is not possible then.
  // On the other hand, if (allow_relocation == false), addresses and offsets
  // may be considered stable, enabling us to take advantage of some PC-relative
  // addressing tweaks. These might improve performance and reduce code size.

  // Determine last_java_sp register.
  if (!last_java_sp->is_valid()) {
    last_java_sp = Z_SP;  // Load Z_SP as SP.
  }

  set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation);

  // ARG1 must hold thread address.
  z_lgr(Z_ARG1, Z_thread);

  address return_pc = NULL;
  if (allow_relocation) {
    return_pc = call_c(entry_point);
  } else {
    return_pc = call_c_static(entry_point);
  }

  reset_last_Java_frame(allow_relocation);

  // C++ interp handles this in the interpreter.
  check_and_handle_popframe(Z_thread);
  check_and_handle_earlyret(Z_thread);

  // Check for pending exceptions.
  if (check_exceptions) {
    // Check for pending exceptions (java_thread is set upon return).
    load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));

    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach.

    Label ok;
    z_bre(ok); // Bcondequal is the same as bcondZero.
    call_stub(StubRoutines::forward_exception_entry());
    bind(ok);
  }

  // Get oop result if there is one and reset the value in the thread.
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }

  _last_calls_return_pc = return_pc;  // Wipe out other (error handling) calls.
}
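
// Usage sketch (illustrative; the entry point name is a placeholder, not a real
// symbol): a typical VM call that expects an oop result and keeps the default
// exception check. Z_ARG1 is loaded with the current thread inside call_VM_base.
//
//   // some_runtime_entry is hypothetical; real callers pass e.g. InterpreterRuntime entries.
//   call_VM(Z_RET, CAST_FROM_FN_PTR(address, some_runtime_entry), Z_ARG2);
//
// The wrappers below only shuffle up to three arguments into Z_ARG2..Z_ARG4 and
// then funnel everything through call_VM_base.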
|
2229 |
||
2230 |
void MacroAssembler::call_VM_base(Register oop_result, |
|
2231 |
Register last_java_sp, |
|
2232 |
address entry_point, |
|
2233 |
bool check_exceptions) { // Defaults to true. |
|
2234 |
bool allow_relocation = true; |
|
2235 |
call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions); |
|
2236 |
} |
|
2237 |
||
2238 |
// VM calls without explicit last_java_sp. |
|
2239 |
||
2240 |
void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) { |
|
2241 |
// Call takes possible detour via InterpreterMacroAssembler. |
|
2242 |
call_VM_base(oop_result, noreg, entry_point, true, check_exceptions); |
|
2243 |
} |
|
2244 |
||
2245 |
void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) { |
|
2246 |
// Z_ARG1 is reserved for the thread. |
|
2247 |
lgr_if_needed(Z_ARG2, arg_1); |
|
2248 |
call_VM(oop_result, entry_point, check_exceptions); |
|
2249 |
} |
|
2250 |
||
2251 |
void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) { |
|
2252 |
// Z_ARG1 is reserved for the thread. |
|
2253 |
lgr_if_needed(Z_ARG2, arg_1); |
|
2254 |
assert(arg_2 != Z_ARG2, "smashed argument"); |
|
2255 |
lgr_if_needed(Z_ARG3, arg_2); |
|
2256 |
call_VM(oop_result, entry_point, check_exceptions); |
|
2257 |
} |
|
2258 |
||
2259 |
void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, |
|
2260 |
Register arg_3, bool check_exceptions) { |
|
2261 |
// Z_ARG1 is reserved for the thread. |
|
2262 |
lgr_if_needed(Z_ARG2, arg_1); |
|
2263 |
assert(arg_2 != Z_ARG2, "smashed argument"); |
|
2264 |
lgr_if_needed(Z_ARG3, arg_2); |
|
2265 |
assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument"); |
|
2266 |
lgr_if_needed(Z_ARG4, arg_3); |
|
2267 |
call_VM(oop_result, entry_point, check_exceptions); |
|
2268 |
} |
|
2269 |
||
2270 |
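// The call_VM variants above differ only in how many explicit arguments they
// shuffle into Z_ARG2..Z_ARG4; Z_ARG1 is always loaded with the current thread
// inside call_VM_base. Illustrative usage sketch only (the entry point and the
// registers below are placeholders, not names defined in this file):
//
//   call_VM(result_reg, CAST_FROM_FN_PTR(address, SomeRuntime::entry), arg_reg);
//
// The caller provides just the Java-visible arguments; last_Java_frame
// bookkeeping and the pending-exception check are handled in call_VM_base.
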
// VM static calls without explicit last_java_sp.

void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) {
  // Call takes possible detour via InterpreterMacroAssembler.
  call_VM_base(oop_result, noreg, entry_point, false, check_exceptions);
}

void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
                                    Register arg_3, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  assert(arg_2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg_2);
  assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg_3);
  call_VM_static(oop_result, entry_point, check_exceptions);
}

// VM calls with explicit last_java_sp.

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) {
  // Call takes possible detour via InterpreterMacroAssembler.
  call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
                             Register arg_2, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  assert(arg_2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg_2);
  call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
                             Register arg_2, Register arg_3, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  assert(arg_2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg_2);
  assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg_3);
  call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
}

// VM leaf calls.

void MacroAssembler::call_VM_leaf(address entry_point) {
  // Call takes possible detour via InterpreterMacroAssembler.
  call_VM_leaf_base(entry_point, true);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  call_VM_leaf(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  assert(arg_2 != Z_ARG1, "smashed argument");
  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
  call_VM_leaf(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  assert(arg_2 != Z_ARG1, "smashed argument");
  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
  assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
  if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
  call_VM_leaf(entry_point);
}

// Static VM leaf calls.
// Really static VM leaf calls are never patched.

void MacroAssembler::call_VM_leaf_static(address entry_point) {
  // Call takes possible detour via InterpreterMacroAssembler.
  call_VM_leaf_base(entry_point, false);
}

void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  call_VM_leaf_static(entry_point);
}

void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  assert(arg_2 != Z_ARG1, "smashed argument");
  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
  call_VM_leaf_static(entry_point);
}

void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  assert(arg_2 != Z_ARG1, "smashed argument");
  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
  assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
  if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
  call_VM_leaf_static(entry_point);
}

// Don't use detour via call_c(reg).
address MacroAssembler::call_c(address function_entry) {
  load_const(Z_R1, function_entry);
  return call(Z_R1);
}

// Variant for really static (non-relocatable) calls which are never patched.
address MacroAssembler::call_c_static(address function_entry) {
  load_absolute_address(Z_R1, function_entry);
#if 0 // def ASSERT
  // Verify that call site did not move.
  load_const_optimized(Z_R0, function_entry);
  z_cgr(Z_R1, Z_R0);
  z_brc(bcondEqual, 3);
  z_illtrap(0xba);
#endif
  return call(Z_R1);
}

address MacroAssembler::call_c_opt(address function_entry) {
  bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);
  _last_calls_return_pc = success ? pc() : NULL;
  return _last_calls_return_pc;
}

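// The three C-call helpers above differ in how the target address is
// materialized: call_c() uses load_const() (relocatable), call_c_static() uses
// load_absolute_address() for targets that are never patched, and call_c_opt()
// emits a call_far_patchable sequence, leaving NULL in _last_calls_return_pc
// when no constant pool entry could be allocated. Callers that need the return
// pc should check for that NULL before using it.
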
// Identify a call_far_patchable instruction: LARL + LG + BASR
//
//    nop                  ; optionally, if required for alignment
//    lgrl rx,A(TOC entry) ; PC-relative access into constant pool
//    basr Z_R14,rx        ; end of this instruction must be aligned to a word boundary
//
// Code pattern will eventually get patched into variant2 (see below for detection code).
//
bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {
  address iaddr = instruction_addr;

  // Check for the actual load instruction.
  if (!is_load_const_from_toc(iaddr)) { return false; }
  iaddr += load_const_from_toc_size();

  // Check for the call (BASR) instruction, finally.
  assert(iaddr-instruction_addr+call_byregister_size() == call_far_patchable_size(), "size mismatch");
  return is_call_byregister(iaddr);
}

// Identify a call_far_patchable instruction: BRASL
//
// Code pattern that suits atomic patching:
//    nop                    ; Optionally, if required for alignment.
//    nop ...                ; Multiple filler nops to compensate for size difference (variant0 is longer).
//    nop                    ; For code pattern detection: Prepend each BRASL with a nop.
//    brasl Z_R14,<reladdr>  ; End of code must be 4-byte aligned!
bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {
  const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());

  // Check for correct number of leading nops.
  address iaddr;
  for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {
    if (!is_z_nop(iaddr)) { return false; }
  }
  assert(iaddr == call_addr, "sanity");

  // --> Check for call instruction.
  if (is_call_far_pcrelative(call_addr)) {
    assert(call_addr-instruction_addr+call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");
    return true;
  }

  return false;
}

// Emit a NOT mt-safely patchable 64 bit absolute call.
// If toc_offset == -2, then the destination of the call (= target) is emitted
//                      to the constant pool and a runtime_call relocation is added
//                      to the code buffer.
// If toc_offset != -2, target must already be in the constant pool at
//                      _ctableStart+toc_offset (a caller can retrieve toc_offset
//                      from the runtime_call relocation).
// Special handling of emitting to scratch buffer when there is no constant pool.
// Slightly changed code pattern. We emit an additional nop if we would
// not end emitting at a word aligned address. This is to ensure
// an atomically patchable displacement in brasl instructions.
//
// A call_far_patchable comes in different flavors:
//  - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register)
//  - LGRL(CP) / BR          (address in constant pool, pc-relative access)
//  - BRASL                  (relative address of call target coded in instruction)
// All flavors occupy the same amount of space. Length differences are compensated
// by leading nops, such that the instruction sequence always ends at the same
// byte offset. This is required to keep the return offset constant.
// Furthermore, the return address (the end of the instruction sequence) is forced
// to be on a 4-byte boundary. This is required for atomic patching, should we ever
// need to patch the call target of the BRASL flavor.
// RETURN value: false, if no constant pool entry could be allocated, true otherwise.
bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {
  // Get current pc and ensure word alignment for end of instr sequence.
  const address start_pc  = pc();
  const intptr_t start_off = offset();
  assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address");
  const ptrdiff_t dist = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop.
  const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit();
  const bool emit_relative_call  = !emit_target_to_pool &&
                                   RelAddr::is_in_range_of_RelAddr32(dist) &&
                                   ReoptimizeCallSequences &&
                                   !code_section()->scratch_emit();

  if (emit_relative_call) {
    // Add padding to get the same size as below.
    const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size();
    unsigned int current_padding;
    for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); }
    assert(current_padding == padding, "sanity");

    // relative call: len = 2(nop) + 6(brasl)
    // CodeBlob resize cannot occur in this case because
    // this call is emitted into pre-existing space.
    z_nop(); // Prepend each BRASL with a nop.
    z_brasl(Z_R14, target);
  } else {
    // absolute call: Get address from TOC.
    // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8}
    if (emit_target_to_pool) {
      // When emitting the call for the first time, we do not need to use
      // the pc-relative version. It will be patched anyway, when the code
      // buffer is copied.
      // Relocation is not needed when !ReoptimizeCallSequences.
      relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none;
      AddressLiteral dest(target, rt);
      // Store_oop_in_toc() adds dest to the constant table. As side effect, this kills
      // inst_mark(). Reset if possible.
      bool reset_mark = (inst_mark() == pc());
      tocOffset = store_oop_in_toc(dest);
      if (reset_mark) { set_inst_mark(); }
      if (tocOffset == -1) {
        return false; // Couldn't create constant pool entry.
      }
    }
    assert(offset() == start_off, "emit no code before this point!");

    address tocPos = pc() + tocOffset;
    if (emit_target_to_pool) {
      tocPos = code()->consts()->start() + tocOffset;
    }
    load_long_pcrelative(Z_R14, tocPos);
    z_basr(Z_R14, Z_R14);
  }

#ifdef ASSERT
  // Assert that we can identify the emitted call.
  assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call");
  assert(offset() == start_off+call_far_patchable_size(), "wrong size");

  if (emit_target_to_pool) {
    assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target,
           "wrong encoding of dest address");
  }
#endif
  return true; // success
}

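// Size sketch for the shapes emitted by call_far_patchable(), taken from the
// length comments above: the pc-relative shape is nop(2) + brasl(6) = 8 bytes,
// the TOC-based shape is up to load-TOC(6) + load-from-TOC(6) + basr(2) = 14
// bytes. The shorter shape is front-padded with nops so that every flavor ends
// at the same 4-byte aligned return offset, which is what keeps the BRASL
// displacement atomically patchable.
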
// Identify a call_far_patchable instruction.
// For more detailed information see header comment of call_far_patchable.
bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) {
  return is_call_far_patchable_variant2_at(instruction_addr) || // short version: BRASL
         is_call_far_patchable_variant0_at(instruction_addr);   // long version: LARL + LG + BASR
}

// Does the call_far_patchable instruction use a pc-relative encoding
// of the call destination?
bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) {
  // Variant 2 is pc-relative.
  return is_call_far_patchable_variant2_at(instruction_addr);
}

bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) {
  // Prepend each BRASL with a nop.
  return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size()); // Match at position after one nop required.
}

// Set destination address of a call_far_patchable instruction.
void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) {
  ResourceMark rm;

  // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit).
  int code_size = MacroAssembler::call_far_patchable_size();
  CodeBuffer buf(instruction_addr, code_size);
  MacroAssembler masm(&buf);
  masm.call_far_patchable(dest, tocOffset);
  ICache::invalidate_range(instruction_addr, code_size); // Empty on z.
}

// Get dest address of a call_far_patchable instruction.
address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) {
  // Dynamic TOC: absolute address in constant pool.
  // Check variant2 first, it is more frequent.

  // Relative address encoded in call instruction.
  if (is_call_far_patchable_variant2_at(instruction_addr)) {
    return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop.

  // Absolute address in constant pool.
  } else if (is_call_far_patchable_variant0_at(instruction_addr)) {
    address iaddr = instruction_addr;

    long    tocOffset = get_load_const_from_toc_offset(iaddr);
    address tocLoc    = iaddr + tocOffset;
    return *(address *)(tocLoc);
  } else {
    fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr);
    fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n",
            *(unsigned long*)instruction_addr,
            *(unsigned long*)(instruction_addr+8),
            call_far_patchable_size());
    Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size());
    ShouldNotReachHere();
    return NULL;
  }
}

void MacroAssembler::align_call_far_patchable(address pc) {
  if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); }
}

void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
}

void MacroAssembler::check_and_handle_popframe(Register java_thread) {
}

// Read from the polling page.
// Use TM or TMY instruction, depending on read offset.
// offset = 0: Use TM, safepoint polling.
// offset < 0: Use TMY, profiling safepoint polling.
void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) {
  if (Immediate::is_uimm12(offset)) {
    z_tm(offset, polling_page_address, mask_safepoint);
  } else {
    z_tmy(offset, polling_page_address, mask_profiling);
  }
}

// Check whether z_instruction is a read access to the polling page
// which was emitted by load_from_polling_page(..).
bool MacroAssembler::is_load_from_polling_page(address instr_loc) {
  unsigned long z_instruction;
  unsigned int  ilen = get_instruction(instr_loc, &z_instruction);

  if (ilen == 2) { return false; } // It's none of the allowed instructions.

  if (ilen == 4) {
    if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. fail.

    int ms = inv_mask(z_instruction,8,32);  // mask
    int ra = inv_reg(z_instruction,16,32);  // base register
    int ds = inv_uimm12(z_instruction);     // displacement

    if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) {
      return false; // It's not a z_tm(0, ra, mask_safepoint). Fail.
    }

  } else { /* if (ilen == 6) */

    assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y).");

    if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. fail.

    int ms = inv_mask(z_instruction,8,48);  // mask
    int ra = inv_reg(z_instruction,16,48);  // base register
    int ds = inv_simm20(z_instruction);     // displacement
  }

  return true;
}

// Extract poll address from instruction and ucontext.
address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {
  assert(ucontext != NULL, "must have ucontext");
  ucontext_t* uc = (ucontext_t*) ucontext;
  unsigned long z_instruction;
  unsigned int  ilen = get_instruction(instr_loc, &z_instruction);

  if (ilen == 4 && is_z_tm(z_instruction)) {
    int ra = inv_reg(z_instruction, 16, 32);  // base register
    int ds = inv_uimm12(z_instruction);       // displacement
    address addr = (address)uc->uc_mcontext.gregs[ra];
    return addr + ds;
  } else if (ilen == 6 && is_z_tmy(z_instruction)) {
    int ra = inv_reg(z_instruction, 16, 48);  // base register
    int ds = inv_simm20(z_instruction);       // displacement
    address addr = (address)uc->uc_mcontext.gregs[ra];
    return addr + ds;
  }

  ShouldNotReachHere();
  return NULL;
}

// Extract poll register from instruction.
uint MacroAssembler::get_poll_register(address instr_loc) {
  unsigned long z_instruction;
  unsigned int  ilen = get_instruction(instr_loc, &z_instruction);

  if (ilen == 4 && is_z_tm(z_instruction)) {
    return (uint)inv_reg(z_instruction, 16, 32);  // base register
  } else if (ilen == 6 && is_z_tmy(z_instruction)) {
    return (uint)inv_reg(z_instruction, 16, 48);  // base register
  }

  ShouldNotReachHere();
  return 0;
}

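// The TM/TMY split above mirrors the displacement encodings of those
// instructions: TM takes an unsigned 12-bit displacement (hence the
// Immediate::is_uimm12(offset) test, used for the offset-0 safepoint poll),
// while TMY accepts a signed 20-bit displacement and covers the negative
// offsets used for profiling polls. The decoders above accept exactly these
// two shapes.
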
void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
  if (SafepointMechanism::uses_thread_local_poll()) {
    const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_page_offset()) + 7 /* Big Endian */);
    // Armed page has poll_bit set.
    z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
    z_brnaz(slow_path);
  } else {
    load_const_optimized(temp_reg, SafepointSynchronize::address_of_state());
    z_cli(/*SafepointSynchronize::sz_state()*/4-1, temp_reg, SafepointSynchronize::_not_synchronized);
    z_brne(slow_path);
  }
}

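// In the thread-local branch of safepoint_poll() above, the polling word is
// tested at byte offset +7 because s390 is big-endian: that byte is the least
// significant one of the 64-bit polling page value and carries
// SafepointMechanism::poll_bit(). TM tests just that byte, and z_brnaz takes
// the slow path when the bit is set (page armed).
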
// Don't rely on register locking, always use Z_R1 as scratch register instead.
void MacroAssembler::bang_stack_with_offset(int offset) {
  // Stack grows down, caller passes positive offset.
  assert(offset > 0, "must bang with positive offset");
  if (Displacement::is_validDisp(-offset)) {
    z_tmy(-offset, Z_SP, mask_stackbang);
  } else {
    add2reg(Z_R1, -offset, Z_SP); // Do not destroy Z_SP!!!
    z_tm(0, Z_R1, mask_stackbang); // Just banging.
  }
}

void MacroAssembler::reserved_stack_check(Register return_pc) {
  // Test if reserved zone needs to be enabled.
  Label no_reserved_zone_enabling;
  assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");
  BLOCK_COMMENT("reserved_stack_check {");

  z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
  z_brl(no_reserved_zone_enabling);

  // Enable reserved zone again, throw stack overflow exception.
  save_return_pc();
  push_frame_abi160(0);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
  pop_frame();
  restore_return_pc();

  load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());
  // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.
  z_br(Z_R1);

  should_not_reach_here();

  bind(no_reserved_zone_enabling);
  BLOCK_COMMENT("} reserved_stack_check");
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int      con_size_in_bytes,
                                   Register t1,
                                   Label&   slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t1;
  Register thread = Z_thread;

  z_lg(obj, Address(thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    z_lay(end, Address(obj, con_size_in_bytes));
  } else {
    z_lay(end, Address(obj, var_size_in_bytes));
  }
  z_cg(end, Address(thread, JavaThread::tlab_end_offset()));
  branch_optimized(bcondHigh, slow_case);

  // Update the tlab top pointer.
  z_stg(end, Address(thread, JavaThread::tlab_top_offset()));

  // Recover var_size_in_bytes if necessary.
  if (var_size_in_bytes == end) {
    z_sgr(var_size_in_bytes, obj);
  }
}

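// tlab_allocate() above is the usual bump-pointer fast path: obj = tlab_top,
// end = obj + size (LAY forms the address without touching the condition
// code), branch to slow_case if end > tlab_end, otherwise store end back as
// the new tlab_top and return obj as the still uninitialized object.
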
// Emitter for interface method lookup.
//   input: recv_klass, intf_klass, itable_index
//   output: method_result
//   kills: itable_index, temp1_reg, Z_R0, Z_R1
// TODO: Temp2_reg is unused. We may use this emitter also in the itable stubs.
//       If the register is still not needed then, remove it.
void MacroAssembler::lookup_interface_method(Register           recv_klass,
                                             Register           intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register           method_result,
                                             Register           temp1_reg,
                                             Label&             no_such_interface,
                                             bool               return_method) {

  const Register vtable_len = temp1_reg;    // Used to compute itable_entry_addr.
  const Register itable_entry_addr = Z_R1_scratch;
  const Register itable_interface = Z_R0_scratch;

  BLOCK_COMMENT("lookup_interface_method {");

  // Load start of itable entries into itable_entry_addr.
  z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
  z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));

  // Loop over all itable entries until desired interfaceOop(Rinterface) found.
  const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());

  add2reg_with_index(itable_entry_addr,
                     vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
                     recv_klass, vtable_len);

  const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
  Label     search;

  bind(search);

  // Handle IncompatibleClassChangeError.
  // If the entry is NULL then we've reached the end of the table
  // without finding the expected interface, so throw an exception.
  load_and_test_long(itable_interface, Address(itable_entry_addr));
  z_bre(no_such_interface);

  add2reg(itable_entry_addr, itable_offset_search_inc);
  z_cgr(itable_interface, intf_klass);
  z_brne(search);

  // Entry found and itable_entry_addr points to it, get offset of vtable for interface.
  if (return_method) {
    const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
                                      itableOffsetEntry::interface_offset_in_bytes()) -
                                     itable_offset_search_inc;

    // Compute itableMethodEntry and get method and entry point.
    // We use addressing with index and displacement, since the formula
    // for computing the entry's offset has a fixed and a dynamic part,
    // the latter depending on the matched interface entry and on the case
    // that the itable index has been passed as a register, not a constant value.
    int method_offset = itableMethodEntry::method_offset_in_bytes();
                        // Fixed part (displacement), common operand.
    Register itable_offset = method_result;  // Dynamic part (index register).

    if (itable_index.is_register()) {
      // Compute the method's offset in that register, for the formula, see the
      // else-clause below.
      z_sllg(itable_offset, itable_index.as_register(), exact_log2(itableMethodEntry::size() * wordSize));
      z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
    } else {
      // Displacement increases.
      method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();

      // Load index from itable.
      z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
    }

    // Finally load the method's oop.
    z_lg(method_result, method_offset, itable_offset, recv_klass);
  }
  BLOCK_COMMENT("} lookup_interface_method");
}

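// The loop in lookup_interface_method() walks the itableOffsetEntry list that
// follows the embedded vtable of the receiver's Klass. Each entry pairs an
// interface Klass* with the offset of that interface's itableMethodEntry
// block; a NULL interface terminates the list (incompatible class change).
// With return_method set, the method is then loaded from (roughly)
//   recv_klass + offset-from-entry
//              + itable_index * itableMethodEntry::size() * wordSize
//              + itableMethodEntry::method_offset_in_bytes()
// which the code expresses via z_lg's base + index + displacement addressing.
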
// Lookup for virtual method invocation.
void MacroAssembler::lookup_virtual_method(Register           recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register           method_result) {
  assert_different_registers(recv_klass, vtable_index.register_or_noreg());
  assert(vtableEntry::size() * wordSize == wordSize,
         "else adjust the scaling in the code below");

  BLOCK_COMMENT("lookup_virtual_method {");

  const int base = in_bytes(Klass::vtable_start_offset());

  if (vtable_index.is_constant()) {
    // Load with base + disp.
    Address vtable_entry_addr(recv_klass,
                              vtable_index.as_constant() * wordSize +
                              base +
                              vtableEntry::method_offset_in_bytes());

    z_lg(method_result, vtable_entry_addr);
  } else {
    // Shift index properly and load with base + index + disp.
    Register vindex = vtable_index.as_register();
    Address  vtable_entry_addr(recv_klass, vindex,
                               base + vtableEntry::method_offset_in_bytes());

    z_sllg(vindex, vindex, exact_log2(wordSize));
    z_lg(method_result, vtable_entry_addr);
  }
  BLOCK_COMMENT("} lookup_virtual_method");
}

// Factor out code to call ic_miss_handler.
// Generate code to call the inline cache miss handler.
//
// In most cases, this code will be generated out-of-line.
// The method parameters are intended to provide some variability.
//   ICM          - Label which has to be bound to the start of useful code (past any traps).
//   trapMarker   - Marking byte for the generated illtrap instructions (if any).
//                  Any value except 0x00 is supported.
//                  = 0x00 - do not generate illtrap instructions.
//                           use nops to fill unused space.
//   requiredSize - required size of the generated code. If the actually
//                  generated code is smaller, use padding instructions to fill up.
//                  = 0 - no size requirement, no padding.
//   scratch      - scratch register to hold branch target address.
//
// The method returns the code offset of the bound label.
unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {
  intptr_t startOffset = offset();

  // Prevent entry at content_begin().
  if (trapMarker != 0) {
    z_illtrap(trapMarker);
  }

  // Load address of inline cache miss code into scratch register
  // and branch to cache miss handler.
  BLOCK_COMMENT("IC miss handler {");
  BIND(ICM);
  unsigned int   labelOffset = offset();
  AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());

  load_const_optimized(scratch, icmiss);
  z_br(scratch);

  // Fill unused space.
  if (requiredSize > 0) {
    while ((offset() - startOffset) < requiredSize) {
      if (trapMarker == 0) {
        z_nop();
      } else {
        z_illtrap(trapMarker);
      }
    }
  }
  BLOCK_COMMENT("} IC miss handler");
  return labelOffset;
}

void MacroAssembler::nmethod_UEP(Label& ic_miss) {
  Register ic_reg       = Z_inline_cache;
  int      klass_offset = oopDesc::klass_offset_in_bytes();
  if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
    if (VM_Version::has_CompareBranch()) {
      z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);
    } else {
      z_ltgr(Z_ARG1, Z_ARG1);
      z_bre(ic_miss);
    }
  }
  // Compare cached class against klass from receiver.
  compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);
  z_brne(ic_miss);
}

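// nmethod_UEP() above emits the unverified entry point check: the receiver
// arrives in Z_ARG1 and the inline cache provides the expected klass in
// Z_inline_cache. An explicit NULL check is only emitted when an implicit one
// (trapping on the klass load) is not available; the klass comparison then
// either falls through into the verified code or branches to the ic_miss
// handler.
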
void MacroAssembler::check_klass_subtype_fast_path(Register   sub_klass,
                                                   Register   super_klass,
                                                   Register   temp1_reg,
                                                   Label*     L_success,
                                                   Label*     L_failure,
                                                   Label*     L_slow_path,
                                                   RegisterOrConstant super_check_offset) {

  const int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
  const int sco_offset = in_bytes(Klass::super_check_offset_offset());

  bool must_load_sco  = (super_check_offset.constant_or_zero() == -1);
  bool need_slow_path = (must_load_sco ||
                         super_check_offset.constant_or_zero() == sc_offset);

  // Input registers must not overlap.
  assert_different_registers(sub_klass, super_klass, temp1_reg);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp1_reg != noreg, "supply either a temp or a register offset");
  }

  const Register Rsuper_check_offset = temp1_reg;

  NearLabel L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1 ||
         (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
         "at most one NULL in the batch, usually");

  BLOCK_COMMENT("check_klass_subtype_fast_path {");
  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success);

  // Check the supertype display, which is uint.
  if (must_load_sco) {
    z_llgf(Rsuper_check_offset, sco_offset, super_klass);
    super_check_offset = RegisterOrConstant(Rsuper_check_offset);
  }
  Address super_check_addr(sub_klass, super_check_offset, 0);
  z_cg(super_klass, super_check_addr); // compare w/ displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/

  if (super_check_offset.is_register()) {
    branch_optimized(Assembler::bcondEqual, *L_success);
    z_cfi(super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      branch_optimized(Assembler::bcondEqual, *L_slow_path);
    } else {
      branch_optimized(Assembler::bcondNotEqual, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      branch_optimized(Assembler::bcondEqual, *L_success);
    } else {
      branch_optimized(Assembler::bcondNotEqual, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      branch_optimized(Assembler::bcondEqual, *L_success);
    } else {
      branch_optimized(Assembler::bcondNotEqual, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);
#undef local_brc
#undef final_jmp
  BLOCK_COMMENT("} check_klass_subtype_fast_path");
  // fallthru (to slow path)
}

void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,
                                                   Register Rsuperklass,
                                                   Register Rarray_ptr,  // tmp
                                                   Register Rlength,     // tmp
                                                   Label* L_success,
                                                   Label* L_failure) {
  // Input registers must not overlap.
  // Also check for R1 which is explicitly used here.
  assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);
  NearLabel L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  const int ss_offset = in_bytes(Klass::secondary_supers_offset());
  const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());

  const int length_offset = Array<Klass*>::length_offset_in_bytes();
  const int base_offset   = Array<Klass*>::base_offset_in_bytes();

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/

  NearLabel loop_iterate, loop_count, match;

  BLOCK_COMMENT("check_klass_subtype_slow_path {");
  z_lg(Rarray_ptr, ss_offset, Rsubklass);

  load_and_test_int(Rlength, Address(Rarray_ptr, length_offset));
  branch_optimized(Assembler::bcondZero, *L_failure);

  // Oops in table are NO MORE compressed.
  z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match.
  z_bre(match);                               // Shortcut for array length = 1.

  // No match yet, so we must walk the array's elements.
  z_lngfr(Rlength, Rlength);
  z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array
  z_llill(Z_R1, BytesPerWord);               // Set increment/end index.
  add2reg(Rlength, 2 * BytesPerWord);        // start index  = -(n-2)*BytesPerWord
  z_slgr(Rarray_ptr, Rlength);               // start addr: +=  (n-2)*BytesPerWord
  z_bru(loop_count);

  BIND(loop_iterate);
  z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match.
  z_bre(match);
  BIND(loop_count);
  z_brxlg(Rlength, Z_R1, loop_iterate);

  // Rsuperklass not found among secondary super classes -> failure.
  branch_optimized(Assembler::bcondAlways, *L_failure);

  // Got a hit. Return success (zero result). Set cache.
  // Cache load doesn't happen here. For speed it is directly emitted by the compiler.

  BIND(match);

  z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache.

  final_jmp(*L_success);

  // Exit to the surrounding code.
  BIND(L_fallthrough);
#undef local_brc
#undef final_jmp
  BLOCK_COMMENT("} check_klass_subtype_slow_path");
}

// Emitter for combining fast and slow path.
void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp1_reg,
                                         Register temp2_reg,
                                         Label&   L_success) {
  NearLabel failure;
  BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));
  check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,
                                &L_success, &failure, NULL);
  check_klass_subtype_slow_path(sub_klass, super_klass,
                                temp1_reg, temp2_reg, &L_success, NULL);
  BIND(failure);
  BLOCK_COMMENT("} check_klass_subtype");
}

void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
  assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");

  Label L_fallthrough;
  if (L_fast_path == NULL) {
    L_fast_path = &L_fallthrough;
  } else if (L_slow_path == NULL) {
    L_slow_path = &L_fallthrough;
  }

  // Fast path check: class is fully initialized
  z_cli(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
  z_bre(*L_fast_path);

  // Fast path check: current thread is initializer thread
  z_cg(thread, Address(klass, InstanceKlass::init_thread_offset()));
  if (L_slow_path == &L_fallthrough) {
    z_bre(*L_fast_path);
  } else if (L_fast_path == &L_fallthrough) {
    z_brne(*L_slow_path);
  } else {
    Unimplemented();
  }

  bind(L_fallthrough);
}

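// clinit_barrier() above implements the fast class initialization check: a
// class passes if it is fully_initialized, or if it is currently being
// initialized by this very thread (init_thread == Z_thread). Exactly one of
// the two labels is expected to be NULL (providing both currently hits
// Unimplemented()); the NULL side falls through at L_fallthrough while the
// other side branches.
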
// Increment a counter at counter_address when the eq condition code is |
3163 |
// set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code. |
|
3164 |
void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) { |
|
3165 |
Label l; |
|
3166 |
z_brne(l); |
|
3167 |
load_const(tmp1_reg, counter_address); |
|
3168 |
add2mem_32(Address(tmp1_reg), 1, tmp2_reg); |
|
3169 |
z_cr(tmp1_reg, tmp1_reg); // Set cc to eq. |
|
3170 |
bind(l); |
|
3171 |
} |
|

// Semantics are dependent on the slow_case label:
//   If the slow_case label is not NULL, failure to biased-lock the object
//   transfers control to the location of the slow_case label. If the
//   object could be biased-locked, control is transferred to the done label.
//   The condition code is unpredictable.
//
//   If the slow_case label is NULL, failure to biased-lock the object results
//   in a transfer of control to the done label with a condition code of not_equal.
//   If the biased-lock could be successfully obtained, control is transferred to
//   the done label with a condition code of equal.
//   It is mandatory to react to the condition code at the done label.
//
void MacroAssembler::biased_locking_enter(Register obj_reg,
                                          Register mark_reg,
                                          Register temp_reg,
                                          Register temp2_reg,  // May be Z_R0!
                                          Label   &done,
                                          Label   *slow_case) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);

  Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise.

  BLOCK_COMMENT("biased_locking_enter {");

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid.
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits.
  assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits,
         "biased locking makes assumptions about bit layout");
  z_lr(temp_reg, mark_reg);
  z_nilf(temp_reg, markWord::biased_lock_mask_in_place);
  z_chi(temp_reg, markWord::biased_lock_pattern);
  z_brne(cas_label);  // Try cas if object is not biased, i.e. cannot be biased locked.

  load_prototype_header(temp_reg, obj_reg);
  load_const_optimized(temp2_reg, ~((int) markWord::age_mask_in_place));

  z_ogr(temp_reg, Z_thread);
  z_xgr(temp_reg, mark_reg);
  z_ngr(temp_reg, temp2_reg);
  if (PrintBiasedLockingStatistics) {
    increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg);
    // Restore mark_reg.
    z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
  }
  branch_optimized(Assembler::bcondEqual, done);  // Biased lock obtained, return success.

  Label try_revoke_bias;
  Label try_rebias;
  Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());

  //----------------------------------------------------------------------------
  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  z_tmll(temp_reg, markWord::biased_lock_mask_in_place);
  z_brnaz(try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  z_tmll(temp_reg, markWord::epoch_mask_in_place);
  z_brnaz(try_rebias);

  //----------------------------------------------------------------------------
  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place |
         markWord::epoch_mask_in_place);
  z_lgr(temp_reg, Z_thread);
  z_llgfr(mark_reg, mark_reg);
  z_ogr(temp_reg, mark_reg);

  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

  z_csg(mark_reg, temp_reg, 0, obj_reg);

  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.

  if (PrintBiasedLockingStatistics) {
    increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(),
                         temp_reg, temp2_reg);
  }
  if (slow_case != NULL) {
    branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
  }
  branch_optimized(Assembler::bcondAlways, done); // Biased lock status given in condition code.

  //----------------------------------------------------------------------------
  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.

  z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
  load_prototype_header(temp_reg, obj_reg);
  z_llgfr(mark_reg, mark_reg);

  z_ogr(temp_reg, Z_thread);

  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

  z_csg(mark_reg, temp_reg, 0, obj_reg);

  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.

  if (PrintBiasedLockingStatistics) {
    increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg);
  }
  if (slow_case != NULL) {
    branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
  }
  z_bru(done); // Biased lock status given in condition code.

  //----------------------------------------------------------------------------
  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  load_prototype_header(temp_reg, obj_reg);

  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

  z_csg(mark_reg, temp_reg, 0, obj_reg);

  // Fall through to the normal CAS-based lock, because no matter what
  // the result of the above CAS, some thread must have succeeded in
  // removing the bias bit from the object's header.
  if (PrintBiasedLockingStatistics) {
    // z_cgr(mark_reg, temp2_reg);
    increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg);
  }

  bind(cas_label);
  BLOCK_COMMENT("} biased_locking_enter");
}

void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) {
  // Check for biased locking unlock case, which is a no-op.
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  BLOCK_COMMENT("biased_locking_exit {");

  z_lg(temp_reg, 0, mark_addr);
  z_nilf(temp_reg, markWord::biased_lock_mask_in_place);

  z_chi(temp_reg, markWord::biased_lock_pattern);
  z_bre(done);
  BLOCK_COMMENT("} biased_locking_exit");
}

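// Fast-path monitor enter emitted for compiled code and the native wrapper.
// On return, CC == EQ means the lock was acquired (biased, stack-locked,
// recursive, or via the inflated monitor); CC != EQ makes the caller branch
// to the runtime slow path (see the comment at the end of the function).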
void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
  Register displacedHeader = temp1;
  Register currentHeader = temp1;
  Register temp = temp2;
  NearLabel done, object_has_monitor;

  BLOCK_COMMENT("compiler_fast_lock_object {");

  // Load markWord from oop into mark.
  z_lg(displacedHeader, 0, oop);

  if (try_bias) {
    biased_locking_enter(oop, displacedHeader, temp, Z_R0, done);
  }

  // Handle existing monitor.
  // The object has an existing monitor iff (mark & monitor_value) != 0.
  guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
  z_lr(temp, displacedHeader);
  z_nill(temp, markWord::monitor_value);
  z_brne(object_has_monitor);

  // Set mark to markWord | markWord::unlocked_value.
  z_oill(displacedHeader, markWord::unlocked_value);

  // Load Compare Value application register.

  // Initialize the box (must happen before we update the object mark).
  z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);

  // Memory Fence (in cmpxchgd)
  // Compare object markWord with mark and if equal exchange scratch1 with object markWord.

  // If the compare-and-swap succeeded, then we found an unlocked object and we
  // have now locked it.
  z_csg(displacedHeader, box, 0, oop);
  assert(currentHeader == displacedHeader, "must be same register"); // Identified two registers from z/Architecture.
  z_bre(done);

  // We did not see an unlocked object so try the fast recursive case.

  z_sgr(currentHeader, Z_SP);
  load_const_optimized(temp, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));

  z_ngr(currentHeader, temp);
  //   z_brne(done);
  //   z_release();
  z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);

  z_bru(done);

  Register zero = temp;
  Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
  bind(object_has_monitor);
  // The object's monitor m is unlocked iff m->owner == NULL,
  // otherwise m->owner may contain a thread or a stack address.
  //
  // Try to CAS m->owner from NULL to current thread.
  z_lghi(zero, 0);
  // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
  z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
  // Store a non-null value into the box.
  z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
#ifdef ASSERT
  z_brne(done);
  // We've acquired the monitor, check some invariants.
  // Invariant 1: _recursions should be 0.
  asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged,
                          "monitor->_recursions should be 0", -1);
  z_ltgr(zero, zero); // Set CR=EQ.
#endif
  bind(done);

  BLOCK_COMMENT("} compiler_fast_lock_object");
  // If locking was successful, CR should indicate 'EQ'.
  // The compiler or the native wrapper generates a branch to the runtime call
  // _complete_monitor_locking_Java.
}

|
3440 |
Register displacedHeader = temp1; |
|
3441 |
Register currentHeader = temp2; |
|
3442 |
Register temp = temp1; |
|
3443 |
Register monitor = temp2; |
|
3444 |
||
3445 |
Label done, object_has_monitor; |
|
3446 |
||
3447 |
BLOCK_COMMENT("compiler_fast_unlock_object {"); |
|
3448 |
||
3449 |
if (try_bias) { |
|
3450 |
biased_locking_exit(oop, currentHeader, done); |
|
3451 |
} |
|
3452 |
||
3453 |
// Find the lock address and load the displaced header from the stack. |
|
3454 |
// if the displaced header is zero, we have a recursive unlock. |
|
3455 |
load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes())); |
|
3456 |
z_bre(done); |
|
3457 |
||
3458 |
// Handle existing monitor. |
|
51663 | 3459 |
// The object has an existing monitor iff (mark & monitor_value) != 0. |
3460 |
z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop); |
|
57777
90ead0febf56
8229258: Rework markOop and markOopDesc into a simpler mark word value carrier
stefank
parents:
57583
diff
changeset
|
3461 |
guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word"); |
90ead0febf56
8229258: Rework markOop and markOopDesc into a simpler mark word value carrier
stefank
parents:
57583
diff
changeset
|
3462 |
z_nill(currentHeader, markWord::monitor_value); |
51663 | 3463 |
z_brne(object_has_monitor); |
42065 | 3464 |
|
3465 |
// Check if it is still a light weight lock, this is true if we see |
|
57777
90ead0febf56
8229258: Rework markOop and markOopDesc into a simpler mark word value carrier
stefank
parents:
57583
diff
changeset
|
3466 |
// the stack address of the basicLock in the markWord of the object |
42065 | 3467 |
// copy box to currentHeader such that csg does not kill it. |
3468 |
z_lgr(currentHeader, box); |
|
3469 |
z_csg(currentHeader, displacedHeader, 0, oop); |
|
3470 |
z_bru(done); // Csg sets CR as desired. |
|
3471 |
||
3472 |
// Handle existing monitor. |
|
51663 | 3473 |
bind(object_has_monitor); |
3474 |
z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop); // CurrentHeader is tagged with monitor_value set. |
|
3475 |
load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); |
|
3476 |
z_brne(done); |
|
3477 |
load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); |
|
3478 |
z_brne(done); |
|
3479 |
load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); |
|
3480 |
z_brne(done); |
|
3481 |
load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); |
|
3482 |
z_brne(done); |
|
3483 |
z_release(); |
|
3484 |
z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader); |
|
42065 | 3485 |
|
3486 |
bind(done); |
|
3487 |
||
3488 |
BLOCK_COMMENT("} compiler_fast_unlock_object"); |
|
3489 |
// flag == EQ indicates success |
|
3490 |
// flag == NE indicates failure |
|
3491 |
} |
|
3492 |
||
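// Resolve a jobject (JNI handle) in 'value' to the oop it refers to.
// The handle decoding, including any GC-specific load barrier, is delegated
// to the active BarrierSetAssembler.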
void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->resolve_jobject(this, value, tmp1, tmp2);
}

// Last_Java_sp must comply with the rules in frame_s390.hpp.
void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {
  BLOCK_COMMENT("set_last_Java_frame {");

  // Always set last_Java_pc and flags first because once last_Java_sp
  // is visible, has_last_Java_frame is true and users will look at the
  // rest of the fields. (Note: flags should always be zero before we
  // get here, so they don't need to be set.)

  // Verify that last_Java_pc was zeroed on return to Java.
  if (allow_relocation) {
    asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()),
                            Z_thread,
                            "last_Java_pc not zeroed before leaving Java",
                            0x200);
  } else {
    asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()),
                                   Z_thread,
                                   "last_Java_pc not zeroed before leaving Java",
                                   0x200);
  }

  // When returning from calling out from Java mode the frame anchor's
  // last_Java_pc will always be set to NULL. It is set here so that
  // if we are doing a call to native (not VM) that we capture the
  // known pc and don't have to rely on the native call having a
  // standard frame linkage where we can find the pc.
  if (last_Java_pc != noreg) {
    z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset()));
  }

  // This membar release is not required on z/Architecture, since the sequence of stores
  // is maintained. Nevertheless, we leave it in to document the required ordering.
  // The implementation of z_release() should be empty.
  // z_release();

  z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset()));
  BLOCK_COMMENT("} set_last_Java_frame");
}

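// Tear down the frame anchor: assert that last_Java_sp was set, then clear
// last_Java_sp and last_Java_pc in the current JavaThread.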
void MacroAssembler::reset_last_Java_frame(bool allow_relocation) {
  BLOCK_COMMENT("reset_last_Java_frame {");

  if (allow_relocation) {
    asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
                               Z_thread,
                               "SP was not set, still zero",
                               0x202);
  } else {
    asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()),
                                      Z_thread,
                                      "SP was not set, still zero",
                                      0x202);
  }

  // _last_Java_sp = 0
  // Clearing storage must be atomic here, so don't use clear_mem()!
  store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0);

  // _last_Java_pc = 0
  store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0);

  BLOCK_COMMENT("} reset_last_Java_frame");
  return;
}

void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) {
  assert_different_registers(sp, tmp1);

  // We cannot trust that code generated by the C++ compiler saves R14
  // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
  // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
  // Therefore we load the PC into tmp1 and let set_last_Java_frame() save
  // it into the frame anchor.
  get_PC(tmp1);
  set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation);
}

void MacroAssembler::set_thread_state(JavaThreadState new_state) {
  z_release();

  assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction");
  assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int");
  store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false);
}

void MacroAssembler::get_vm_result(Register oop_result) { |
|
3585 |
verify_thread(); |
|
3586 |
||
3587 |
z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset())); |
|
3588 |
clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*)); |
|
3589 |
||
3590 |
verify_oop(oop_result); |
|
3591 |
} |
|
3592 |
||
3593 |
void MacroAssembler::get_vm_result_2(Register result) { |
|
3594 |
verify_thread(); |
|
3595 |
||
3596 |
z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset())); |
|
3597 |
clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*)); |
|
3598 |
} |
|
3599 |
||
3600 |
// We require that C code which does not return a value in vm_result will |
|
3601 |
// leave it undisturbed. |
|
3602 |
void MacroAssembler::set_vm_result(Register oop_result) { |
|
3603 |
z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset())); |
|
3604 |
} |
|
3605 |
||
// Explicit null checks (used for method handle code).
void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) {
  if (!ImplicitNullChecks) {
    NearLabel ok;

    compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok);

    // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address).
    address exception_entry = Interpreter::throw_NullPointerException_entry();
    load_absolute_address(reg, exception_entry);
    z_br(reg);

    bind(ok);
  } else {
    if (needs_explicit_null_check((intptr_t)offset)) {
      // Provoke OS NULL exception if reg = NULL by
      // accessing M[reg] w/o changing any registers.
      z_lg(tmp, 0, reg);
    }
    // else
    //   Nothing to do, (later) access of M[reg + offset]
    //   will provoke OS NULL exception if reg = NULL.
  }
}

//-------------------------------------
// Compressed Klass Pointers
//-------------------------------------

// Klass oop manipulations if compressed.
void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
  Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
  address  base    = CompressedKlassPointers::base();
  int      shift   = CompressedKlassPointers::shift();
  assert(UseCompressedClassPointers, "only for compressed klass ptrs");

  BLOCK_COMMENT("cKlass encoder {");

#ifdef ASSERT
  Label ok;
  z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.
  z_brc(Assembler::bcondAllZero, ok);
  // The plain disassembler does not recognize illtrap. It instead displays
  // a 32-bit value. Issuing two illtraps assures the disassembler finds
  // the proper beginning of the next instruction.
  z_illtrap(0xee);
  z_illtrap(0xee);
  bind(ok);
#endif

  if (base != NULL) {
    unsigned int base_h = ((unsigned long)base)>>32;
    unsigned int base_l = (unsigned int)((unsigned long)base);
    if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
      lgr_if_needed(dst, current);
      z_aih(dst, -((int)base_h));     // Base has no set bits in lower half.
    } else if ((base_h == 0) && (base_l != 0)) {
      lgr_if_needed(dst, current);
      z_agfi(dst, -(int)base_l);
    } else {
      load_const(Z_R0, base);
      lgr_if_needed(dst, current);
      z_sgr(dst, Z_R0);
    }
    current = dst;
  }
  if (shift != 0) {
    assert (LogKlassAlignmentInBytes == shift, "decode alg wrong");
    z_srlg(dst, current, shift);
    current = dst;
  }
  lgr_if_needed(dst, current); // Move may be required (if neither base nor shift != 0).

  BLOCK_COMMENT("} cKlass encoder");
}

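// The encoding implemented above, written out (illustrative pseudo code):
//   narrowKlass = (klass - CompressedKlassPointers::base()) >> CompressedKlassPointers::shift()
// where the subtraction is skipped for a NULL base and the shift is skipped
// when it is zero.
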
// This function calculates the size of the code generated by
//   decode_klass_not_null(register dst, Register src)
// when (Universe::heap() != NULL). Hence, if the instructions
// it generates change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
  address base = CompressedKlassPointers::base();
  int shift_size = CompressedKlassPointers::shift() == 0 ? 0 : 6; /* sllg */
  int addbase_size = 0;
  assert(UseCompressedClassPointers, "only for compressed klass ptrs");

  if (base != NULL) {
    unsigned int base_h = ((unsigned long)base)>>32;
    unsigned int base_l = (unsigned int)((unsigned long)base);
    if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
      addbase_size += 6; /* aih */
    } else if ((base_h == 0) && (base_l != 0)) {
      addbase_size += 6; /* algfi */
    } else {
      addbase_size += load_const_size();
      addbase_size += 4; /* algr */
    }
  }
#ifdef ASSERT
  addbase_size += 10;
  addbase_size += 2; // Extra sigill.
#endif
  return addbase_size + shift_size;
}

// !!! If the instructions that get generated here change
// then function instr_size_for_decode_klass_not_null()
// needs to get updated.
// This variant of decode_klass_not_null() must generate predictable code!
// The code must only depend on globally known parameters.
void MacroAssembler::decode_klass_not_null(Register dst) {
  address base    = CompressedKlassPointers::base();
  int     shift   = CompressedKlassPointers::shift();
  int     beg_off = offset();
  assert(UseCompressedClassPointers, "only for compressed klass ptrs");

  BLOCK_COMMENT("cKlass decoder (const size) {");

  if (shift != 0) { // Shift required?
    z_sllg(dst, dst, shift);
  }
  if (base != NULL) {
    unsigned int base_h = ((unsigned long)base)>>32;
    unsigned int base_l = (unsigned int)((unsigned long)base);
    if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
      z_aih(dst, base_h);     // Base has no set bits in lower half.
    } else if ((base_h == 0) && (base_l != 0)) {
      z_algfi(dst, base_l);   // Base has no set bits in upper half.
    } else {
      load_const(Z_R0, base); // Base has set bits everywhere.
      z_algr(dst, Z_R0);
    }
  }

#ifdef ASSERT
  Label ok;
  z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
  z_brc(Assembler::bcondAllZero, ok);
  // The plain disassembler does not recognize illtrap. It instead displays
  // a 32-bit value. Issuing two illtraps assures the disassembler finds
  // the proper beginning of the next instruction.
  z_illtrap(0xd1);
  z_illtrap(0xd1);
  bind(ok);
#endif
  assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");

  BLOCK_COMMENT("} cKlass decoder (const size)");
}

// This variant of decode_klass_not_null() is for cases where
//  1) the size of the generated instructions may vary
//  2) the result is (potentially) stored in a register different from the source.
void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
  address base  = CompressedKlassPointers::base();
  int     shift = CompressedKlassPointers::shift();
  assert(UseCompressedClassPointers, "only for compressed klass ptrs");

  BLOCK_COMMENT("cKlass decoder {");

  if (src == noreg) src = dst;

  if (shift != 0) { // Shift or at least move required?
    z_sllg(dst, src, shift);
  } else {
    lgr_if_needed(dst, src);
  }

  if (base != NULL) {
    unsigned int base_h = ((unsigned long)base)>>32;
    unsigned int base_l = (unsigned int)((unsigned long)base);
    if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
      z_aih(dst, base_h);     // Base has no set bits in lower half.
    } else if ((base_h == 0) && (base_l != 0)) {
      z_algfi(dst, base_l);   // Base has no set bits in upper half.
    } else {
      load_const_optimized(Z_R0, base); // Base has set bits everywhere.
      z_algr(dst, Z_R0);
    }
  }

#ifdef ASSERT
  Label ok;
  z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
  z_brc(Assembler::bcondAllZero, ok);
  // The plain disassembler does not recognize illtrap. It instead displays
  // a 32-bit value. Issuing two illtraps assures the disassembler finds
  // the proper beginning of the next instruction.
  z_illtrap(0xd2);
  z_illtrap(0xd2);
  bind(ok);
#endif
  BLOCK_COMMENT("} cKlass decoder");
}

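// The decoding implemented by both variants above, written out
// (illustrative pseudo code):
//   klass = (narrowKlass << CompressedKlassPointers::shift()) + CompressedKlassPointers::base()
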
void MacroAssembler::load_klass(Register klass, Address mem) {
  if (UseCompressedClassPointers) {
    z_llgf(klass, mem);
    // Attention: no null check here!
    decode_klass_not_null(klass);
  } else {
    z_lg(klass, mem);
  }
}

void MacroAssembler::load_klass(Register klass, Register src_oop) {
  if (UseCompressedClassPointers) {
    z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop);
    // Attention: no null check here!
    decode_klass_not_null(klass);
  } else {
    z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop);
  }
}

void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) {
  assert_different_registers(Rheader, Rsrc_oop);
  load_klass(Rheader, Rsrc_oop);
  z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset()));
}

void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) {
  if (UseCompressedClassPointers) {
    assert_different_registers(dst_oop, klass, Z_R0);
    if (ck == noreg) ck = klass;
    encode_klass_not_null(ck, klass);
    z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
  } else {
    z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::store_klass_gap(Register s, Register d) {
  if (UseCompressedClassPointers) {
    assert(s != d, "not enough registers");
    // Support s = noreg.
    if (s != noreg) {
      z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));
    } else {
      z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0);
    }
  }
}

// Compare klass ptr in memory against klass ptr in register.
//
// Rop1      - klass in register, always uncompressed.
// disp      - Offset of klass in memory, compressed/uncompressed, depending on runtime flag.
// Rbase     - Base address of cKlass in memory.
// maybeNULL - True if Rop1 possibly is a NULL.
void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) {

  BLOCK_COMMENT("compare klass ptr {");

  if (UseCompressedClassPointers) {
    const int shift = CompressedKlassPointers::shift();
    address   base  = CompressedKlassPointers::base();

    assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift");
    assert_different_registers(Rop1, Z_R0);
    assert_different_registers(Rop1, Rbase, Z_R1);

    // First encode register oop and then compare with cOop in memory.
    // This sequence saves an unnecessary cOop load and decode.
    if (base == NULL) {
      if (shift == 0) {
        z_cl(Rop1, disp, Rbase);     // Unscaled
      } else {
        z_srlg(Z_R0, Rop1, shift);   // ZeroBased
        z_cl(Z_R0, disp, Rbase);
      }
    } else {                         // HeapBased
#ifdef ASSERT
      bool used_R0 = true;
      bool used_R1 = true;
#endif
      Register current = Rop1;
      Label    done;

      if (maybeNULL) {       // NULL ptr must be preserved!
        z_ltgr(Z_R0, current);
        z_bre(done);
        current = Z_R0;
      }

      unsigned int base_h = ((unsigned long)base)>>32;
      unsigned int base_l = (unsigned int)((unsigned long)base);
      if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
        lgr_if_needed(Z_R0, current);
        z_aih(Z_R0, -((int)base_h));     // Base has no set bits in lower half.
      } else if ((base_h == 0) && (base_l != 0)) {
        lgr_if_needed(Z_R0, current);
        z_agfi(Z_R0, -(int)base_l);
      } else {
        int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
        add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement.
      }

      if (shift != 0) {
        z_srlg(Z_R0, Z_R0, shift);
      }
      bind(done);
      z_cl(Z_R0, disp, Rbase);
#ifdef ASSERT
      if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
      if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
#endif
    }
  } else {
    z_clg(Rop1, disp, Z_R0, Rbase);
  }
  BLOCK_COMMENT("} compare klass ptr");
}

//---------------------------
// Compressed oops
//---------------------------

void MacroAssembler::encode_heap_oop(Register oop) {
  oop_encoder(oop, oop, true /*maybe null*/);
}

void MacroAssembler::encode_heap_oop_not_null(Register oop) {
  oop_encoder(oop, oop, false /*not null*/);
}

// Called with something derived from the oop base. e.g. oop_base>>3. |
|
3933 |
int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) { |
|
3934 |
unsigned int oop_base_ll = ((unsigned int)(oop_base >> 0)) & 0xffff; |
|
3935 |
unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff; |
|
3936 |
unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff; |
|
3937 |
unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff; |
|
3938 |
unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1) |
|
3939 |
+ (oop_base_lh == 0 ? 0:1) |
|
3940 |
+ (oop_base_hl == 0 ? 0:1) |
|
3941 |
+ (oop_base_hh == 0 ? 0:1); |
|
3942 |
||
3943 |
assert(oop_base != 0, "This is for HeapBased cOops only"); |
|
3944 |
||
3945 |
if (n_notzero_parts != 1) { // Check if oop_base is just a few pages shy of a power of 2. |
|
3946 |
uint64_t pow2_offset = 0x10000 - oop_base_ll; |
|
3947 |
if (pow2_offset < 0x8000) { // This might not be necessary. |
|
3948 |
uint64_t oop_base2 = oop_base + pow2_offset; |
|
3949 |
||
3950 |
oop_base_ll = ((unsigned int)(oop_base2 >> 0)) & 0xffff; |
|
3951 |
oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff; |
|
3952 |
oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff; |
|
3953 |
oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff; |
|
3954 |
n_notzero_parts = (oop_base_ll == 0 ? 0:1) + |
|
3955 |
(oop_base_lh == 0 ? 0:1) + |
|
3956 |
(oop_base_hl == 0 ? 0:1) + |
|
3957 |
(oop_base_hh == 0 ? 0:1); |
|
3958 |
if (n_notzero_parts == 1) { |
|
3959 |
assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register"); |
|
3960 |
return -pow2_offset; |
|
3961 |
} |
|
3962 |
} |
|
3963 |
} |
|
3964 |
return 0; |
|
3965 |
} |
|
3966 |
||
3967 |
// If base address is offset from a straight power of two by just a few pages, |
|
3968 |
// return this offset to the caller for a possible later composite add. |
|
3969 |
// TODO/FIX: will only work correctly for 4k pages. |
|
3970 |
int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) { |
|
3971 |
int pow2_offset = get_oop_base_pow2_offset(oop_base); |
|
3972 |
||
3973 |
load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible. |
|
3974 |
||
3975 |
return pow2_offset; |
|
3976 |
} |
|
3977 |
||
3978 |
int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) { |
|
3979 |
int offset = get_oop_base(Rbase, oop_base); |
|
3980 |
z_lcgr(Rbase, Rbase); |
|
3981 |
return -offset; |
|
3982 |
} |
|
3983 |
||
// Compare compressed oop in memory against oop in register.
// Rop1            - Oop in register.
// disp            - Offset of cOop in memory.
// Rbase           - Base address of cOop in memory.
// maybeNULL       - True if Rop1 possibly is a NULL.
// maybeNULLtarget - Branch target for Rop1 == NULL, if flow control shall NOT continue with compare instruction.
void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) {
  Register Rbase  = mem.baseOrR0();
  Register Rindex = mem.indexOrR0();
  int64_t  disp   = mem.disp();

  const int shift = CompressedOops::shift();
  address   base  = CompressedOops::base();

  assert(UseCompressedOops, "must be on to call this method");
  assert(Universe::heap() != NULL, "java heap must be initialized to call this method");
  assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
  assert_different_registers(Rop1, Z_R0);
  assert_different_registers(Rop1, Rbase, Z_R1);
  assert_different_registers(Rop1, Rindex, Z_R1);

  BLOCK_COMMENT("compare heap oop {");

  // First encode register oop and then compare with cOop in memory.
  // This sequence saves an unnecessary cOop load and decode.
  if (base == NULL) {
    if (shift == 0) {
      z_cl(Rop1, disp, Rindex, Rbase);  // Unscaled
    } else {
      z_srlg(Z_R0, Rop1, shift);        // ZeroBased
      z_cl(Z_R0, disp, Rindex, Rbase);
    }
  } else {                              // HeapBased
#ifdef ASSERT
    bool  used_R0 = true;
    bool  used_R1 = true;
#endif
    Label done;
    int   pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));

    if (maybeNULL) {       // NULL ptr must be preserved!
      z_ltgr(Z_R0, Rop1);
      z_bre(done);
    }

    add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1);
    z_srlg(Z_R0, Z_R0, shift);

    bind(done);
    z_cl(Z_R0, disp, Rindex, Rbase);
#ifdef ASSERT
    if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
    if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
#endif
  }
  BLOCK_COMMENT("} compare heap oop");
}

void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
                                     const Address& addr, Register val,
                                     Register tmp1, Register tmp2, Register tmp3) {
  assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
                         ON_UNKNOWN_OOP_REF)) == 0, "unsupported decorator");
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  decorators = AccessInternal::decorator_fixup(decorators);
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetAssembler::store_at(this, decorators, type,
                                      addr, val,
                                      tmp1, tmp2, tmp3);
  } else {
    bs->store_at(this, decorators, type,
                 addr, val,
                 tmp1, tmp2, tmp3);
  }
}

void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
                                    const Address& addr, Register dst,
                                    Register tmp1, Register tmp2, Label *is_null) {
  assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
                         ON_PHANTOM_OOP_REF | ON_WEAK_OOP_REF)) == 0, "unsupported decorator");
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  decorators = AccessInternal::decorator_fixup(decorators);
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetAssembler::load_at(this, decorators, type,
                                     addr, dst,
                                     tmp1, tmp2, is_null);
  } else {
    bs->load_at(this, decorators, type,
                addr, dst,
                tmp1, tmp2, is_null);
  }
}

void MacroAssembler::load_heap_oop(Register dest, const Address &a,
                                   Register tmp1, Register tmp2,
                                   DecoratorSet decorators, Label *is_null) {
  access_load_at(T_OBJECT, IN_HEAP | decorators, a, dest, tmp1, tmp2, is_null);
}

void MacroAssembler::store_heap_oop(Register Roop, const Address &a,
                                    Register tmp1, Register tmp2, Register tmp3,
                                    DecoratorSet decorators) {
  access_store_at(T_OBJECT, IN_HEAP | decorators, a, Roop, tmp1, tmp2, tmp3);
}

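// All heap accesses above funnel through the active GC's BarrierSetAssembler:
// access_load_at()/access_store_at() fix up the decorators and then either
// call the plain BarrierSetAssembler (AS_RAW) or the GC-specific override.
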
4092 |
//------------------------------------------------- |
|
4093 |
// Encode compressed oop. Generally usable encoder. |
|
4094 |
//------------------------------------------------- |
|
4095 |
// Rsrc - contains regular oop on entry. It remains unchanged. |
|
4096 |
// Rdst - contains compressed oop on exit. |
|
4097 |
// Rdst and Rsrc may indicate same register, in which case Rsrc does not remain unchanged. |
|
4098 |
// |
|
4099 |
// Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality. |
|
4100 |
// Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance. |
|
4101 |
// |
|
4102 |
// only32bitValid is set, if later code only uses the lower 32 bits. In this |
|
4103 |
// case we must not fix the upper 32 bits. |
|
4104 |
void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
                                 Register Rbase, int pow2_offset, bool only32bitValid) {

  const address oop_base  = CompressedOops::base();
  const int     oop_shift = CompressedOops::shift();
  const bool    disjoint  = CompressedOops::base_disjoint();

  assert(UseCompressedOops, "must be on to call this method");
  assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder");
  assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");

  if (disjoint || (oop_base == NULL)) {
    BLOCK_COMMENT("cOop encoder zeroBase {");
    if (oop_shift == 0) {
      if (oop_base != NULL && !only32bitValid) {
        z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again.
      } else {
        lgr_if_needed(Rdst, Rsrc);
      }
    } else {
      z_srlg(Rdst, Rsrc, oop_shift);
      if (oop_base != NULL && !only32bitValid) {
        z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
      }
    }
    BLOCK_COMMENT("} cOop encoder zeroBase");
    return;
  }

  bool used_R0 = false;
  bool used_R1 = false;

  BLOCK_COMMENT("cOop encoder general {");
  assert_different_registers(Rdst, Z_R1);
  assert_different_registers(Rsrc, Rbase);
  if (maybeNULL) {
    Label done;
    // We reorder shifting and subtracting, so that we can compare
    // and shift in parallel:
    //
    // cycle 0: potential LoadN, base = <const>
    // cycle 1: base = !base     dst = src >> 3,    cmp cr = (src != 0)
    // cycle 2: if (cr) br,      dst = dst + base + offset

    // Get oop_base components.
    if (pow2_offset == -1) {
      if (Rdst == Rbase) {
        if (Rdst == Z_R1 || Rsrc == Z_R1) {
          Rbase = Z_R0;
          used_R0 = true;
        } else {
          Rdst = Z_R1;
          used_R1 = true;
        }
      }
      if (Rbase == Z_R1) {
        used_R1 = true;
      }
      pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift);
    }
    assert_different_registers(Rdst, Rbase);

    // Check for NULL oop (must be left alone) and shift.
    if (oop_shift != 0) {  // Shift out alignment bits
      if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set.
        z_srag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
      } else {
        z_srlg(Rdst, Rsrc, oop_shift);
        z_ltgr(Rsrc, Rsrc);  // This is the recommended way of testing for zero.
        // This probably is faster, as it does not write a register. No!
        // z_cghi(Rsrc, 0);
      }
    } else {
      z_ltgr(Rdst, Rsrc);   // Move NULL to result register.
    }
    z_bre(done);

    // Subtract oop_base components.
    if ((Rdst == Z_R0) || (Rbase == Z_R0)) {
      z_algr(Rdst, Rbase);
      if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); }
    } else {
      add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst);
    }
    if (!only32bitValid) {
      z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
    }
    bind(done);

  } else {  // not null
    // Get oop_base components.
    if (pow2_offset == -1) {
      pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base);
    }

    // Subtract oop_base components and shift.
    if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) {
      // Don't use lay instruction.
      if (Rdst == Rsrc) {
        z_algr(Rdst, Rbase);
      } else {
        lgr_if_needed(Rdst, Rbase);
        z_algr(Rdst, Rsrc);
      }
      if (pow2_offset != 0) add2reg(Rdst, pow2_offset);
    } else {
      add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc);
    }
    if (oop_shift != 0) {   // Shift out alignment bits.
      z_srlg(Rdst, Rdst, oop_shift);
    }
    if (!only32bitValid) {
      z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
    }
  }
#ifdef ASSERT
  if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); }
  if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); }
#endif
  BLOCK_COMMENT("} cOop encoder general");
}

//-------------------------------------------------
// decode compressed oop. Generally usable decoder.
//-------------------------------------------------
// Rsrc - contains compressed oop on entry.
// Rdst - contains regular oop on exit.
// Rdst and Rsrc may indicate same register.
// Rdst must not be the same register as Rbase, if Rbase was preloaded (before call).
// Rdst can be the same register as Rbase. Then, either Z_R0 or Z_R1 must be available as scratch.
// Rbase - register to use for the base
// pow2_offset - offset of base to nice value. If -1, base must be loaded.
// For performance, it is good to
//  - avoid Z_R0 for any of the argument registers.
//  - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance.
//  - avoid Z_R1 for Rdst if Rdst == Rbase.
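// Conceptual sketch (for orientation only, not emitted code):
//   oop = (narrow == 0) ? NULL : CompressedOops::base() + ((uint64_t)narrow << CompressedOops::shift());
// When the base is disjoint from the shifted narrow oop (no overlapping address bits), the
// addition degenerates to OR-ing the base's upper halfwords into Rdst; that is the
// disjointBase path below. With a NULL base, only the shift remains (zeroBase path).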
void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) {

  const address oop_base  = CompressedOops::base();
  const int     oop_shift = CompressedOops::shift();
  const bool    disjoint  = CompressedOops::base_disjoint();

  assert(UseCompressedOops, "must be on to call this method");
  assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder");
  assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes),
         "cOop encoder detected bad shift");

  // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary.

  if (oop_base != NULL) {
    unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff;
    unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff;
    unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff;
    if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) {
      BLOCK_COMMENT("cOop decoder disjointBase {");
      // We do not need to load the base. Instead, we can install the upper bits
      // with an OR instead of an ADD.
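      // This is safe because a disjoint base shares no bits with the shifted narrow oop:
      // the OR cannot produce carries, so base | (narrow << shift) == base + (narrow << shift).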
      Label done;

      // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
      if (maybeNULL) {  // NULL ptr must be preserved!
        z_slag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
        z_bre(done);
      } else {
        z_sllg(Rdst, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
      }
      if ((oop_base_hl != 0) && (oop_base_hh != 0)) {
        z_oihf(Rdst, oop_base_hf);
      } else if (oop_base_hl != 0) {
        z_oihl(Rdst, oop_base_hl);
      } else {
        assert(oop_base_hh != 0, "not heapbased mode");
        z_oihh(Rdst, oop_base_hh);
      }
      bind(done);
      BLOCK_COMMENT("} cOop decoder disjointBase");
    } else {
      BLOCK_COMMENT("cOop decoder general {");
      // There are three decode steps:
      //   scale oop offset (shift left)
      //   get base (in reg) and pow2_offset (constant)
      //   add base, pow2_offset, and oop offset
      // The following register overlap situations may exist:
      // Rdst == Rsrc,  Rbase any other
      //   not a problem. Scaling in-place leaves Rbase undisturbed.
      //   Loading Rbase does not impact the scaled offset.
      // Rdst == Rbase, Rsrc  any other
      //   scaling would destroy a possibly preloaded Rbase. Loading Rbase
      //   would destroy the scaled offset.
      //   Remedy: use Rdst_tmp if Rbase has been preloaded.
      //           use Rbase_tmp if base has to be loaded.
      // Rsrc == Rbase, Rdst  any other
      //   Only possible without preloaded Rbase.
      //   Loading Rbase does not destroy compressed oop because it was scaled into Rdst before.
      // Rsrc == Rbase, Rdst == Rbase
      //   Only possible without preloaded Rbase.
      //   Loading Rbase would destroy compressed oop. Scaling in-place is ok.
      //   Remedy: use Rbase_tmp.
      //
      Label    done;
      Register Rdst_tmp       = Rdst;
      Register Rbase_tmp      = Rbase;
      bool     used_R0        = false;
      bool     used_R1        = false;
      bool     base_preloaded = pow2_offset >= 0;
      guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller");
      assert(oop_shift != 0, "room for optimization");

      // Check if we need to use scratch registers.
      if (Rdst == Rbase) {
        assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg");
        if (Rdst != Rsrc) {
          if (base_preloaded) { Rdst_tmp  = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
          else                { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
        } else {
          Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1;
        }
      }
      if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase);

      // Scale oop and check for NULL.
      // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
      if (maybeNULL) {  // NULL ptr must be preserved!
        z_slag(Rdst_tmp, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
        z_bre(done);
      } else {
        z_sllg(Rdst_tmp, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
      }

      // Get oop_base components.
      if (!base_preloaded) {
        pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base);
      }

      // Add up all components.
      if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) {
        z_algr(Rdst_tmp, Rbase_tmp);
        if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); }
      } else {
        add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp);
      }

      bind(done);
      lgr_if_needed(Rdst, Rdst_tmp);
#ifdef ASSERT
      if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); }
      if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); }
#endif
      BLOCK_COMMENT("} cOop decoder general");
    }
  } else {
    BLOCK_COMMENT("cOop decoder zeroBase {");
    if (oop_shift == 0) {
      lgr_if_needed(Rdst, Rsrc);
    } else {
      z_sllg(Rdst, Rsrc, oop_shift);
    }
    BLOCK_COMMENT("} cOop decoder zeroBase");
  }
}

// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result) {
  // OopHandle::resolve is an indirection.
  z_lg(result, 0, result);
}

void MacroAssembler::load_mirror_from_const_method(Register mirror, Register const_method) {
  mem2reg_opt(mirror, Address(const_method, ConstMethod::constants_offset()));
  mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
  mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
  resolve_oop_handle(mirror);
}

void MacroAssembler::load_method_holder(Register holder, Register method) {
  mem2reg_opt(holder, Address(method, Method::const_offset()));
  mem2reg_opt(holder, Address(holder, ConstMethod::constants_offset()));
  mem2reg_opt(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes()));
}

//---------------------------------------------------------------
//---  Operations on arrays.
//---------------------------------------------------------------

// Compiler ensures base is doubleword aligned and cnt is #doublewords.
// Emitter does not KILL cnt and base arguments, since they need to be copied to
// work registers anyway.
// Actually, only r0, r1, and r5 are killed.
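// Clearing strategy in outline (for orientation only):
//   len == 0          -> nothing to do
//   len <= 256 bytes  -> one EXecuted XC of the destination with itself
//   len >  256 bytes  -> MVCLE with a zero-length source and padding byte 0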
unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register odd_tmp_reg) {

  int      block_start = offset();
  Register dst_len  = Z_R1;    // Holds dst len  for MVCLE.
  Register dst_addr = Z_R0;    // Holds dst addr for MVCLE.

  Label doXC, doMVCLE, done;

  BLOCK_COMMENT("Clear_Array {");

  // Check for zero len and convert to long.
  z_ltgfr(odd_tmp_reg, cnt_arg);
  z_bre(done);                    // Nothing to do if len == 0.

  // Prefetch data to be cleared.
  if (VM_Version::has_Prefetch()) {
    z_pfd(0x02,   0, Z_R0, base_pointer_arg);
    z_pfd(0x02, 256, Z_R0, base_pointer_arg);
  }

  z_sllg(dst_len, odd_tmp_reg, 3); // #bytes to clear.
  z_cghi(odd_tmp_reg, 32);         // Check for len <= 256 bytes (<=32 DW).
  z_brnh(doXC);                    // If so, use executed XC to clear.

  // MVCLE: initialize long arrays (general case).
  bind(doMVCLE);
  z_lgr(dst_addr, base_pointer_arg);
  // Pass 0 as source length to MVCLE: destination will be filled with padding byte 0.
  // The even register of the register pair is not killed.
  clear_reg(odd_tmp_reg, true, false);
  MacroAssembler::move_long_ext(dst_addr, as_Register(odd_tmp_reg->encoding()-1), 0);
  z_bru(done);

  // XC: initialize short arrays.
  Label XC_template; // Instr template, never exec directly!
    bind(XC_template);
    z_xc(0,0,base_pointer_arg,0,base_pointer_arg);

  bind(doXC);
    add2reg(dst_len, -1);               // Get #bytes-1 for EXECUTE.
    if (VM_Version::has_ExecuteExtensions()) {
      z_exrl(dst_len, XC_template);     // Execute XC with var. len.
    } else {
      z_larl(odd_tmp_reg, XC_template);
      z_ex(dst_len,0,Z_R0,odd_tmp_reg); // Execute XC with var. len.
    }
    // z_bru(done);      // fallthru

  bind(done);

  BLOCK_COMMENT("} Clear_Array");

  int block_end = offset();
  return block_end - block_start;
}

// Compiler ensures base is doubleword aligned and cnt is count of doublewords.
// Emitter does not KILL any arguments nor work registers.
// Emitter generates up to 16 XC instructions, depending on the array length.
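// Sizing note (for orientation only): with XC_maxlen == 256 and the 4 KB limit asserted below,
//   numXCInstr = ceil(cnt * BytesPerWord / 256) <= 16,
// so at most 16 XC instructions are emitted, each clearing up to 256 bytes.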
unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) {
  int  block_start    = offset();
  int  off;
  int  lineSize_Bytes = AllocatePrefetchStepSize;
  int  lineSize_DW    = AllocatePrefetchStepSize>>LogBytesPerWord;
  bool doPrefetch     = VM_Version::has_Prefetch();
  int  XC_maxlen      = 256;
  int  numXCInstr     = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0;

  BLOCK_COMMENT("Clear_Array_Const {");
  assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only");

  // Do less prefetching for very short arrays.
  if (numXCInstr > 0) {
    // Prefetch only some cache lines, then begin clearing.
    if (doPrefetch) {
      if (cnt*BytesPerWord <= lineSize_Bytes/4) {  // If less than 1/4 of a cache line to clear,
        z_pfd(0x02, 0, Z_R0, base);                // prefetch just the first cache line.
      } else {
        assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines");
        for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) {
          z_pfd(0x02, off*lineSize_Bytes, Z_R0, base);
        }
      }
    }

    for (off=0; off<(numXCInstr-1); off++) {
      z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base);

      // Prefetch some cache lines in advance.
      if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) {
        z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base);
      }
    }
    if (off*XC_maxlen < cnt*BytesPerWord) {
      z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base);
    }
  }
  BLOCK_COMMENT("} Clear_Array_Const");

  int block_end = offset();
  return block_end - block_start;
}

// Compiler ensures base is doubleword aligned and cnt is #doublewords.
// Emitter does not KILL cnt and base arguments, since they need to be copied to
// work registers anyway.
// Actually, only r0, r1, (which are work registers) and odd_tmp_reg are killed.
//
// For very large arrays, exploit MVCLE H/W support.
// MVCLE instruction automatically exploits H/W-optimized page mover.
// - Bytes up to next page boundary are cleared with a series of XC to self.
// - All full pages are cleared with the page mover H/W assist.
// - Remaining bytes are again cleared by a series of XC to self.
//
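// In effect (for orientation only), this emits the equivalent of memset(base, 0, cnt*8)
// as a single MVCLE with a zero-length source, so the destination is filled entirely
// with the padding byte 0.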
unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register odd_tmp_reg) {

  int      block_start = offset();
  Register dst_len  = Z_R1;      // Holds dst len  for MVCLE.
  Register dst_addr = Z_R0;      // Holds dst addr for MVCLE.

  BLOCK_COMMENT("Clear_Array_Const_Big {");

  // Get len to clear.
  load_const_optimized(dst_len, (long)cnt*8L);  // in Bytes = #DW*8

  // Prepare other args to MVCLE.
  z_lgr(dst_addr, base_pointer_arg);
  // Pass 0 as source length to MVCLE: destination will be filled with padding byte 0.
  // The even register of the register pair is not killed.
  (void) clear_reg(odd_tmp_reg, true, false);  // Src len of MVCLE is zero.
  MacroAssembler::move_long_ext(dst_addr, as_Register(odd_tmp_reg->encoding() - 1), 0);
  BLOCK_COMMENT("} Clear_Array_Const_Big");

  int block_end = offset();
  return block_end - block_start;
}

// Allocator.
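// Copies cnt doublewords from src to dst; the regions are assumed to be disjoint and
// doubleword aligned (orientation note). Blocks of up to 256 bytes use one EXecuted MVC,
// larger blocks use MVCLE.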
unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
                                                           Register cnt_reg,
                                                           Register tmp1_reg, Register tmp2_reg) {
  // Tmp1 is oddReg.
  // Tmp2 is evenReg.

  int  block_start = offset();
  Label doMVC, doMVCLE, done, MVC_template;

  BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {");

  // Check for zero len and convert to long.
  z_ltgfr(cnt_reg, cnt_reg);      // Remember casted value for doSTG case.
  z_bre(done);                    // Nothing to do if len == 0.

  z_sllg(Z_R1, cnt_reg, 3);       // Dst len in bytes. calc early to have the result ready.

  z_cghi(cnt_reg, 32);            // Check for len <= 256 bytes (<=32 DW).
  z_brnh(doMVC);                  // If so, use executed MVC to clear.

  bind(doMVCLE);                  // A lot of data (more than 256 bytes).
  // Prep dest reg pair.
  z_lgr(Z_R0, dst_reg);           // dst addr
  // Dst len already in Z_R1.
  // Prep src reg pair.
  z_lgr(tmp2_reg, src_reg);       // src addr
  z_lgr(tmp1_reg, Z_R1);          // Src len same as dst len.

  // Do the copy.
  move_long_ext(Z_R0, tmp2_reg, 0xb0);    // Bypass cache.
  z_bru(done);                            // All done.

  bind(MVC_template);             // Just some data (not more than 256 bytes).
  z_mvc(0, 0, dst_reg, 0, src_reg);

  bind(doMVC);

  if (VM_Version::has_ExecuteExtensions()) {
    add2reg(Z_R1, -1);
  } else {
    add2reg(tmp1_reg, -1, Z_R1);
    z_larl(Z_R1, MVC_template);
  }

  if (VM_Version::has_Prefetch()) {
    z_pfd(1,  0,Z_R0,src_reg);
    z_pfd(2,  0,Z_R0,dst_reg);
    //    z_pfd(1,256,Z_R0,src_reg);    // Assume very short copy.
    //    z_pfd(2,256,Z_R0,dst_reg);
  }

  if (VM_Version::has_ExecuteExtensions()) {
    z_exrl(Z_R1, MVC_template);
  } else {
    z_ex(tmp1_reg, 0, Z_R0, Z_R1);
  }

  bind(done);

  BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint");

  int block_end = offset();
  return block_end - block_start;
}

#ifdef COMPILER2
//------------------------------------------------------
//   Special String Intrinsics. Implementation
//------------------------------------------------------

// Intrinsics for CompactStrings

// Compress char[] to byte[].
//   Restores: src, dst
//   Uses: cnt
//   Kills: tmp, Z_R0, Z_R1.
//   Early clobber: result.
// Note:
//   cnt is a signed int. Do not rely on the high word!
//   It counts characters, not bytes.
// The result is the number of characters copied before the first incompatible character was found.
// If precise is true, the processing stops exactly at this point. Otherwise, the result may be off
// by a few bytes. The result always indicates the number of copied characters.
// When used as a character index, the returned value points to the first incompatible character.
//
// Note: Does not behave exactly like the package-private StringUTF16.compress() Java implementation in case of failure:
// - A different number of characters may have been written to the dead array (if precise is false).
// - Returns a number < cnt instead of 0. (The result gets compared with cnt.)
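// For reference, a scalar sketch of the intended semantics (illustrative only, not compiled,
// hence guarded by #if 0 like the shortcut block inside the function). jchar/jbyte are the
// usual HotSpot typedefs. The vector and unrolled code below implements this loop; when
// precise == false the result may deviate as described above.
#if 0
static int string_compress_reference(const jchar* src, jbyte* dst, int cnt) {
  int i = 0;
  for (; i < cnt; i++) {
    if ((src[i] & 0xff00) != 0) break;  // incompatible character: high byte is set
    dst[i] = (jbyte)src[i];             // keep the low byte only
  }
  return i;  // #characters copied before the first incompatible character
}
#endif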
unsigned int MacroAssembler::string_compress(Register result, Register src, Register dst, Register cnt,
                                             Register tmp,    bool precise) {
  assert_different_registers(Z_R0, Z_R1, result, src, dst, cnt, tmp);

  if (precise) {
    BLOCK_COMMENT("encode_iso_array {");
  } else {
    BLOCK_COMMENT("string_compress {");
  }
  int  block_start = offset();

  Register       Rsrc  = src;
  Register       Rdst  = dst;
  Register       Rix   = tmp;
  Register       Rcnt  = cnt;
  Register       Rmask = result;  // holds incompatibility check mask until result value is stored.
  Label          ScalarShortcut, AllDone;

  z_iilf(Rmask, 0xFF00FF00);
  z_iihf(Rmask, 0xFF00FF00);

#if 0  // Sacrifice shortcuts for code compactness
  {
    //---<  shortcuts for short strings (very frequent)   >---
    //   Strings with 4 and 8 characters were found to occur very frequently.
    //   Therefore, we handle them right away with minimal overhead.
    Label     skipShortcut, skip4Shortcut, skip8Shortcut;
    Register  Rout = Z_R0;
    z_chi(Rcnt, 4);
    z_brne(skip4Shortcut);                 // 4 characters are very frequent
      z_lg(Z_R0, 0, Rsrc);                 // Treat exactly 4 characters specially.
      if (VM_Version::has_DistinctOpnds()) {
        Rout = Z_R0;
        z_ngrk(Rix, Z_R0, Rmask);
      } else {
        Rout = Rix;
        z_lgr(Rix, Z_R0);
        z_ngr(Z_R0, Rmask);
      }
      z_brnz(skipShortcut);
      z_stcmh(Rout, 5, 0, Rdst);
      z_stcm(Rout,  5, 2, Rdst);
      z_lgfr(result, Rcnt);
      z_bru(AllDone);
    bind(skip4Shortcut);

    z_chi(Rcnt, 8);
    z_brne(skip8Shortcut);                 // There's more to do...
      z_lmg(Z_R0, Z_R1, 0, Rsrc);          // Treat exactly 8 characters specially.
      if (VM_Version::has_DistinctOpnds()) {
        Rout = Z_R0;
        z_ogrk(Rix, Z_R0, Z_R1);
        z_ngr(Rix, Rmask);
      } else {
        Rout = Rix;
        z_lgr(Rix, Z_R0);
        z_ogr(Z_R0, Z_R1);
        z_ngr(Z_R0, Rmask);
      }
      z_brnz(skipShortcut);
      z_stcmh(Rout, 5, 0, Rdst);
      z_stcm(Rout,  5, 2, Rdst);
      z_stcmh(Z_R1, 5, 4, Rdst);
      z_stcm(Z_R1,  5, 6, Rdst);
      z_lgfr(result, Rcnt);
      z_bru(AllDone);

    bind(skip8Shortcut);
    clear_reg(Z_R0, true, false);          // #characters already processed (none). Precond for scalar loop.
    z_brl(ScalarShortcut);                 // Just a few characters

    bind(skipShortcut);
  }
#endif
  clear_reg(Z_R0);                         // make sure register is properly initialized.

  if (VM_Version::has_VectorFacility()) {
    const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
                                           // Otherwise just do nothing in vector mode.
                                           // Must be multiple of 2*(vector register length in chars (8 HW = 128 bits)).
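                                           // I.e. 32 chars = 4 vector registers (Z_V20..Z_V23) x 8 halfwords,
                                           // which is what one iteration of VectorLoop loads, checks, packs, and stores.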
    const int  log_min_vcnt = exact_log2(min_vcnt);
    Label      VectorLoop, VectorDone, VectorBreak;

    VectorRegister Vtmp1      = Z_V16;
    VectorRegister Vtmp2      = Z_V17;
    VectorRegister Vmask      = Z_V18;
    VectorRegister Vzero      = Z_V19;
    VectorRegister Vsrc_first = Z_V20;
    VectorRegister Vsrc_last  = Z_V23;

    assert((Vsrc_last->encoding() - Vsrc_first->encoding() + 1) == min_vcnt/8, "logic error");
    assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
    z_srak(Rix, Rcnt, log_min_vcnt);       // # vector loop iterations
    z_brz(VectorDone);                     // not enough data for vector loop

    z_vzero(Vzero);                        // all zeroes
    z_vgmh(Vmask, 0, 7);                   // generate 0xff00 mask for all 2-byte elements
    z_sllg(Z_R0, Rix, log_min_vcnt);       // remember #chars that will be processed by vector loop

    bind(VectorLoop);
      z_vlm(Vsrc_first, Vsrc_last, 0, Rsrc);
      add2reg(Rsrc, min_vcnt*2);

      //---<  check for incompatible character  >---
      z_vo(Vtmp1, Z_V20, Z_V21);
      z_vo(Vtmp2, Z_V22, Z_V23);
      z_vo(Vtmp1, Vtmp1, Vtmp2);
      z_vn(Vtmp1, Vtmp1, Vmask);
      z_vceqhs(Vtmp1, Vtmp1, Vzero);       // high half of all chars must be zero for successful compress.
      z_bvnt(VectorBreak);                 // break vector loop if not all vector elements compare eq -> incompatible character found.
                                           // re-process data from current iteration in break handler.

      //---<  pack & store characters  >---
      z_vpkh(Vtmp1, Z_V20, Z_V21);         // pack (src1, src2) -> tmp1
      z_vpkh(Vtmp2, Z_V22, Z_V23);         // pack (src3, src4) -> tmp2
      z_vstm(Vtmp1, Vtmp2, 0, Rdst);       // store packed string
      add2reg(Rdst, min_vcnt);

      z_brct(Rix, VectorLoop);

    z_bru(VectorDone);

    bind(VectorBreak);
      add2reg(Rsrc, -min_vcnt*2);          // Fix Rsrc. Rsrc was already updated, but Rdst and Rix are not.
      z_sll(Rix, log_min_vcnt);            // # chars processed so far in VectorLoop, excl. current iteration.
      z_sr(Z_R0, Rix);                     // correct # chars processed in total.

    bind(VectorDone);
  }

  {
    const int  min_cnt     =  8;           // Minimum #characters required to use unrolled loop.
                                           // Otherwise just do nothing in unrolled loop.
                                           // Must be multiple of 8.
    const int  log_min_cnt = exact_log2(min_cnt);
    Label      UnrolledLoop, UnrolledDone, UnrolledBreak;

    if (VM_Version::has_DistinctOpnds()) {
      z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to compress in unrolled loop
    } else {
      z_lr(Rix, Rcnt);
      z_sr(Rix, Z_R0);
    }
    z_sra(Rix, log_min_cnt);               // unrolled loop count
    z_brz(UnrolledDone);

    bind(UnrolledLoop);
      z_lmg(Z_R0, Z_R1, 0, Rsrc);
      if (precise) {
        z_ogr(Z_R1, Z_R0);                 // check all 8 chars for incompatibility
        z_ngr(Z_R1, Rmask);
        z_brnz(UnrolledBreak);

        z_lg(Z_R1, 8, Rsrc);               // reload destroyed register
        z_stcmh(Z_R0, 5, 0, Rdst);
        z_stcm(Z_R0,  5, 2, Rdst);
      } else {
        z_stcmh(Z_R0, 5, 0, Rdst);
        z_stcm(Z_R0,  5, 2, Rdst);

        z_ogr(Z_R0, Z_R1);
        z_ngr(Z_R0, Rmask);
        z_brnz(UnrolledBreak);
      }
      z_stcmh(Z_R1, 5, 4, Rdst);
      z_stcm(Z_R1,  5, 6, Rdst);

      add2reg(Rsrc, min_cnt*2);
      add2reg(Rdst, min_cnt);
      z_brct(Rix, UnrolledLoop);

    z_lgfr(Z_R0, Rcnt);                    // # chars processed in total after unrolled loop.
    z_nilf(Z_R0, ~(min_cnt-1));
    z_tmll(Rcnt, min_cnt-1);
    z_brnaz(ScalarShortcut);               // if all bits zero, there is nothing left to do for scalar loop.
                                           // Rix == 0 in all cases.
    z_sllg(Z_R1, Rcnt, 1);                 // # src bytes already processed. Only lower 32 bits are valid!
                                           //   Z_R1 contents must be treated as unsigned operand! For huge strings,
                                           //   (Rcnt >= 2**30), the value may spill into the sign bit by sllg.
    z_lgfr(result, Rcnt);                  // all characters processed.
    z_slgfr(Rdst, Rcnt);                   // restore ptr
    z_slgfr(Rsrc, Z_R1);                   // restore ptr, double the element count for Rsrc restore
    z_bru(AllDone);

    bind(UnrolledBreak);
    z_lgfr(Z_R0, Rcnt);                    // # chars processed in total after unrolled loop
    z_nilf(Z_R0, ~(min_cnt-1));
    z_sll(Rix, log_min_cnt);               // # chars not yet processed in UnrolledLoop (due to break), broken iteration not included.
    z_sr(Z_R0, Rix);                       // fix # chars processed OK so far.
    if (!precise) {
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4808 |
z_lgfr(result, Z_R0); |
48331
a8e39cc7b88f
8193443: [s390]: EncodeISOArray generates wrong vector code
lucy
parents:
48186
diff
changeset
|
4809 |
z_sllg(Z_R1, Z_R0, 1); // # src bytes already processed. Only lower 32 bits are valid! |
a8e39cc7b88f
8193443: [s390]: EncodeISOArray generates wrong vector code
lucy
parents:
48186
diff
changeset
|
4810 |
// Z_R1 contents must be treated as unsigned operand! For huge strings, |
a8e39cc7b88f
8193443: [s390]: EncodeISOArray generates wrong vector code
lucy
parents:
48186
diff
changeset
|
4811 |
// (Rcnt >= 2**30), the value may spill into the sign bit by sllg. |
48094
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4812 |
z_aghi(result, min_cnt/2); // min_cnt/2 characters have already been written |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4813 |
// but ptrs were not updated yet. |
48331
a8e39cc7b88f
8193443: [s390]: EncodeISOArray generates wrong vector code
lucy
parents:
48186
diff
changeset
|
4814 |
z_slgfr(Rdst, Z_R0); // restore ptr |
a8e39cc7b88f
8193443: [s390]: EncodeISOArray generates wrong vector code
lucy
parents:
48186
diff
changeset
|
4815 |
z_slgfr(Rsrc, Z_R1); // restore ptr, double the element count for Rsrc restore |
48094
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4816 |
z_bru(AllDone); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4817 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4818 |
bind(UnrolledDone); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4819 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4820 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4821 |
{ |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4822 |
Label ScalarLoop, ScalarDone, ScalarBreak; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4823 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4824 |
bind(ScalarShortcut); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4825 |
z_ltgfr(result, Rcnt); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4826 |
z_brz(AllDone); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4827 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4828 |
#if 0 // Sacrifice shortcuts for code compactness |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4829 |
{ |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4830 |
//---< Special treatment for very short strings (one or two characters) >--- |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4831 |
// For these strings, we are sure that the above code was skipped. |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4832 |
// Thus, no registers were modified, register restore is not required. |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4833 |
Label ScalarDoit, Scalar2Char; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4834 |
z_chi(Rcnt, 2); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4835 |
z_brh(ScalarDoit); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4836 |
z_llh(Z_R1, 0, Z_R0, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4837 |
z_bre(Scalar2Char); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4838 |
z_tmll(Z_R1, 0xff00); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4839 |
z_lghi(result, 0); // cnt == 1, first char invalid, no chars successfully processed |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4840 |
z_brnaz(AllDone); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4841 |
z_stc(Z_R1, 0, Z_R0, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4842 |
z_lghi(result, 1); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4843 |
z_bru(AllDone); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4844 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4845 |
bind(Scalar2Char); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4846 |
z_llh(Z_R0, 2, Z_R0, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4847 |
z_tmll(Z_R1, 0xff00); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4848 |
z_lghi(result, 0); // cnt == 2, first char invalid, no chars successfully processed |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4849 |
z_brnaz(AllDone); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4850 |
z_stc(Z_R1, 0, Z_R0, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4851 |
z_tmll(Z_R0, 0xff00); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4852 |
z_lghi(result, 1); // cnt == 2, second char invalid, one char successfully processed |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4853 |
z_brnaz(AllDone); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4854 |
z_stc(Z_R0, 1, Z_R0, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4855 |
z_lghi(result, 2); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4856 |
z_bru(AllDone); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4857 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4858 |
bind(ScalarDoit); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4859 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4860 |
#endif |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4861 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4862 |
if (VM_Version::has_DistinctOpnds()) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4863 |
z_srk(Rix, Rcnt, Z_R0); // remaining # chars to compress in unrolled loop |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4864 |
} else { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4865 |
z_lr(Rix, Rcnt); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4866 |
z_sr(Rix, Z_R0); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4867 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4868 |
z_lgfr(result, Rcnt); // # processed characters (if all runs ok). |
48331
a8e39cc7b88f
8193443: [s390]: EncodeISOArray generates wrong vector code
lucy
parents:
48186
diff
changeset
|
4869 |
z_brz(ScalarDone); // uses CC from Rix calculation |
48094
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4870 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4871 |
bind(ScalarLoop); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4872 |
z_llh(Z_R1, 0, Z_R0, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4873 |
z_tmll(Z_R1, 0xff00); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4874 |
z_brnaz(ScalarBreak); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4875 |
z_stc(Z_R1, 0, Z_R0, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4876 |
add2reg(Rsrc, 2); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4877 |
add2reg(Rdst, 1); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4878 |
z_brct(Rix, ScalarLoop); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4879 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4880 |
z_bru(ScalarDone); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4881 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4882 |
bind(ScalarBreak); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4883 |
z_sr(result, Rix); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4884 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4885 |
bind(ScalarDone); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4886 |
z_sgfr(Rdst, result); // restore ptr |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4887 |
z_sgfr(Rsrc, result); // restore ptr, double the element count for Rsrc restore |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4888 |
z_sgfr(Rsrc, result); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4889 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4890 |
bind(AllDone); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4891 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4892 |
if (precise) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4893 |
BLOCK_COMMENT("} encode_iso_array"); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4894 |
} else { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4895 |
BLOCK_COMMENT("} string_compress"); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
4896 |
} |
42065 | 4897 |
return offset() - block_start; |
4898 |
} |
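
// Illustrative reference (added for exposition, not part of the generated stub; the helper
// name and plain C types are made up): what the compress stub above computes, expressed as a
// simple C++ loop. It copies 2-byte chars to single bytes as long as the high byte is zero and
// returns the number of chars successfully compressed. The 'precise' variant (encode_iso_array)
// relies on this exact count; the non-precise path may report a smaller count when it breaks
// out of the unrolled loop (see the !precise fixup above).
#if 0
static int compress_chars_reference(const unsigned short* src, unsigned char* dst, int cnt) {
  int i = 0;
  for (; i < cnt; i++) {
    if ((src[i] & 0xff00) != 0) break;   // char does not fit into a single byte
    dst[i] = (unsigned char)(src[i] & 0xff);
  }
  return i;                              // # chars successfully compressed
}
#endif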

// Inflate byte[] to char[].
unsigned int MacroAssembler::string_inflate_trot(Register src, Register dst, Register cnt, Register tmp) {
  int block_start = offset();

  BLOCK_COMMENT("string_inflate {");

  Register stop_char = Z_R0;
  Register table     = Z_R1;
  Register src_addr  = tmp;

  assert_different_registers(Z_R0, Z_R1, tmp, src, dst, cnt);
  assert(dst->encoding()%2 == 0, "must be even reg");
  assert(cnt->encoding()%2 == 1, "must be odd reg");
  assert(cnt->encoding() - dst->encoding() == 1, "must be even/odd pair");

  StubRoutines::zarch::generate_load_trot_table_addr(this, table);  // kills Z_R0 (if ASSERT)
  clear_reg(stop_char);  // Stop character. Not used here, but initialized to have a defined value.
  lgr_if_needed(src_addr, src);
  z_llgfr(cnt, cnt);     // # src characters, must be a positive simm32.

  translate_ot(dst, src_addr, /* mask = */ 0x0001);

  BLOCK_COMMENT("} string_inflate");

  return offset() - block_start;
}

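// Illustrative reference (added for exposition; helper name and plain C types are made up):
// string_inflate_trot above and the variants below implement the same byte[] -> char[]
// widening, i.e. every source byte is zero-extended to a 2-byte char. string_inflate_trot
// lets the TROT (translate one to two) instruction do the work via a translation table, the
// routines below use vector and scalar code.
#if 0
static void inflate_bytes_reference(const unsigned char* src, unsigned short* dst, int cnt) {
  for (int i = 0; i < cnt; i++) {
    dst[i] = src[i];                     // zero-extend each byte to a 2-byte char
  }
}
#endif
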
// Inflate byte[] to char[].
//   Restores: src, dst
//   Uses:     cnt
//   Kills:    tmp, Z_R0, Z_R1.
// Note:
//   cnt is signed int. Do not rely on high word!
//       counts # characters, not bytes.
unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp) {
  assert_different_registers(Z_R0, Z_R1, src, dst, cnt, tmp);

  BLOCK_COMMENT("string_inflate {");
  int block_start = offset();

  Register   Rcnt = cnt;   // # characters (src: bytes, dst: char (2-byte)), remaining after current loop.
  Register   Rix  = tmp;   // loop index
  Register   Rsrc = src;   // addr(src array)
  Register   Rdst = dst;   // addr(dst array)
  Label      ScalarShortcut, AllDone;

#if 0  // Sacrifice shortcuts for code compactness
  {
    //---< shortcuts for short strings (very frequent) >---
    Label   skipShortcut, skip4Shortcut;
    z_ltr(Rcnt, Rcnt);                     // absolutely nothing to do for strings of len == 0.
    z_brz(AllDone);
    clear_reg(Z_R0);                       // make sure registers are properly initialized.
    clear_reg(Z_R1);
    z_chi(Rcnt, 4);
    z_brne(skip4Shortcut);                 // 4 characters are very frequent
    z_icm(Z_R0, 5, 0, Rsrc);               // Treat exactly 4 characters specially.
    z_icm(Z_R1, 5, 2, Rsrc);
    z_stm(Z_R0, Z_R1, 0, Rdst);
    z_bru(AllDone);
    bind(skip4Shortcut);

    z_chi(Rcnt, 8);
    z_brh(skipShortcut);                   // There's a lot to do...
    z_lgfr(Z_R0, Rcnt);                    // remaining #characters (<= 8). Precond for scalar loop.
                                           // This does not destroy the "register cleared" state of Z_R0.
    z_brl(ScalarShortcut);                 // Just a few characters
    z_icmh(Z_R0, 5, 0, Rsrc);              // Treat exactly 8 characters specially.
    z_icmh(Z_R1, 5, 4, Rsrc);
    z_icm(Z_R0, 5, 2, Rsrc);
    z_icm(Z_R1, 5, 6, Rsrc);
    z_stmg(Z_R0, Z_R1, 0, Rdst);
    z_bru(AllDone);
    bind(skipShortcut);
  }
#endif
  clear_reg(Z_R0);                         // make sure register is properly initialized.

  if (VM_Version::has_VectorFacility()) {
    const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
                                           // Otherwise just do nothing in vector mode.
                                           // Must be multiple of vector register length (16 bytes = 128 bits).
    const int  log_min_vcnt = exact_log2(min_vcnt);
    Label      VectorLoop, VectorDone;

    assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
    z_srak(Rix, Rcnt, log_min_vcnt);       // calculate # vector loop iterations
    z_brz(VectorDone);                     // skip if none

    z_sllg(Z_R0, Rix, log_min_vcnt);       // remember #chars that will be processed by vector loop

    bind(VectorLoop);
      z_vlm(Z_V20, Z_V21, 0, Rsrc);        // get next 32 characters (single-byte)
      add2reg(Rsrc, min_vcnt);

      z_vuplhb(Z_V22, Z_V20);              // V2 <- (expand) V0(high)
      z_vupllb(Z_V23, Z_V20);              // V3 <- (expand) V0(low)
      z_vuplhb(Z_V24, Z_V21);              // V4 <- (expand) V1(high)
      z_vupllb(Z_V25, Z_V21);              // V5 <- (expand) V1(low)
      z_vstm(Z_V22, Z_V25, 0, Rdst);       // store next 32 bytes
      add2reg(Rdst, min_vcnt*2);

      z_brct(Rix, VectorLoop);

    bind(VectorDone);
  }
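
  // Note (added for exposition): VUPLHB/VUPLLB are the "logical" unpack forms, so each source
  // byte is zero-extended into a halfword (0xNN -> 0x00NN). The two 16-byte input vectors per
  // iteration thus expand into four 16-byte result vectors, i.e. 32 chars written per pass.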

  const int  min_cnt     = 8;              // Minimum #characters required to use unrolled scalar loop.
                                           // Otherwise just do nothing in unrolled scalar mode.
                                           // Must be multiple of 8.
  {
    const int  log_min_cnt = exact_log2(min_cnt);
    Label      UnrolledLoop, UnrolledDone;

    if (VM_Version::has_DistinctOpnds()) {
      z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to process in unrolled loop
    } else {
      z_lr(Rix, Rcnt);
      z_sr(Rix, Z_R0);
    }
    z_sra(Rix, log_min_cnt);               // unrolled loop count
    z_brz(UnrolledDone);

    clear_reg(Z_R0);
    clear_reg(Z_R1);

    bind(UnrolledLoop);
      z_icmh(Z_R0, 5, 0, Rsrc);
      z_icmh(Z_R1, 5, 4, Rsrc);
      z_icm(Z_R0, 5, 2, Rsrc);
      z_icm(Z_R1, 5, 6, Rsrc);
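      // Note (added for exposition): with mask 5 (binary 0101), ICM/ICMH load only bytes 1 and 3
      // of the selected register word; the other bytes keep their value. Since Z_R0/Z_R1 are
      // cleared before the loop and only the masked bytes are ever written, each source byte
      // ends up as the low-order byte of a zero-extended halfword, widening 8 bytes to 8 chars
      // without any shift or mask instructions.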
      add2reg(Rsrc, min_cnt);

      z_stmg(Z_R0, Z_R1, 0, Rdst);

      add2reg(Rdst, min_cnt*2);
      z_brct(Rix, UnrolledLoop);

    bind(UnrolledDone);
    z_lgfr(Z_R0, Rcnt);                    // # chars left over after unrolled loop.
    z_nilf(Z_R0, min_cnt-1);
    z_brnz(ScalarShortcut);                // if zero (fall through), there is nothing left to do for the scalar loop.
                                           // Rix == 0 in all cases.
    z_sgfr(Z_R0, Rcnt);                    // negative # characters the ptrs have been advanced previously.
    z_agr(Rdst, Z_R0);                     // restore ptr, double the element count for Rdst restore.
    z_agr(Rdst, Z_R0);
    z_agr(Rsrc, Z_R0);                     // restore ptr.
    z_bru(AllDone);
  }

  {
    bind(ScalarShortcut);
    // Z_R0 must contain the remaining # characters as a 64-bit signed int here.
    //   The register contents are preserved over scalar processing (for register fixup).

#if 0  // Sacrifice shortcuts for code compactness
    {
      Label      ScalarDefault;
      z_chi(Rcnt, 2);
      z_brh(ScalarDefault);
      z_llc(Z_R0, 0, Z_R0, Rsrc);          // 6 bytes
      z_sth(Z_R0, 0, Z_R0, Rdst);          // 4 bytes
      z_brl(AllDone);
      z_llc(Z_R0, 1, Z_R0, Rsrc);          // 6 bytes
      z_sth(Z_R0, 2, Z_R0, Rdst);          // 4 bytes
      z_bru(AllDone);
      bind(ScalarDefault);
    }
#endif

    Label      CodeTable;
    // Some comments on Rix calculation:
    //  - Rcnt is small, therefore no bits are shifted out of the low word (sll(g) instructions).
    //  - the high word of both Rix and Rcnt may contain garbage
    //  - the final lngfr takes care of that garbage, extending the sign to the high word
    z_sllg(Rix, Z_R0, 2);                  // calculate 10*Rix = (4*Rix + Rix)*2
    z_ar(Rix, Z_R0);
    z_larl(Z_R1, CodeTable);
    z_sll(Rix, 1);
    z_lngfr(Rix, Rix);                     // ix range: [0..7], after inversion & mult: [-(7*10)..(0*10)].
    z_bc(Assembler::bcondAlways, 0, Rix, Z_R1);

    z_llc(Z_R1,  6, Z_R0, Rsrc);           // 6 bytes
    z_sth(Z_R1, 12, Z_R0, Rdst);           // 4 bytes

    z_llc(Z_R1,  5, Z_R0, Rsrc);
    z_sth(Z_R1, 10, Z_R0, Rdst);

    z_llc(Z_R1,  4, Z_R0, Rsrc);
    z_sth(Z_R1,  8, Z_R0, Rdst);

    z_llc(Z_R1,  3, Z_R0, Rsrc);
    z_sth(Z_R1,  6, Z_R0, Rdst);

    z_llc(Z_R1,  2, Z_R0, Rsrc);
    z_sth(Z_R1,  4, Z_R0, Rdst);

    z_llc(Z_R1,  1, Z_R0, Rsrc);
    z_sth(Z_R1,  2, Z_R0, Rdst);

    z_llc(Z_R1,  0, Z_R0, Rsrc);
    z_sth(Z_R1,  0, Z_R0, Rdst);
    bind(CodeTable);

    z_chi(Rcnt, 8);                        // no fixup for small strings. Rdst, Rsrc were not modified.
    z_brl(AllDone);

    z_sgfr(Z_R0, Rcnt);                    // # characters the ptrs have been advanced previously.
    z_agr(Rdst, Z_R0);                     // restore ptr, double the element count for Rdst restore.
    z_agr(Rdst, Z_R0);
    z_agr(Rsrc, Z_R0);                     // restore ptr.
  }
  bind(AllDone);

  BLOCK_COMMENT("} string_inflate");
  return offset() - block_start;
}

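// Illustrative reference (added for exposition; helper name and plain C types are made up):
// the ScalarShortcut/CodeTable sequence above handles the up to 7 leftover chars without a
// loop. It branches backwards into a straight-line run of load/store pairs (10 bytes of code
// per pair, hence the 10*Rix offset) so that exactly 'rest' pairs execute before reaching
// CodeTable. A C++ analogue is a switch with fall-through:
#if 0
static void inflate_tail_reference(const unsigned char* src, unsigned short* dst, int rest) {
  switch (rest) {                        // rest is in [0..7]
  case 7: dst[6] = src[6];               // fall through
  case 6: dst[5] = src[5];               // fall through
  case 5: dst[4] = src[4];               // fall through
  case 4: dst[3] = src[3];               // fall through
  case 3: dst[2] = src[2];               // fall through
  case 2: dst[1] = src[1];               // fall through
  case 1: dst[0] = src[0];               // fall through
  case 0: break;
  }
}
#endif
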
// Inflate byte[] to char[], length known at compile time.
//   Restores: src, dst
//   Kills:    tmp, Z_R0, Z_R1.
// Note:
//   len is signed int. Counts # characters, not bytes.
unsigned int MacroAssembler::string_inflate_const(Register src, Register dst, Register tmp, int len) {
  assert_different_registers(Z_R0, Z_R1, src, dst, tmp);

  BLOCK_COMMENT("string_inflate_const {");
  int block_start = offset();

  Register   Rix  = tmp;   // loop index
  Register   Rsrc = src;   // addr(src array)
  Register   Rdst = dst;   // addr(dst array)
  Label      ScalarShortcut, AllDone;
  int        nprocessed = 0;
  int        src_off    = 0;  // compensate for saved (optimized away) ptr advancement.
  int        dst_off    = 0;  // compensate for saved (optimized away) ptr advancement.
  bool       restore_inputs = false;
  bool       workreg_clear  = false;

  if ((len >= 32) && VM_Version::has_VectorFacility()) {
    const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
                                           // Otherwise just do nothing in vector mode.
                                           // Must be multiple of vector register length (16 bytes = 128 bits).
    const int  log_min_vcnt = exact_log2(min_vcnt);
    const int  iterations   = (len - nprocessed) >> log_min_vcnt;
    nprocessed             += iterations << log_min_vcnt;
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5147 |
Label VectorLoop; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5148 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5149 |
if (iterations == 1) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5150 |
z_vlm(Z_V20, Z_V21, 0+src_off, Rsrc); // get next 32 characters (single-byte) |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5151 |
z_vuplhb(Z_V22, Z_V20); // V2 <- (expand) V0(high) |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5152 |
z_vupllb(Z_V23, Z_V20); // V3 <- (expand) V0(low) |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5153 |
z_vuplhb(Z_V24, Z_V21); // V4 <- (expand) V1(high) |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5154 |
z_vupllb(Z_V25, Z_V21); // V5 <- (expand) V1(low) |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5155 |
z_vstm(Z_V22, Z_V25, 0+dst_off, Rdst); // store next 32 bytes |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5156 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5157 |
src_off += min_vcnt; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5158 |
dst_off += min_vcnt*2; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5159 |
} else { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5160 |
restore_inputs = true; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5161 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5162 |
z_lgfi(Rix, len>>log_min_vcnt); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5163 |
bind(VectorLoop); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5164 |
z_vlm(Z_V20, Z_V21, 0, Rsrc); // get next 32 characters (single-byte) |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5165 |
add2reg(Rsrc, min_vcnt); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5166 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5167 |
z_vuplhb(Z_V22, Z_V20); // V2 <- (expand) V0(high) |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5168 |
z_vupllb(Z_V23, Z_V20); // V3 <- (expand) V0(low) |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5169 |
z_vuplhb(Z_V24, Z_V21); // V4 <- (expand) V1(high) |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5170 |
z_vupllb(Z_V25, Z_V21); // V5 <- (expand) V1(low) |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5171 |
z_vstm(Z_V22, Z_V25, 0, Rdst); // store next 32 bytes |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5172 |
add2reg(Rdst, min_vcnt*2); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5173 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5174 |
z_brct(Rix, VectorLoop); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5175 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5176 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5177 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5178 |
if (((len-nprocessed) >= 16) && VM_Version::has_VectorFacility()) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5179 |
const int min_vcnt = 16; // Minimum #characters required to use vector instructions. |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5180 |
// Otherwise just do nothing in vector mode. |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5181 |
// Must be multiple of vector register length (16 bytes = 128 bits). |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5182 |
const int log_min_vcnt = exact_log2(min_vcnt); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5183 |
const int iterations = (len - nprocessed) >> log_min_vcnt; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5184 |
nprocessed += iterations << log_min_vcnt; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5185 |
assert(iterations == 1, "must be!"); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5186 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5187 |
z_vl(Z_V20, 0+src_off, Z_R0, Rsrc); // get next 16 characters (single-byte) |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5188 |
z_vuplhb(Z_V22, Z_V20); // V2 <- (expand) V0(high) |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5189 |
z_vupllb(Z_V23, Z_V20); // V3 <- (expand) V0(low) |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5190 |
z_vstm(Z_V22, Z_V23, 0+dst_off, Rdst); // store next 32 bytes |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5191 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5192 |
src_off += min_vcnt; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5193 |
dst_off += min_vcnt*2; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5194 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5195 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5196 |
if ((len-nprocessed) > 8) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5197 |
const int min_cnt = 8; // Minimum #characters required to use unrolled scalar loop. |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5198 |
// Otherwise just do nothing in unrolled scalar mode. |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5199 |
// Must be multiple of 8. |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5200 |
const int log_min_cnt = exact_log2(min_cnt); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5201 |
const int iterations = (len - nprocessed) >> log_min_cnt; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5202 |
nprocessed += iterations << log_min_cnt; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5203 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5204 |
//---< avoid loop overhead/ptr increment for small # iterations >--- |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5205 |
if (iterations <= 2) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5206 |
clear_reg(Z_R0); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5207 |
clear_reg(Z_R1); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5208 |
workreg_clear = true; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5209 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5210 |
z_icmh(Z_R0, 5, 0+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5211 |
z_icmh(Z_R1, 5, 4+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5212 |
z_icm(Z_R0, 5, 2+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5213 |
z_icm(Z_R1, 5, 6+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5214 |
z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5215 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5216 |
src_off += min_cnt; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5217 |
dst_off += min_cnt*2; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5218 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5219 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5220 |
if (iterations == 2) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5221 |
z_icmh(Z_R0, 5, 0+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5222 |
z_icmh(Z_R1, 5, 4+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5223 |
z_icm(Z_R0, 5, 2+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5224 |
z_icm(Z_R1, 5, 6+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5225 |
z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5226 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5227 |
src_off += min_cnt; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5228 |
dst_off += min_cnt*2; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5229 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5230 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5231 |
if (iterations > 2) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5232 |
Label UnrolledLoop; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5233 |
restore_inputs = true; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5234 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5235 |
clear_reg(Z_R0); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5236 |
clear_reg(Z_R1); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5237 |
workreg_clear = true; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5238 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5239 |
z_lgfi(Rix, iterations); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5240 |
bind(UnrolledLoop); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5241 |
z_icmh(Z_R0, 5, 0, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5242 |
z_icmh(Z_R1, 5, 4, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5243 |
z_icm(Z_R0, 5, 2, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5244 |
z_icm(Z_R1, 5, 6, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5245 |
add2reg(Rsrc, min_cnt); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5246 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5247 |
z_stmg(Z_R0, Z_R1, 0, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5248 |
add2reg(Rdst, min_cnt*2); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5249 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5250 |
z_brct(Rix, UnrolledLoop); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5251 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5252 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5253 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5254 |
if ((len-nprocessed) > 0) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5255 |
switch (len-nprocessed) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5256 |
case 8: |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5257 |
if (!workreg_clear) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5258 |
clear_reg(Z_R0); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5259 |
clear_reg(Z_R1); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5260 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5261 |
z_icmh(Z_R0, 5, 0+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5262 |
z_icmh(Z_R1, 5, 4+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5263 |
z_icm(Z_R0, 5, 2+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5264 |
z_icm(Z_R1, 5, 6+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5265 |
z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5266 |
break; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5267 |
case 7: |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5268 |
if (!workreg_clear) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5269 |
clear_reg(Z_R0); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5270 |
clear_reg(Z_R1); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5271 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5272 |
clear_reg(Rix); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5273 |
z_icm(Z_R0, 5, 0+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5274 |
z_icm(Z_R1, 5, 2+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5275 |
z_icm(Rix, 5, 4+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5276 |
z_stm(Z_R0, Z_R1, 0+dst_off, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5277 |
z_llc(Z_R0, 6+src_off, Z_R0, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5278 |
z_st(Rix, 8+dst_off, Z_R0, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5279 |
z_sth(Z_R0, 12+dst_off, Z_R0, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5280 |
break; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5281 |
case 6: |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5282 |
if (!workreg_clear) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5283 |
clear_reg(Z_R0); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5284 |
clear_reg(Z_R1); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5285 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5286 |
clear_reg(Rix); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5287 |
z_icm(Z_R0, 5, 0+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5288 |
z_icm(Z_R1, 5, 2+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5289 |
z_icm(Rix, 5, 4+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5290 |
z_stm(Z_R0, Z_R1, 0+dst_off, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5291 |
z_st(Rix, 8+dst_off, Z_R0, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5292 |
break; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5293 |
case 5: |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5294 |
if (!workreg_clear) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5295 |
clear_reg(Z_R0); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5296 |
clear_reg(Z_R1); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5297 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5298 |
z_icm(Z_R0, 5, 0+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5299 |
z_icm(Z_R1, 5, 2+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5300 |
z_llc(Rix, 4+src_off, Z_R0, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5301 |
z_stm(Z_R0, Z_R1, 0+dst_off, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5302 |
z_sth(Rix, 8+dst_off, Z_R0, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5303 |
break; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5304 |
case 4: |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5305 |
if (!workreg_clear) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5306 |
clear_reg(Z_R0); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5307 |
clear_reg(Z_R1); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5308 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5309 |
z_icm(Z_R0, 5, 0+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5310 |
z_icm(Z_R1, 5, 2+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5311 |
z_stm(Z_R0, Z_R1, 0+dst_off, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5312 |
break; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5313 |
case 3: |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5314 |
if (!workreg_clear) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5315 |
clear_reg(Z_R0); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5316 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5317 |
z_llc(Z_R1, 2+src_off, Z_R0, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5318 |
z_icm(Z_R0, 5, 0+src_off, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5319 |
z_sth(Z_R1, 4+dst_off, Z_R0, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5320 |
z_st(Z_R0, 0+dst_off, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5321 |
break; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5322 |
case 2: |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5323 |
z_llc(Z_R0, 0+src_off, Z_R0, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5324 |
z_llc(Z_R1, 1+src_off, Z_R0, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5325 |
z_sth(Z_R0, 0+dst_off, Z_R0, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5326 |
z_sth(Z_R1, 2+dst_off, Z_R0, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5327 |
break; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5328 |
case 1: |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5329 |
z_llc(Z_R0, 0+src_off, Z_R0, Rsrc); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5330 |
z_sth(Z_R0, 0+dst_off, Z_R0, Rdst); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5331 |
break; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5332 |
default: |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5333 |
guarantee(false, "Impossible"); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5334 |
break; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5335 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5336 |
src_off += len-nprocessed; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5337 |
dst_off += (len-nprocessed)*2; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5338 |
nprocessed = len; |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5339 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5340 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5341 |
//---< restore modified input registers >--- |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5342 |
if ((nprocessed > 0) && restore_inputs) { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5343 |
z_agfi(Rsrc, -(nprocessed-src_off)); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5344 |
if (nprocessed < 1000000000) { // avoid int overflow |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5345 |
z_agfi(Rdst, -(nprocessed*2-dst_off)); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5346 |
} else { |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5347 |
z_agfi(Rdst, -(nprocessed-dst_off)); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5348 |
z_agfi(Rdst, -nprocessed); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5349 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5350 |
} |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5351 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47580
diff
changeset
|
5352 |
BLOCK_COMMENT("} string_inflate_const"); |
42065 | 5353 |
return offset() - block_start; |
5354 |
} |
|
5355 |
||
5356 |
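// Conceptually, string_inflate_const widens a Latin-1 (single-byte) source of
// known, constant length into a UTF-16 (two-byte) destination. A minimal scalar
// sketch of the intended effect (illustration only, not the emitted sequence):
//
//   for (int i = 0; i < len; i++) {
//     dst[i] = (jchar)(src[i] & 0xff);   // zero-extend each byte to a char
//   }
//
// Because len is a compile-time constant, the code above statically selects the
// widest applicable tier: VLM/VUPLHB/VUPLLB/VSTM for 32-character blocks and VL
// for a single 16-character block (vector facility required), ICM/ICMH pairs for
// an 8-character block, and byte-wise ICM/LLC/STH for the remaining tail.
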
// Kills src.
unsigned int MacroAssembler::has_negatives(Register result, Register src, Register cnt,
                                           Register odd_reg, Register even_reg, Register tmp) {
  int block_start = offset();
  Label Lloop1, Lloop2, Lslow, Lnotfound, Ldone;
  const Register addr = src, mask = tmp;

  BLOCK_COMMENT("has_negatives {");

  z_llgfr(Z_R1, cnt);      // Number of bytes to read. (Must be a positive simm32.)
  z_llilf(mask, 0x80808080);
  z_lhi(result, 1);        // Assume true.
  // Last possible addr for fast loop.
  z_lay(odd_reg, -16, Z_R1, src);
  z_chi(cnt, 16);
  z_brl(Lslow);

  // ind1: index, even_reg: index increment, odd_reg: index limit
  z_iihf(mask, 0x80808080);
  z_lghi(even_reg, 16);

  bind(Lloop1); // 16 bytes per iteration.
  z_lg(Z_R0, Address(addr));
  z_lg(Z_R1, Address(addr, 8));
  z_ogr(Z_R0, Z_R1);
  z_ngr(Z_R0, mask);
  z_brne(Ldone); // If found return 1.
  z_brxlg(addr, even_reg, Lloop1);

  bind(Lslow);
  z_aghi(odd_reg, 16-1); // Last possible addr for slow loop.
  z_lghi(even_reg, 1);
  z_cgr(addr, odd_reg);
  z_brh(Lnotfound);

  bind(Lloop2); // 1 byte per iteration.
  z_cli(Address(addr), 0x80);
  z_brnl(Ldone); // If found return 1.
  z_brxlg(addr, even_reg, Lloop2);

  bind(Lnotfound);
  z_lhi(result, 0);

  bind(Ldone);

  BLOCK_COMMENT("} has_negatives");

  return offset() - block_start;
}

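// has_negatives reports whether any byte in [src, src+cnt) has its most
// significant bit set, i.e. would be negative when interpreted as a jbyte.
// Scalar sketch of the equivalent logic (illustration only):
//
//   bool found = false;
//   for (int i = 0; i < cnt; i++) {
//     if ((src[i] & 0x80) != 0) { found = true; break; }
//   }
//
// The fast loop above checks 16 bytes per iteration: it ORs two 8-byte loads
// and ANDs the result with the replicated 0x80...80 mask, so a non-zero result
// means at least one byte had bit 7 set.
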
// kill: cnt1, cnt2, odd_reg, even_reg; early clobber: result
unsigned int MacroAssembler::string_compare(Register str1, Register str2,
                                            Register cnt1, Register cnt2,
                                            Register odd_reg, Register even_reg, Register result, int ae) {
  int block_start = offset();

  assert_different_registers(str1, cnt1, cnt2, odd_reg, even_reg, result);
  assert_different_registers(str2, cnt1, cnt2, odd_reg, even_reg, result);

  // If strings are equal up to min length, return the length difference.
  const Register diff = result, // Pre-set result with length difference.
                 min  = cnt1,   // min number of bytes
                 tmp  = cnt2;

  // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a)
  // we interchange str1 and str2 in the UL case and negate the result.
  // Like this, str1 is always latin1 encoded, except for the UU case.
  // In addition, we need 0 (or sign which is 0) extend when using 64 bit register.
  const bool used_as_LU = (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL);

  BLOCK_COMMENT("string_compare {");

  if (used_as_LU) {
    z_srl(cnt2, 1);
  }

  // See if the lengths are different, and calculate min in cnt1.
  // Save diff in case we need it for a tie-breaker.

  // diff = cnt1 - cnt2
  if (VM_Version::has_DistinctOpnds()) {
    z_srk(diff, cnt1, cnt2);
  } else {
    z_lr(diff, cnt1);
    z_sr(diff, cnt2);
  }
  if (str1 != str2) {
    if (VM_Version::has_LoadStoreConditional()) {
      z_locr(min, cnt2, Assembler::bcondHigh);
    } else {
      Label Lskip;
      z_brl(Lskip);    // min ok if cnt1 < cnt2
      z_lr(min, cnt2); // min = cnt2
      bind(Lskip);
    }
  }

  if (ae == StrIntrinsicNode::UU) {
    z_sra(diff, 1);
  }
  if (str1 != str2) {
    Label Ldone;
    if (used_as_LU) {
      // Loop which searches the first difference character by character.
      Label Lloop;
      const Register ind1 = Z_R1,
                     ind2 = min;
      int stride1 = 1, stride2 = 2; // See comment above.

      // ind1: index, even_reg: index increment, odd_reg: index limit
      z_llilf(ind1, (unsigned int)(-stride1));
      z_lhi(even_reg, stride1);
      add2reg(odd_reg, -stride1, min);
      clear_reg(ind2); // kills min

      bind(Lloop);
      z_brxh(ind1, even_reg, Ldone);
      z_llc(tmp, Address(str1, ind1));
      z_llh(Z_R0, Address(str2, ind2));
      z_ahi(ind2, stride2);
      z_sr(tmp, Z_R0);
      z_bre(Lloop);

      z_lr(result, tmp);

    } else {
      // Use clcle in fast loop (only for same encoding).
      z_lgr(Z_R0, str1);
      z_lgr(even_reg, str2);
      z_llgfr(Z_R1, min);
      z_llgfr(odd_reg, min);

      if (ae == StrIntrinsicNode::LL) {
        compare_long_ext(Z_R0, even_reg, 0);
      } else {
        compare_long_uni(Z_R0, even_reg, 0);
      }
      z_bre(Ldone);
      z_lgr(Z_R1, Z_R0);
      if (ae == StrIntrinsicNode::LL) {
        z_llc(Z_R0, Address(even_reg));
        z_llc(result, Address(Z_R1));
      } else {
        z_llh(Z_R0, Address(even_reg));
        z_llh(result, Address(Z_R1));
      }
      z_sr(result, Z_R0);
    }

    // Otherwise, return the difference between the first mismatched chars.
    bind(Ldone);
  }

  if (ae == StrIntrinsicNode::UL) {
    z_lcr(result, result); // Negate result (see note above).
  }

  BLOCK_COMMENT("} string_compare");

  return offset() - block_start;
}

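// string_compare implements the java.lang.String.compareTo contract: the sign
// of the first character difference, or the (scaled) length difference if one
// string is a prefix of the other. For the UL encoding the operands arrive
// interchanged (see the note inside the function), so the sign is flipped at
// the end, relying on compareTo(a, b) == -compareTo(b, a). Illustration only:
//
//   compareTo("abc", "abd") < 0   implies   compareTo("abd", "abc") > 0
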
unsigned int MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit,
                                          Register odd_reg, Register even_reg, Register result, bool is_byte) {
  int block_start = offset();

  BLOCK_COMMENT("array_equals {");

  assert_different_registers(ary1, limit, odd_reg, even_reg);
  assert_different_registers(ary2, limit, odd_reg, even_reg);

  Label Ldone, Ldone_true, Ldone_false, Lclcle, CLC_template;
  int base_offset = 0;

  if (ary1 != ary2) {
    if (is_array_equ) {
      base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);

      // Return true if the same array.
      compareU64_and_branch(ary1, ary2, Assembler::bcondEqual, Ldone_true);

      // Return false if one of them is NULL.
      compareU64_and_branch(ary1, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
      compareU64_and_branch(ary2, (intptr_t)0, Assembler::bcondEqual, Ldone_false);

      // Load the lengths of arrays.
      z_llgf(odd_reg, Address(ary1, arrayOopDesc::length_offset_in_bytes()));

      // Return false if the two arrays are not equal length.
      z_c(odd_reg, Address(ary2, arrayOopDesc::length_offset_in_bytes()));
      z_brne(Ldone_false);

      // string len in bytes (right operand)
      if (!is_byte) {
        z_chi(odd_reg, 128);
        z_sll(odd_reg, 1); // preserves flags
        z_brh(Lclcle);
      } else {
        compareU32_and_branch(odd_reg, (intptr_t)256, Assembler::bcondHigh, Lclcle);
      }
    } else {
      z_llgfr(odd_reg, limit); // Need to zero-extend prior to using the value.
      compareU32_and_branch(limit, (intptr_t)256, Assembler::bcondHigh, Lclcle);
    }


    // Use clc instruction for up to 256 bytes.
    {
      Register str1_reg = ary1,
               str2_reg = ary2;
      if (is_array_equ) {
        str1_reg = Z_R1;
        str2_reg = even_reg;
        add2reg(str1_reg, base_offset, ary1); // string addr (left operand)
        add2reg(str2_reg, base_offset, ary2); // string addr (right operand)
      }
      z_ahi(odd_reg, -1); // Clc uses decremented limit. Also compare result to 0.
      z_brl(Ldone_true);
      // Note: We could jump to the template if equal.

      assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
      z_exrl(odd_reg, CLC_template);
      z_bre(Ldone_true);
      // fall through

      bind(Ldone_false);
      clear_reg(result);
      z_bru(Ldone);

      bind(CLC_template);
      z_clc(0, 0, str1_reg, 0, str2_reg);
    }

    // Use clcle instruction.
    {
      bind(Lclcle);
      add2reg(even_reg, base_offset, ary2); // string addr (right operand)
      add2reg(Z_R0, base_offset, ary1);     // string addr (left operand)

      z_lgr(Z_R1, odd_reg); // string len in bytes (left operand)
      if (is_byte) {
        compare_long_ext(Z_R0, even_reg, 0);
      } else {
        compare_long_uni(Z_R0, even_reg, 0);
      }
      z_lghi(result, 0); // Preserve flags.
      z_brne(Ldone);
    }
  }
  // fall through

  bind(Ldone_true);
  z_lghi(result, 1); // All characters are equal.
  bind(Ldone);

  BLOCK_COMMENT("} array_equals");

  return offset() - block_start;
}

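// For operands of at most 256 bytes, array_equals compares everything with a
// single CLC executed via EXRL: CLC encodes (length - 1) in its length field,
// and EXRL patches that field from odd_reg at execution time, which is why the
// limit is decremented before the execute. Longer operands fall back to the
// interruptible compare_long_ext/compare_long_uni helpers. In effect this is a
// memcmp()-style equality check over the array payload, preceded (for the
// array case) by the identity, null, and length tests above.
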
// kill: haycnt, needlecnt, odd_reg, even_reg; early clobber: result
unsigned int MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
                                            Register needle, Register needlecnt, int needlecntval,
                                            Register odd_reg, Register even_reg, int ae) {
  int block_start = offset();

  // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
  assert(ae != StrIntrinsicNode::LU, "Invalid encoding");
  const int h_csize = (ae == StrIntrinsicNode::LL) ? 1 : 2;
  const int n_csize = (ae == StrIntrinsicNode::UU) ? 2 : 1;
  Label L_needle1, L_Found, L_NotFound;

  BLOCK_COMMENT("string_indexof {");

  if (needle == haystack) {
    z_lhi(result, 0);
  } else {

    // Load first character of needle (R0 used by search_string instructions).
    if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); }

    // Compute last haystack addr to use if no match gets found.
    if (needlecnt != noreg) { // variable needlecnt
      z_ahi(needlecnt, -1);    // Remaining characters after first one.
      z_sr(haycnt, needlecnt); // Compute index succeeding last element to compare.
      if (n_csize == 2) { z_sll(needlecnt, 1); } // In bytes.
    } else { // constant needlecnt
      assert((needlecntval & 0x7fff) == needlecntval, "must be positive simm16 immediate");
      // Compute index succeeding last element to compare.
      if (needlecntval != 1) { z_ahi(haycnt, 1 - needlecntval); }
    }

    z_llgfr(haycnt, haycnt); // Clear high half.
    z_lgr(result, haystack); // Final result will be computed from needle start pointer.
    if (h_csize == 2) { z_sll(haycnt, 1); } // Scale to number of bytes.
    z_agr(haycnt, haystack); // Point to address succeeding last element (haystack+scale*(haycnt-needlecnt+1)).

    if (h_csize != n_csize) {
      assert(ae == StrIntrinsicNode::UL, "Invalid encoding");

      if (needlecnt != noreg || needlecntval != 1) {
        if (needlecnt != noreg) {
          compare32_and_branch(needlecnt, (intptr_t)0, Assembler::bcondEqual, L_needle1);
        }

        // Main Loop: UL version (now we have at least 2 characters).
        Label L_OuterLoop, L_InnerLoop, L_Skip;
        bind(L_OuterLoop); // Search for 1st 2 characters.
        z_lgr(Z_R1, haycnt);
        MacroAssembler::search_string_uni(Z_R1, result);
        z_brc(Assembler::bcondNotFound, L_NotFound);
        z_lgr(result, Z_R1);

        z_lghi(Z_R1, n_csize);
        z_lghi(even_reg, h_csize);
        bind(L_InnerLoop);
        z_llgc(odd_reg, Address(needle, Z_R1));
        z_ch(odd_reg, Address(result, even_reg));
        z_brne(L_Skip);
        if (needlecnt != noreg) { z_cr(Z_R1, needlecnt); } else { z_chi(Z_R1, needlecntval - 1); }
        z_brnl(L_Found);
        z_aghi(Z_R1, n_csize);
        z_aghi(even_reg, h_csize);
        z_bru(L_InnerLoop);

        bind(L_Skip);
        z_aghi(result, h_csize); // This is the new address we want to use for comparing.
        z_bru(L_OuterLoop);
      }

    } else {
      const intptr_t needle_bytes = (n_csize == 2) ? ((needlecntval - 1) << 1) : (needlecntval - 1);
      Label L_clcle;

      if (needlecnt != noreg || (needlecntval != 1 && needle_bytes <= 256)) {
        if (needlecnt != noreg) {
          compare32_and_branch(needlecnt, 256, Assembler::bcondHigh, L_clcle);
          z_ahi(needlecnt, -1); // remaining bytes -1 (for CLC)
          z_brl(L_needle1);
        }

        // Main Loop: clc version (now we have at least 2 characters).
        Label L_OuterLoop, CLC_template;
        bind(L_OuterLoop); // Search for 1st 2 characters.
        z_lgr(Z_R1, haycnt);
        if (h_csize == 1) {
          MacroAssembler::search_string(Z_R1, result);
        } else {
          MacroAssembler::search_string_uni(Z_R1, result);
        }
        z_brc(Assembler::bcondNotFound, L_NotFound);
        z_lgr(result, Z_R1);

        if (needlecnt != noreg) {
          assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
          z_exrl(needlecnt, CLC_template);
        } else {
          z_clc(h_csize, needle_bytes -1, Z_R1, n_csize, needle);
        }
        z_bre(L_Found);
        z_aghi(result, h_csize); // This is the new address we want to use for comparing.
        z_bru(L_OuterLoop);

        if (needlecnt != noreg) {
          bind(CLC_template);
          z_clc(h_csize, 0, Z_R1, n_csize, needle);
        }
      }

      if (needlecnt != noreg || needle_bytes > 256) {
        bind(L_clcle);

        // Main Loop: clcle version (now we have at least 256 bytes).
        Label L_OuterLoop, CLC_template;
        bind(L_OuterLoop); // Search for 1st 2 characters.
        z_lgr(Z_R1, haycnt);
        if (h_csize == 1) {
          MacroAssembler::search_string(Z_R1, result);
        } else {
          MacroAssembler::search_string_uni(Z_R1, result);
        }
        z_brc(Assembler::bcondNotFound, L_NotFound);

        add2reg(Z_R0, n_csize, needle);
        add2reg(even_reg, h_csize, Z_R1);
        z_lgr(result, Z_R1);
        if (needlecnt != noreg) {
          z_llgfr(Z_R1, needlecnt); // needle len in bytes (left operand)
          z_llgfr(odd_reg, needlecnt);
        } else {
          load_const_optimized(Z_R1, needle_bytes);
          if (Immediate::is_simm16(needle_bytes)) { z_lghi(odd_reg, needle_bytes); } else { z_lgr(odd_reg, Z_R1); }
        }
        if (h_csize == 1) {
          compare_long_ext(Z_R0, even_reg, 0);
        } else {
          compare_long_uni(Z_R0, even_reg, 0);
        }
        z_bre(L_Found);

        if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } // Reload.
        z_aghi(result, h_csize); // This is the new address we want to use for comparing.
        z_bru(L_OuterLoop);
      }
    }

    if (needlecnt != noreg || needlecntval == 1) {
      bind(L_needle1);

      // Single needle character version.
      if (h_csize == 1) {
        MacroAssembler::search_string(haycnt, result);
      } else {
        MacroAssembler::search_string_uni(haycnt, result);
      }
      z_lgr(result, haycnt);
      z_brc(Assembler::bcondFound, L_Found);
    }

    bind(L_NotFound);
    add2reg(result, -1, haystack); // Return -1.

    bind(L_Found); // Return index (or -1 in fallthrough case).
    z_sgr(result, haystack);
    if (h_csize == 2) { z_srag(result, result, exact_log2(sizeof(jchar))); }
  }
  BLOCK_COMMENT("} string_indexof");

  return offset() - block_start;
}

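// string_indexof follows the usual "find a candidate, then verify" scheme.
// Sketch of the contract (illustration only):
//
//   for (int i = 0; i + needle_len <= hay_len; i++) {
//     if (haystack[i .. i+needle_len-1] equals needle) return i;
//   }
//   return -1;
//
// SRST/SRSTU locates candidate positions of the first needle character; the
// remaining characters are then verified with CLC (patched via EXRL when the
// needle length is variable), with CLCLE/CLCLU-based helpers for needles beyond
// 256 bytes, or character by character in the mixed-encoding (UL) case. The
// result is converted from an address back to an index before returning.
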
// early clobber: result
unsigned int MacroAssembler::string_indexof_char(Register result, Register haystack, Register haycnt,
                                                 Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte) {
  int block_start = offset();

  BLOCK_COMMENT("string_indexof_char {");

  if (needle == haystack) {
    z_lhi(result, 0);
  } else {

    Label Ldone;

    z_llgfr(odd_reg, haycnt); // Preset loop ctr/searchrange end.
    if (needle == noreg) {
      load_const_optimized(Z_R0, (unsigned long)needleChar);
    } else {
      if (is_byte) {
        z_llgcr(Z_R0, needle); // First (and only) needle char.
      } else {
        z_llghr(Z_R0, needle); // First (and only) needle char.
      }
    }

    if (!is_byte) {
      z_agr(odd_reg, odd_reg); // Calc #bytes to be processed with SRSTU.
    }

    z_lgr(even_reg, haystack); // haystack addr
    z_agr(odd_reg, haystack);  // First char after range end.
    z_lghi(result, -1);

    if (is_byte) {
      MacroAssembler::search_string(odd_reg, even_reg);
    } else {
      MacroAssembler::search_string_uni(odd_reg, even_reg);
    }
    z_brc(Assembler::bcondNotFound, Ldone);
    if (is_byte) {
      if (VM_Version::has_DistinctOpnds()) {
        z_sgrk(result, odd_reg, haystack);
      } else {
        z_sgr(odd_reg, haystack);
        z_lgr(result, odd_reg);
      }
    } else {
      z_slgr(odd_reg, haystack);
      z_srlg(result, odd_reg, exact_log2(sizeof(jchar)));
    }

    bind(Ldone);
  }
  BLOCK_COMMENT("} string_indexof_char");

  return offset() - block_start;
}

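// string_indexof_char maps directly onto SRST (byte data) or SRSTU (UTF-16
// data): Z_R0 holds the character to search for, even_reg the start address,
// and odd_reg the address just past the search range. On a hit the matching
// address is left in odd_reg, and the index is recovered by subtracting the
// haystack base (halved again for two-byte characters). The net effect is
// String.indexOf(int ch) over the backing array, with -1 when absent.
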
#endif

//-------------------------------------------------
//   Constants (scalar and oop) in constant pool
//-------------------------------------------------

// Add a non-relocated constant to the CP.
int MacroAssembler::store_const_in_toc(AddressLiteral& val) {
  long    value  = val.value();
  address tocPos = long_constant(value);

  if (tocPos != NULL) {
    int tocOffset = (int)(tocPos - code()->consts()->start());
    return tocOffset;
  }
  // Address_constant returned NULL, so no constant entry has been created.
  // In that case, we return a "fatal" offset, just in case that subsequently
  // generated access code is executed.
  return -1;
}

// Returns the TOC offset where the address is stored.
// Add a relocated constant to the CP.
int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) {
  // Use RelocationHolder::none for the constant pool entry.
  // Otherwise we will end up with a failing NativeCall::verify(x),
  // where x is the address of the constant pool entry.
  address tocPos = address_constant((address)oop.value(), RelocationHolder::none);

  if (tocPos != NULL) {
    int              tocOffset = (int)(tocPos - code()->consts()->start());
    RelocationHolder rsp = oop.rspec();
    Relocation      *rel = rsp.reloc();

    // Store toc_offset in relocation, used by call_far_patchable.
    if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) {
      ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset);
    }
    // Relocate at the load's pc.
    relocate(rsp);

    return tocOffset;
  }
  // Address_constant returned NULL, so no constant entry has been created
  // in that case, we return a "fatal" offset, just in case that subsequently
  // generated access code is executed.
  return -1;
}

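// Typical use of the two store_*_in_toc helpers above (a hedged sketch; the
// actual call sites live elsewhere in this file and in the code generators):
//
//   AddressLiteral al((address)value);
//   int toc_offset = store_const_in_toc(al);
//   if (toc_offset == -1) {
//     // No constant pool entry could be allocated; the caller must fall back,
//     // e.g. to materializing the value with an immediate sequence.
//   }
//
// The load_const_from_toc/load_oop_from_toc functions below wrap this pattern
// and emit the pc-relative load (LGRL) that references the allocated entry.
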
bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) { |
|
5893 |
int tocOffset = store_const_in_toc(a); |
|
5894 |
if (tocOffset == -1) return false; |
|
5895 |
address tocPos = tocOffset + code()->consts()->start(); |
|
5896 |
assert((address)code()->consts()->start() != NULL, "Please add CP address"); |
|
57583
aad50831e169
8228618: s390: c1/c2 fail to add a metadata relocation in the static call stub.
rrich
parents:
55343
diff
changeset
|
5897 |
relocate(a.rspec()); |
42065 | 5898 |
load_long_pcrelative(dst, tocPos); |
5899 |
return true; |
|
5900 |
} |

bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
  int     tocOffset = store_oop_in_toc(a);
  if (tocOffset == -1) return false;
  address tocPos    = tocOffset + code()->consts()->start();
  assert((address)code()->consts()->start() != NULL, "Please add CP address");

  load_addr_pcrelative(dst, tocPos);
  return true;
}

// If the instruction sequence at the given pc is a load_const_from_toc
// sequence, return the value currently stored at the referenced position
// in the TOC.
intptr_t MacroAssembler::get_const_from_toc(address pc) {

  assert(is_load_const_from_toc(pc), "must be load_const_from_pool");

  long    offset  = get_load_const_from_toc_offset(pc);
  address dataLoc = NULL;
  if (is_load_const_from_toc_pcrelative(pc)) {
    dataLoc = pc + offset;
  } else {
    CodeBlob* cb = CodeCache::find_blob_unsafe(pc);   // Else we get assertion if nmethod is zombie.
    assert(cb && cb->is_nmethod(), "sanity");
    nmethod* nm = (nmethod*)cb;
    dataLoc = nm->ctable_begin() + offset;
  }
  return *(intptr_t *)dataLoc;
}

// If the instruction sequence at the given pc is a load_const_from_toc
// sequence, copy the passed-in new_data value into the referenced
// position in the TOC.
void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) {
  assert(is_load_const_from_toc(pc), "must be load_const_from_pool");

  long    offset  = MacroAssembler::get_load_const_from_toc_offset(pc);
  address dataLoc = NULL;
  if (is_load_const_from_toc_pcrelative(pc)) {
    dataLoc = pc + offset;
  } else {
    nmethod* nm = CodeCache::find_nmethod(pc);
    assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob");
    dataLoc = nm->ctable_begin() + offset;
  }
  if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary.
    *(unsigned long *)dataLoc = new_data;
  }
}

// Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc
// site. Verify by calling is_load_const_from_toc() before!!
// Offset is +/- 2**32 -> use long.
long MacroAssembler::get_load_const_from_toc_offset(address a) {
  assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load");
  // expected code sequence:
  //   z_lgrl(t, simm32);    len = 6
  unsigned long inst;
  unsigned int  len = get_instruction(a, &inst);
  return get_pcrel_offset(inst);
}

//**********************************************************************************
//  inspection of generated instruction sequences for a particular pattern
//**********************************************************************************

bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) {
#ifdef ASSERT
  unsigned long inst;
  unsigned int  len = get_instruction(a+2, &inst);
  if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) {
    const int range = 128;
    Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl");
    VM_Version::z_SIGSEGV();
  }
#endif
  // expected code sequence:
  //   z_lgrl(t, relAddr32);    len = 6
  //TODO: verify accessed data is in CP, if possible.
  return is_load_pcrelative_long(a);  // TODO: might be too general. Currently, only lgrl is used.
}

bool MacroAssembler::is_load_const_from_toc_call(address a) {
  return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size());
}

bool MacroAssembler::is_load_const_call(address a) {
  return is_load_const(a) && is_call_byregister(a + load_const_size());
}

//-------------------------------------------------
//   Emitters for some really CICS instructions
//-------------------------------------------------

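// The instructions wrapped by the emitters below (MVCLE, CLCLE, CLCLU, SRST, SRSTU,
// KMAC, KIMD, KLMD, KM, KMC, CKSM, TROO, TROT, TRTO, TRTT) are interruptible: they may
// end after processing only a CPU-determined amount of data, which is indicated by
// condition code 3. Each emitter therefore branches back and re-executes the
// instruction until it completes with CC != 3.
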
void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) {
  assert(dst->encoding() % 2 == 0, "must be an even/odd register pair");
  assert(src->encoding() % 2 == 0, "must be an even/odd register pair");
  assert(pad < 256, "must be a padding BYTE");

  Label retry;
  bind(retry);
  Assembler::z_mvcle(dst, src, pad);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) {
  assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
  assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
  assert(pad < 256, "must be a padding BYTE");

  Label retry;
  bind(retry);
  Assembler::z_clcle(left, right, pad, Z_R0);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) {
  assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
  assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
  assert(pad <= 0xfff, "must be a padding HALFWORD");
  assert(VM_Version::has_ETF2(), "instruction must be available");

  Label retry;
  bind(retry);
  Assembler::z_clclu(left, right, pad, Z_R0);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::search_string(Register end, Register start) {
  assert(end->encoding() != 0, "end address must not be in R0");
  assert(start->encoding() != 0, "start address must not be in R0");

  Label retry;
  bind(retry);
  Assembler::z_srst(end, start);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::search_string_uni(Register end, Register start) {
  assert(end->encoding() != 0, "end address must not be in R0");
  assert(start->encoding() != 0, "start address must not be in R0");
  assert(VM_Version::has_ETF3(), "instruction must be available");

  Label retry;
  bind(retry);
  Assembler::z_srstu(end, start);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::kmac(Register srcBuff) {
  assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
  assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");

  Label retry;
  bind(retry);
  Assembler::z_kmac(Z_R0, srcBuff);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::kimd(Register srcBuff) {
  assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
  assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");

  Label retry;
  bind(retry);
  Assembler::z_kimd(Z_R0, srcBuff);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::klmd(Register srcBuff) {
  assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
  assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");

  Label retry;
  bind(retry);
  Assembler::z_klmd(Z_R0, srcBuff);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::km(Register dstBuff, Register srcBuff) {
  // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
  // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
  assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
  assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
  assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");

  Label retry;
  bind(retry);
  Assembler::z_km(dstBuff, srcBuff);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::kmc(Register dstBuff, Register srcBuff) {
  // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
  // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
  assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
  assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
  assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");

  Label retry;
  bind(retry);
  Assembler::z_kmc(dstBuff, srcBuff);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::cksm(Register crcBuff, Register srcBuff) {
  assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");

  Label retry;
  bind(retry);
  Assembler::z_cksm(crcBuff, srcBuff);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

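// The translate_* emitters below wrap the TROO/TROT/TRTO/TRTT instructions
// (translate one-to-one, one-to-two, two-to-one, two-to-two), which translate a
// buffer between 1-byte and 2-byte encodings under control of a translation table.
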
void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) {
  assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
  assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");

  Label retry;
  bind(retry);
  Assembler::z_troo(r1, r2, m3);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) {
  assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
  assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");

  Label retry;
  bind(retry);
  Assembler::z_trot(r1, r2, m3);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::translate_to(Register r1, Register r2, uint m3) {
  assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
  assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");

  Label retry;
  bind(retry);
  Assembler::z_trto(r1, r2, m3);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) {
  assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
  assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");

  Label retry;
  bind(retry);
  Assembler::z_trtt(r1, r2, m3);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

//---------------------------------------
// Helpers for Intrinsic Emitters
//---------------------------------------

/**
 * uint32_t crc;
 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
 */
void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) {
  assert_different_registers(crc, table, tmp);
  assert_different_registers(val, table);
  if (crc == val) {      // Must rotate first to use the unmodified value.
    rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
    z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
  } else {
    z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
    rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
  }
  z_x(crc, Address(table, tmp, 0));
}

/**
 * uint32_t crc;
 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
 */
void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
  fold_byte_crc32(crc, crc, table, tmp);
}

/**
 * Emits code to update CRC-32 with a byte value according to constants in table.
 *
 * @param [in,out]crc Register containing the crc.
 * @param [in]val     Register containing the byte to fold into the CRC.
 * @param [in]table   Register containing the table of crc constants.
 *
 * uint32_t crc;
 * val = crc_table[(val ^ crc) & 0xFF];
 * crc = val ^ (crc >> 8);
 */
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
  z_xr(val, crc);
  fold_byte_crc32(crc, val, table, val);
}
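
// Note on the table layout assumed by the helpers above: "table" points to a zlib-style
// multi-column CRC table with CRC32_COLUMN_SIZE 4-byte entries per column. The byte
// index is pre-shifted left by 2 (see rotate_then_insert above) to scale it to the
// 4-byte entry size; fold_byte_crc32() reads column 0, update_1word_crc32() below reads
// columns 4..7.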

/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register pointing to CRC table
 */
void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
  assert_different_registers(crc, buf, len, table, data);

  Label L_mainLoop, L_done;
  const int mainLoop_stepping = 1;

  // Process all bytes in a single-byte loop.
  z_ltr(len, len);
  z_brnh(L_done);

  bind(L_mainLoop);
  z_llgc(data, Address(buf, (intptr_t)0)); // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
  add2reg(buf, mainLoop_stepping);         // Advance buffer position.
  update_byte_crc32(crc, data, table);
  z_brct(len, L_mainLoop);                 // Iterate.

  bind(L_done);
}
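
// Note: update_byteLoop_crc32() consumes its len register (z_brct counts it down) and
// advances buf past the processed bytes; callers must not rely on either value afterwards.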

/**
 * Emits code to update CRC-32 with a 4-byte value according to constants in table.
 * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c.
 *
 */
void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
                                        Register t0, Register t1, Register t2, Register t3) {
  // This is what we implement (the DOBIG4 part):
  //
  // #define DOBIG4 c ^= *++buf4; \
  //         c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
  //             crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
  // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
  // Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
  const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
  const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
  const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
  const int ix3 = 7*(4*CRC32_COLUMN_SIZE);

  // XOR crc with next four bytes of buffer.
  lgr_if_needed(t0, crc);
  z_x(t0, Address(buf, bufDisp));
  if (bufInc != 0) {
    add2reg(buf, bufInc);
  }

  // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.
  rotate_then_insert(t3, t0, 56-2, 63-2, 2,    true);  // ((c >>  0) & 0xff) << 2
  rotate_then_insert(t2, t0, 56-2, 63-2, 2-8,  true);  // ((c >>  8) & 0xff) << 2
  rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true);  // ((c >> 16) & 0xff) << 2
  rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true);  // ((c >> 24) & 0xff) << 2

  // XOR indexed table values to calculate updated crc.
  z_ly(t2, Address(table, t2, (intptr_t)ix1));
  z_ly(t0, Address(table, t0, (intptr_t)ix3));
  z_xy(t2, Address(table, t3, (intptr_t)ix0));
  z_xy(t0, Address(table, t1, (intptr_t)ix2));
  z_xr(t0, t2);           // Now t0 contains the updated CRC value.
  lgr_if_needed(crc, t0);
}
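
// The invertCRC parameter of the kernel_crc32_* emitters below selects the usual CRC-32
// pre/post conditioning: when true, the crc register is one's-complemented on entry and
// again on exit (see the not_() calls), matching the ~crc convention of zlib's crc32().
// Callers that pass an already conditioned value use false.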

/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register pointing to CRC table
 *
 * uses Z_R10..Z_R13 as work registers. Must be saved/restored by caller!
 */
void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
                                        Register t0, Register t1, Register t2, Register t3,
                                        bool invertCRC) {
  assert_different_registers(crc, buf, len, table);

  Label L_mainLoop, L_tail;
  Register data = t0;
  Register ctr  = Z_R0;
  const int mainLoop_stepping = 4;
  const int log_stepping      = exact_log2(mainLoop_stepping);

  // Don't test for len <= 0 here. This pathological case should not occur anyway.
  // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
  // The situation itself is detected and handled correctly by the conditional branches
  // following aghi(len, -stepping) and aghi(len, +stepping).

  if (invertCRC) {
    not_(crc, noreg, false);             // 1s complement of crc
  }

  // Check for short (<4 bytes) buffer.
  z_srag(ctr, len, log_stepping);
  z_brnh(L_tail);

  z_lrvr(crc, crc);          // Revert byte order because we are dealing with big-endian data.
  rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop

  BIND(L_mainLoop);
  update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
  z_brct(ctr, L_mainLoop);   // Iterate.

  z_lrvr(crc, crc);          // Revert byte order back to original.

  // Process last few (<4) bytes of buffer.
  BIND(L_tail);
  update_byteLoop_crc32(crc, buf, len, table, data);

  if (invertCRC) {
    not_(crc, noreg, false);             // 1s complement of crc
  }
}

/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register pointing to CRC table
 */
void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
                                        Register t0, Register t1, Register t2, Register t3,
                                        bool invertCRC) {
  assert_different_registers(crc, buf, len, table);

  Register data = t0;

  if (invertCRC) {
    not_(crc, noreg, false);             // 1s complement of crc
  }

  update_byteLoop_crc32(crc, buf, len, table, data);

  if (invertCRC) {
    not_(crc, noreg, false);             // 1s complement of crc
  }
}

void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
                                             bool invertCRC) {
  assert_different_registers(crc, buf, len, table, tmp);

  if (invertCRC) {
    not_(crc, noreg, false);             // 1s complement of crc
  }

  z_llgc(tmp, Address(buf, (intptr_t)0)); // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
  update_byte_crc32(crc, tmp, table);

  if (invertCRC) {
    not_(crc, noreg, false);             // 1s complement of crc
  }
}

void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
                                                bool invertCRC) {
  assert_different_registers(crc, val, table);

  if (invertCRC) {
    not_(crc, noreg, false);             // 1s complement of crc
  }

  update_byte_crc32(crc, val, table);

  if (invertCRC) {
    not_(crc, noreg, false);             // 1s complement of crc
  }
}

//
// Code for BigInteger::multiplyToLen() intrinsic.
//

// dest_lo += src1 + src2
// dest_hi += carry1 + carry2
// Z_R7 is destroyed!
void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo,
                                     Register src1, Register src2) {
  clear_reg(Z_R7);
  z_algr(dest_lo, src1);
  z_alcgr(dest_hi, Z_R7);
  z_algr(dest_lo, src2);
  z_alcgr(dest_hi, Z_R7);
}
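
// The multiply emitters below rely on MLGR (z_mlgr), which multiplies the 64-bit value
// in the odd register of an even/odd pair (product->successor()) by a 64-bit operand
// and places the 128-bit result across the pair: high half in the even register
// (product), low half in the odd register. add2_with_carry() above then accumulates
// such 128-bit values using ALGR/ALCGR (add logical, then add logical with carry).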

// Multiply 64 bit by 64 bit first loop.
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart,
                                           Register x_xstart,
                                           Register y, Register y_idx,
                                           Register z,
                                           Register carry,
                                           Register product,
                                           Register idx, Register kdx) {
  // jlong carry, x[], y[], z[];
  // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //   huge_128 product = y[idx] * x[xstart] + carry;
  //   z[kdx] = (jlong)product;
  //   carry  = (jlong)(product >>> 64);
  // }
  // z[xstart] = carry;

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  z_aghi(xstart, -1);
  z_brl(L_one_x);   // Special case: length of x is 1.

  // Load next two integers of x.
  z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
  mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));

  bind(L_first_loop);

  z_aghi(idx, -1);
  z_brl(L_first_loop_exit);
  z_aghi(idx, -1);
  z_brl(L_one_y);

  // Load next two integers of y.
  z_sllg(Z_R1_scratch, idx, LogBytesPerInt);
  mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0));

  bind(L_multiply);

  Register multiplicand = product->successor();
  Register product_low = multiplicand;

  lgr_if_needed(multiplicand, x_xstart);
  z_mlgr(product, y_idx);     // multiplicand * y_idx -> product::multiplicand
  clear_reg(Z_R7);
  z_algr(product_low, carry); // Add carry to result.
  z_alcgr(product, Z_R7);     // Add carry of the last addition.
  add2reg(kdx, -2);

  // Store result.
  z_sllg(Z_R7, kdx, LogBytesPerInt);
  reg2mem_opt(product_low, Address(z, Z_R7, 0));
  lgr_if_needed(carry, product);
  z_bru(L_first_loop);

  bind(L_one_y); // Load one 32 bit portion of y as (0,value).

  clear_reg(y_idx);
  mem2reg_opt(y_idx, Address(y, (intptr_t) 0), false);
  z_bru(L_multiply);

  bind(L_one_x); // Load one 32 bit portion of x as (0,value).

  clear_reg(x_xstart);
  mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
  z_bru(L_first_loop);

  bind(L_first_loop_exit);
}

// Multiply 64 bit by 64 bit and add 128 bit.
void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y,
                                            Register z,
                                            Register yz_idx, Register idx,
                                            Register carry, Register product,
                                            int offset) {
  // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
  // z[kdx] = (jlong)product;

  Register multiplicand = product->successor();
  Register product_low = multiplicand;

  z_sllg(Z_R7, idx, LogBytesPerInt);
  mem2reg_opt(yz_idx, Address(y, Z_R7, offset));

  lgr_if_needed(multiplicand, x_xstart);
  z_mlgr(product, yz_idx);   // multiplicand * yz_idx -> product::multiplicand
  mem2reg_opt(yz_idx, Address(z, Z_R7, offset));

  add2_with_carry(product, product_low, carry, yz_idx);

  z_sllg(Z_R7, idx, LogBytesPerInt);
  reg2mem_opt(product_low, Address(z, Z_R7, offset));
}

// Multiply 128 bit by 128 bit. Unrolled inner loop.
void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
                                             Register y, Register z,
                                             Register yz_idx, Register idx,
                                             Register jdx,
                                             Register carry, Register product,
                                             Register carry2) {
  // jlong carry, x[], y[], z[];
  // int kdx = ystart+1;
  // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //   huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
  //   z[kdx+idx+1] = (jlong)product;
  //   jlong carry2 = (jlong)(product >>> 64);
  //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
  //   z[kdx+idx] = (jlong)product;
  //   carry = (jlong)(product >>> 64);
  // }
  // idx += 2;
  // if (idx > 0) {
  //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
  //   z[kdx+idx] = (jlong)product;
  //   carry = (jlong)(product >>> 64);
  // }

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  // scale the index
  lgr_if_needed(jdx, idx);
  and_imm(jdx, 0xfffffffffffffffcL);
  rshift(jdx, 2);

  bind(L_third_loop);

  z_aghi(jdx, -1);
  z_brl(L_third_loop_exit);
  add2reg(idx, -4);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
  lgr_if_needed(carry2, product);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
  lgr_if_needed(carry, product);
  z_bru(L_third_loop);

  bind(L_third_loop_exit); // Handle any left-over operand parts.

  and_imm(idx, 0x3);
  z_brz(L_post_third_loop_done);

  Label L_check_1;

  z_aghi(idx, -2);
  z_brl(L_check_1);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
  lgr_if_needed(carry, product);

  bind(L_check_1);

  add2reg(idx, 0x2);
  and_imm(idx, 0x1);
  z_aghi(idx, -1);
  z_brl(L_post_third_loop_done);

  Register multiplicand = product->successor();
  Register product_low = multiplicand;

  z_sllg(Z_R7, idx, LogBytesPerInt);
  clear_reg(yz_idx);
  mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false);
  lgr_if_needed(multiplicand, x_xstart);
  z_mlgr(product, yz_idx);   // multiplicand * yz_idx -> product::multiplicand
  clear_reg(yz_idx);
  mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false);

  add2_with_carry(product, product_low, yz_idx, carry);

  z_sllg(Z_R7, idx, LogBytesPerInt);
  reg2mem_opt(product_low, Address(z, Z_R7, 0), false);
  rshift(product_low, 32);

  lshift(product, 32);
  z_ogr(product_low, product);
  lgr_if_needed(carry, product_low);

  bind(L_post_third_loop_done);
}

void MacroAssembler::multiply_to_len(Register x, Register xlen,
                                     Register y, Register ylen,
                                     Register z,
                                     Register tmp1, Register tmp2,
                                     Register tmp3, Register tmp4,
                                     Register tmp5) {
  ShortBranchVerifier sbv(this);

  assert_different_registers(x, xlen, y, ylen, z,
                             tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7);
  assert_different_registers(x, xlen, y, ylen, z,
                             tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8);

  z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);

  // In OpenJDK, the zlen argument is stored as a 32-bit value in its stack slot.
  Address zlen(Z_SP, _z_abi(remaining_cargs));  // Int in long on big endian.

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product = Z_R0_scratch;
  const Register x_xstart = Z_R8;

  // First Loop.
  //
  //   final static long LONG_MASK = 0xffffffffL;
  //   int xstart = xlen - 1;
  //   int ystart = ylen - 1;
  //   long carry = 0;
  //   for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //     long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //     z[kdx] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[xstart] = (int)carry;
  //

  lgr_if_needed(idx, ylen);  // idx = ylen
  z_llgf(kdx, zlen);         // C2 does not respect int to long conversion for stub calls, thus load zero-extended.
  clear_reg(carry);          // carry = 0

  Label L_done;

  lgr_if_needed(xstart, xlen);
  z_aghi(xstart, -1);
  z_brl(L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  NearLabel L_second_loop;
  compare64_and_branch(kdx, RegisterOrConstant((intptr_t) 0), bcondEqual, L_second_loop);

  NearLabel L_carry;
  z_aghi(kdx, -1);
  z_brz(L_carry);

  // Store lower 32 bits of carry.
  z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
  rshift(carry, 32);
  z_aghi(kdx, -1);

  bind(L_carry);

  // Store upper 32 bits of carry.
  z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);

  // Second and third (nested) loops.
  //
  // for (int i = xstart-1; i >= 0; i--) { // Second loop
  //   carry = 0;
  //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
  //                    (z[k] & LONG_MASK) + carry;
  //     z[k] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[i] = (int)carry;
  // }
  //
  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx

  const Register jdx = tmp1;

  bind(L_second_loop);

  clear_reg(carry);          // carry = 0;
  lgr_if_needed(jdx, ylen);  // j = ystart+1

  z_aghi(xstart, -1);        // i = xstart-1;
  z_brl(L_done);

  // Use free slots in the current stackframe instead of push/pop.
  Address zsave(Z_SP, _z_abi(carg_1));
  reg2mem_opt(z, zsave);

  Label L_last_x;

  z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
  load_address(z, Address(z, Z_R1_scratch, 4)); // z = z + k - j
  z_aghi(xstart, -1);        // i = xstart-1;
  z_brl(L_last_x);

  z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
  mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));

  Label L_third_loop_prologue;

  bind(L_third_loop_prologue);

  Address xsave(Z_SP, _z_abi(carg_2));
  Address xlensave(Z_SP, _z_abi(carg_3));
  Address ylensave(Z_SP, _z_abi(carg_4));

  reg2mem_opt(x, xsave);
  reg2mem_opt(xstart, xlensave);
  reg2mem_opt(ylen, ylensave);

  multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);

  mem2reg_opt(z, zsave);
  mem2reg_opt(x, xsave);
  mem2reg_opt(xlen, xlensave);   // This is the decrement of the loop counter!
  mem2reg_opt(ylen, ylensave);

  add2reg(tmp3, 1, xlen);
  z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
  z_aghi(tmp3, -1);
  z_brl(L_done);

  rshift(carry, 32);
  z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
  z_bru(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);

  clear_reg(x_xstart);
  mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
  z_bru(L_third_loop_prologue);

  bind(L_done);

  z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
}

#ifndef PRODUCT

// Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
  Label ok;
  if (check_equal) {
    z_bre(ok);
  } else {
    z_brne(ok);
  }
  stop(msg, id);
  bind(ok);
}

// Assert if CC indicates "low".
void MacroAssembler::asm_assert_low(const char *msg, int id) {
  Label ok;
  z_brnl(ok);
  stop(msg, id);
  bind(ok);
}

// Assert if CC indicates "high".
void MacroAssembler::asm_assert_high(const char *msg, int id) {
  Label ok;
  z_brnh(ok);
  stop(msg, id);
  bind(ok);
}

// Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false);
// generate non-relocatable code.
void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) {
  Label ok;
  if (check_equal) { z_bre(ok);  }
  else             { z_brne(ok); }
  stop_static(msg, id);
  bind(ok);
}

void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
                                          Register mem_base, const char* msg, int id) {
  switch (size) {
    case 4:
      load_and_test_int(Z_R0, Address(mem_base, mem_offset));
      break;
    case 8:
      load_and_test_long(Z_R0, Address(mem_base, mem_offset));
      break;
    default:
      ShouldNotReachHere();
  }
  if (allow_relocation) { asm_assert(check_equal, msg, id); }
  else                  { asm_assert_static(check_equal, msg, id); }
}

// Check the condition
//   expected_size == FP - SP
// after transformation:
//   expected_size - FP + SP == 0
// Destroys Register expected_size if no tmp register is passed.
void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) {
  if (tmp == noreg) {
    tmp = expected_size;
  } else {
    if (tmp != expected_size) {
      z_lgr(tmp, expected_size);
    }
    z_algr(tmp, Z_SP);
    z_slg(tmp, 0, Z_R0, Z_SP);
    asm_assert_eq(msg, id);
  }
}
#endif // !PRODUCT

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    unimplemented("", 117);
  }
}

// Plausibility check for oops.
void MacroAssembler::verify_oop(Register oop, const char* msg) {
  if (!VerifyOops) return;

  BLOCK_COMMENT("verify_oop {");
  Register tmp = Z_R0;
  unsigned int nbytes_save = 5*BytesPerWord;
  address entry = StubRoutines::verify_oop_subroutine_entry_address();

  save_return_pc();
  push_frame_abi160(nbytes_save);
  z_stmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);

  z_lgr(Z_ARG2, oop);
  load_const(Z_ARG1, (address) msg);
  load_const(Z_R1, entry);
  z_lg(Z_R1, 0, Z_R1);
  call_c(Z_R1);

  z_lmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);
  pop_frame();
  restore_return_pc();

  BLOCK_COMMENT("} verify_oop ");
}

const char* MacroAssembler::stop_types[] = {
  "stop",
  "untested",
  "unimplemented",
  "shouldnotreachhere"
};

static void stop_on_request(const char* tp, const char* msg) {
  tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg);
  guarantee(false, "Z assembly code requires stop: %s", msg);
}

void MacroAssembler::stop(int type, const char* msg, int id) {
  BLOCK_COMMENT(err_msg("stop: %s {", msg));

  // Setup arguments.
  load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
  load_const(Z_ARG2, (void*) msg);
  get_PC(Z_R14);     // Following code pushes a frame without entering a new function. Use current pc as return address.
  save_return_pc();  // Saves return pc Z_R14.
  push_frame_abi160(0);
  call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
  // The plain disassembler does not recognize illtrap. It instead displays
  // a 32-bit value. Issuing two illtraps ensures the disassembler finds
  // the proper beginning of the next instruction.
  z_illtrap(); // Illegal instruction.
  z_illtrap(); // Illegal instruction.

  BLOCK_COMMENT(" } stop");
}
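
// stop_chain() below deduplicates the stop call sequence: the first call at a site
// materializes the arguments and the runtime call and returns its pc as "reentry";
// subsequent calls that lie within a 16-bit relative branch range simply branch back
// to that sequence (see RelAddr::is_in_range_of_RelAddr16). With allow_relocation set,
// chaining is disabled and a full sequence is emitted each time.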

// Special version of stop() for code size reduction.
// Reuses the previously generated call sequence, if any.
// Generates the call sequence on its own, if necessary.
// Note: This code will work only in non-relocatable code!
//       The relative address of the data elements (arg1, arg2) must not change.
//       The reentry point must not move relative to its users. This prerequisite
//       should be given for "hand-written" code, if all chain calls are in the same code blob.
//       Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.
address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {
  BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg));

  // Setup arguments.
  if (allow_relocation) {
    // Relocatable version (for comparison purposes). Remove after some time.
    load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
    load_const(Z_ARG2, (void*) msg);
  } else {
    load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);
    load_absolute_address(Z_ARG2, (address)msg);
  }
  if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
    BLOCK_COMMENT("branch to reentry point:");
    z_brc(bcondAlways, reentry);
  } else {
    BLOCK_COMMENT("reentry point:");
    reentry = pc();      // Re-entry point for subsequent stop calls.
    save_return_pc();    // Saves return pc Z_R14.
    push_frame_abi160(0);
    if (allow_relocation) {
      reentry = NULL;    // Prevent reentry if code relocation is allowed.
      call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
    } else {
      call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
    }
    z_illtrap(); // Illegal instruction as emergency stop, should the above call return.
  }
  BLOCK_COMMENT(" } stop_chain");

  return reentry;
}

// Special version of stop() for code size reduction.
// Assumes constant relative addresses for data and runtime call.
void MacroAssembler::stop_static(int type, const char* msg, int id) {
  stop_chain(NULL, type, msg, id, false);
}

void MacroAssembler::stop_subroutine() {
  unimplemented("stop_subroutine", 710);
}

// Prints msg to stdout from within generated code.
void MacroAssembler::warn(const char* msg) {
  RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14);
  load_absolute_address(Z_R1, (address) warning);
  load_absolute_address(Z_ARG1, (address) msg);
  (void) call(Z_R1);
  RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers);
}

#ifndef PRODUCT

// Write pattern 0x0101010101010101 in region [low-before, high+after].
void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) {
  if (!ZapEmptyStackFields) return;
  BLOCK_COMMENT("zap memory region {");
  load_const_optimized(val, 0x0101010101010101);
  int size = before + after;
  if (low == high && size < 5 && size > 0) {
    int offset = -before*BytesPerWord;
    for (int i = 0; i < size; ++i) {
      z_stg(val, Address(low, offset));
      offset += (1*BytesPerWord);
    }
  } else {
    add2reg(addr, -before*BytesPerWord, low);
    if (after) {
#ifdef ASSERT
      jlong check = after * BytesPerWord;
      assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable !");
#endif
      add2reg(high, after * BytesPerWord);
    }
    NearLabel loop;
    bind(loop);
    z_stg(val, Address(addr));
    add2reg(addr, 8);
    compare64_and_branch(addr, high, bcondNotHigh, loop);
    if (after) {
      add2reg(high, -after * BytesPerWord);
    }
  }
  BLOCK_COMMENT("} zap memory region");
}
#endif // !PRODUCT
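
// SkipIfEqual is a small RAII helper: the constructor tests the given flag and emits a
// conditional branch over everything the caller generates until the destructor runs,
// where the branch target label gets bound.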

SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {
  _masm = masm;
  _masm->load_absolute_address(_rscratch, (address)flag_addr);
  _masm->load_and_test_int(_rscratch, Address(_rscratch));
  if (value) {
    _masm->z_brne(_label); // Skip if true, i.e. != 0.
  } else {
    _masm->z_bre(_label);  // Skip if false, i.e. == 0.
  }
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}