/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP
#define CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP

#include "asm/assembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "utilities/top.hpp"
// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeJump
// - - NativeIllegalInstruction
// - - NativeGeneralJump
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativePushConst
// - - NativeTstRegMem

// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.

class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;
  friend bool is_NativeCallTrampolineStub_at(address);
 public:
  enum { instruction_size = 4 };
  inline bool is_nop();
  inline bool is_illegal();
  inline bool is_return();
  bool is_jump();
  inline bool is_jump_or_nop();
  inline bool is_cond_jump();
  bool is_safepoint_poll();
  bool is_movz();
  bool is_movk();
  bool is_sigill_zombie_not_entrant();

 protected:
  address addr_at(int offset) const    { return address(this) + offset; }

  s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const        { return *(jint*) addr_at(offset); }
  juint uint_at(int offset) const      { return *(juint*) addr_at(offset); }

  address ptr_at(int offset) const     { return *(address*) addr_at(offset); }

  oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }

  void set_char_at(int offset, char c)      { *addr_at(offset) = (u_char)c; }
  void set_int_at(int offset, jint i)       { *(jint*)addr_at(offset) = i; }
  void set_uint_at(int offset, jint i)      { *(juint*)addr_at(offset) = i; }
  void set_ptr_at (int offset, address ptr) { *(address*) addr_at(offset) = ptr; }
  void set_oop_at (int offset, oop o)       { *(oop*) addr_at(offset) = o; }

 public:

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);

  static bool is_adrp_at(address instr);
  static bool is_ldr_literal_at(address instr);
  static bool is_ldrw_to_zr(address instr);

  static bool is_call_at(address instr) {
    const uint32_t insn = (*(uint32_t*)instr);
    return (insn >> 26) == 0b100101;
  }
  bool is_call() {
    return is_call_at(addr_at(0));
  }
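
  // For reference, a sketch of the encoding matched above (from the
  // ARMv8 ISA): bl has bits 31..26 = 0b100101 and bits 25..0 = imm26,
  // a signed word offset, so 0x94000000 | imm26 encodes
  // "bl pc + imm26 * 4".  Note that is_call_at() recognizes only this
  // immediate form; an indirect call (blr) is a different encoding and
  // is deliberately not matched.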

  static bool maybe_cpool_ref(address instr) {
    return is_adrp_at(instr) || is_ldr_literal_at(instr);
  }

  bool is_Membar() {
    unsigned int insn = uint_at(0);
    return Instruction_aarch64::extract(insn, 31, 12) == 0b11010101000000110011 &&
           Instruction_aarch64::extract(insn, 7, 0) == 0b10111111;
  }
};

inline NativeInstruction* nativeInstruction_at(address address) {
  return (NativeInstruction*)address;
}

// The natural type of an AArch64 instruction is uint32_t
inline NativeInstruction* nativeInstruction_at(uint32_t *address) {
  return (NativeInstruction*)address;
}
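
// Typical use, as a sketch (`pc` here stands for any address known to
// point at the start of an instruction):
//
//   NativeInstruction* insn = nativeInstruction_at(pc);
//   if (insn->is_call()) {
//     // safe to view the same address as a NativeCall
//   }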

inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native
// bl (branch-and-link) call instructions (used to manipulate inline
// caches, primitive & DLL calls, etc.).

class NativeCall: public NativeInstruction {
 public:
  enum Aarch64_specific_constants {
    instruction_size            =    4,
    instruction_offset          =    0,
    displacement_offset         =    0,
    return_address_offset       =    4
  };

  enum { cache_line_size = BytesPerWord };  // conservative estimate!
  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  int   displacement() const                { return (int_at(displacement_offset) << 6) >> 4; }
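  // The shift pair above extracts the signed imm26 field and scales it
  // to a byte offset: "<< 6" drops the six opcode bits off the top, and
  // the arithmetic ">> 4" sign-extends while leaving the value shifted
  // left by 2, i.e. multiplied by 4 (instructions are word-aligned).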
  address displacement_address() const      { return addr_at(displacement_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;

  void set_destination(address dest) {
    int offset = dest - instruction_address();
    unsigned int insn = 0b100101 << 26;
    assert((offset & 3) == 0, "should be");
    offset >>= 2;
    offset &= (1 << 26) - 1; // mask off insn part
    insn |= offset;
    set_int_at(displacement_offset, insn);
  }
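
  // A typical (non-MT-safe) patching sequence, as a sketch:
  //
  //   NativeCall* call = nativeCall_at(pc);   // pc: address of the bl
  //   call->set_destination(new_entry);
  //   ICache::invalidate_range(pc, NativeCall::instruction_size);
  //
  // Code that may be executing concurrently must use
  // set_destination_mt_safe() below instead.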

  void  verify_alignment()                  { ; }
  void  verify();
  void  print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);

  // Similar to replace_mt_safe, but just changes the destination. The
  // important thing is that free-running threads are able to execute
  // this call instruction at all times. If the call is an immediate BL
  // instruction we can simply rely on atomicity of 32-bit writes to
  // make sure other threads will see no intermediate states.

  // We cannot rely on locks here, since the free-running threads must run at
  // full speed.
  //
  // Used in the runtime linkage of calls; see class CompiledIC.
  // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)

  // The parameter assert_lock disables the assertion during code generation.
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  address get_trampoline();
};

inline NativeCall* nativeCall_at(address address) {
  NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}
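
// Example, as a sketch: given the return address of a call (e.g. taken
// from a frame), recover the call site and its current target:
//
//   if (NativeCall::is_call_before(return_address)) {
//     NativeCall* call = nativeCall_before(return_address);
//     address target = call->destination();
//   }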

// An interface for accessing/manipulating native mov reg, imm instructions.
// (used to manipulate inlined 64-bit data, calls, etc.)
class NativeMovConstReg: public NativeInstruction {
 public:
  enum Aarch64_specific_constants {
    instruction_size            =    3 * 4, // movz, movk, movk.  See movptr().
    instruction_offset          =    0,
    displacement_offset         =    0
  };
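
  // A typical materialization sequence, as a sketch (the exact code is
  // emitted by MacroAssembler::movptr(), which assumes a 48-bit
  // address space):
  //
  //   movz x0, #imm16_0             // bits 15..0
  //   movk x0, #imm16_1, lsl #16    // bits 31..16
  //   movk x0, #imm16_2, lsl #32    // bits 47..32
  //
  // Alternatively the constant may be reached through the constant pool
  // (adrp + ldr, or a pc-relative ldr literal), which is what
  // maybe_cpool_ref() detects; next_instruction_address() below handles
  // all three shapes.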

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  {
    if (nativeInstruction_at(instruction_address())->is_movz())
      // Assume movz, movk, movk
      return addr_at(instruction_size);
    else if (is_adrp_at(instruction_address()))
      return addr_at(2*4);
    else if (is_ldr_literal_at(instruction_address()))
      return addr_at(4);
    assert(false, "Unknown instruction in NativeMovConstReg");
    return NULL;
  }

  intptr_t data() const;
  void set_data(intptr_t x);

  void flush() {
    if (! maybe_cpool_ref(instruction_address())) {
      ICache::invalidate_range(instruction_address(), instruction_size);
    }
  }

  void verify();
  void print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

inline NativeMovConstReg* nativeMovConstReg_before(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};

// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l/q] reg, [reg+offset]     (instruction_code_mem2reg)
//      mov[s/z]x[w/b/q] [reg + offset], reg
//      fld_s  [reg+offset]
//      fld_d  [reg+offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//      mov_literal64  scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte,word,long
// macros.  For example: The load_unsigned_byte instruction generates
// an xor reg,reg inst prior to generating the movb instruction.  This
// class must skip the xor instruction.

class NativeMovRegMem: public NativeInstruction {
  enum AArch64_specific_constants {
    instruction_size            =    4,
    instruction_offset          =    0,
    data_offset                 =    0,
    next_instruction_offset     =    4
  };

 public:
  // helper
  int instruction_start() const;

  address instruction_address() const;

  address next_instruction_address() const;

  int   offset() const;

  void  set_offset(int x);

  void  add_offset_in_bytes(int add_offset)     { set_offset(offset() + add_offset); }

  void verify();
  void print();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at(address address);
};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

class NativeMovRegMemPatching: public NativeMovRegMem {
 private:
  friend NativeMovRegMemPatching* nativeMovRegMemPatching_at(address address) { Unimplemented(); return 0; }
};

// An interface for accessing/manipulating native leal instruction of form:
//        leal reg, [reg + offset]

class NativeLoadAddress: public NativeMovRegMem {
  // unused on AArch64; carried over from the x86 port
  static const bool has_rex = true;
  static const int rex_size = 1;
 public:

  void verify();
  void print();

  // unit test stuff
  static void test() {}
};

class NativeJump: public NativeInstruction {
 public:
  enum AArch64_specific_constants {
    instruction_size            =    4,
    instruction_offset          =    0,
    data_offset                 =    0,
    next_instruction_offset     =    4
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address jump_destination() const;
  void set_jump_destination(address dest);

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
  NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}

class NativeGeneralJump: public NativeJump {
 public:
  enum AArch64_specific_constants {
    instruction_size            =    4 * 4,
    instruction_offset          =    0,
    data_offset                 =    0,
    next_instruction_offset     =    4 * 4
  };
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);
  static void verify();
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  NativeGeneralJump* jump = (NativeGeneralJump*)(address);
  debug_only(jump->verify();)
  return jump;
}

class NativePopReg : public NativeInstruction {
 public:
  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};


class NativeIllegalInstruction: public NativeInstruction {
 public:
  // Insert illegal opcode at specific address
  static void insert(address code_pos);
};

// return instruction that does not pop values off the stack
class NativeReturn: public NativeInstruction {
 public:
};

// return instruction that does pop values off the stack
class NativeReturnX: public NativeInstruction {
 public:
};

// Simple test vs memory
class NativeTstRegMem: public NativeInstruction {
 public:
};

inline bool NativeInstruction::is_nop() {
  uint32_t insn = *(uint32_t*)addr_at(0);
  return insn == 0xd503201f;  // the canonical NOP encoding (hint #0)
}

inline bool NativeInstruction::is_jump() {
  uint32_t insn = *(uint32_t*)addr_at(0);

  if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    return true;
  } else
    return false;
}

inline bool NativeInstruction::is_jump_or_nop() {
  return is_nop() || is_jump();
}

// Call trampoline stubs.
class NativeCallTrampolineStub : public NativeInstruction {
 public:

  enum AArch64_specific_constants {
    instruction_size            =    4 * 4,
    instruction_offset          =    0,
    data_offset                 =    2 * 4,
    next_instruction_offset     =    4 * 4
  };

  address destination(nmethod *nm = NULL) const;
  void set_destination(address new_destination);
  ptrdiff_t destination_offset() const;
};

inline bool is_NativeCallTrampolineStub_at(address addr) {
  // Ensure that the stub is exactly
  //      ldr   xscratch1, L
  //      br    xscratch1
  // L:
  uint32_t *i = (uint32_t *)addr;
  return i[0] == 0x58000048 && i[1] == 0xd61f0100;
}
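
// For reference, a sketch of what the two magic words above encode:
// 0x58000048 is "ldr x8, pc+8" (an ldr-literal whose target is the word
// right after the br), and 0xd61f0100 is "br x8".  The 64-bit branch
// target itself is stored in the stub at data_offset (2 * 4 bytes in),
// which is what destination() and set_destination() read and write.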

inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) {
  assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found");
  return (NativeCallTrampolineStub*)addr;
}

class NativeMembar : public NativeInstruction {
 public:
  unsigned int get_kind() { return Instruction_aarch64::extract(uint_at(0), 11, 8); }
  void set_kind(int order_kind) { Instruction_aarch64::patch(addr_at(0), 11, 8, order_kind); }
};
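
// The "kind" above is the CRm field of the dmb instruction, i.e. its
// barrier-option operand.  For reference (a sketch from the ARMv8 ISA):
// 0b1111 = sy, 0b1011 = ish, 0b1010 = ishst, 0b1001 = ishld.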

inline NativeMembar *NativeMembar_at(address addr) {
  assert(nativeInstruction_at(addr)->is_Membar(), "no membar found");
  return (NativeMembar*)addr;
}

#endif // CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP