/*
 * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeLoadAddress
// - - NativeJump
// - - NativeIllegalInstruction
// - - NativeGeneralJump
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativePopReg
// - - NativeTstRegMem

// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to its address.

class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Intel_specific_constants {
    nop_instruction_code  = 0x90,
    nop_instruction_size  =    1
  };

  bool is_nop()                        { return ubyte_at(0) == nop_instruction_code; }
  inline bool is_call();
  inline bool is_illegal();
  inline bool is_return();
  inline bool is_jump();
  inline bool is_cond_jump();
  inline bool is_safepoint_poll();
  inline bool is_mov_literal64();

 protected:
  address addr_at(int offset) const    { return address(this) + offset; }

  s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const        { return *(jint*) addr_at(offset); }

  intptr_t ptr_at(int offset) const    { return *(intptr_t*) addr_at(offset); }

  oop oop_at(int offset) const         { return *(oop*) addr_at(offset); }


  void set_char_at(int offset, char c)       { *addr_at(offset) = (u_char)c; wrote(offset); }
  void set_int_at(int offset, jint i)        { *(jint*)addr_at(offset) = i; wrote(offset); }
  void set_ptr_at(int offset, intptr_t ptr)  { *(intptr_t*) addr_at(offset) = ptr; wrote(offset); }
  void set_oop_at(int offset, oop o)         { *(oop*) addr_at(offset) = o; wrote(offset); }

  // This doesn't really do anything on Intel, but it is the place where
  // cache invalidation belongs, generically:
  void wrote(int offset);

 public:

  // unit test stuff
  static void test() {}                // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
  //inst->verify();
#endif
  return inst;
}
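
// Example (illustrative only; uses only functions declared in this file):
// client code typically wraps a raw code address and queries the instruction
// kind before casting to a more specific accessor, e.g.
//
//   NativeInstruction* ni = nativeInstruction_at(pc);
//   if (ni->is_call()) {
//     address target = nativeCall_at(pc)->destination();
//   }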

inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
// instructions (used to manipulate inline caches, primitive & dll calls, etc.).

class NativeCall: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code       = 0xE8,
    instruction_size       =    5,
    instruction_offset     =    0,
    displacement_offset    =    1,
    return_address_offset  =    5
  };

  enum { cache_line_size = BytesPerWord }; // conservative estimate!

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  int     displacement() const              { return (jint) int_at(displacement_offset); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;
  void    set_destination(address dest)     {
#ifdef AMD64
    assert((labs((intptr_t) dest - (intptr_t) return_address()) &
            0xFFFFFFFF00000000) == 0,
           "must be 32bit offset");
#endif // AMD64
    set_int_at(displacement_offset, dest - return_address());
  }
  void    set_destination_mt_safe(address dest);

  void    verify_alignment() { assert((intptr_t)addr_at(displacement_offset) % BytesPerInt == 0, "must be aligned"); }
  void    verify();
  void    print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_at(address instr) {
    return ((*instr) & 0xFF) == NativeCall::instruction_code;
  }

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
           nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);
};

inline NativeCall* nativeCall_at(address address) {
  NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}
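
// Example (illustrative only): the call is encoded as 0xE8 followed by a
// 32-bit displacement relative to the return address (pc + 5), so
// retargeting a call found before a known return address looks like
//
//   NativeCall* call = nativeCall_before(return_pc);
//   call->set_destination(new_entry);  // writes new_entry - call->return_address()
//
// For code that other threads may be executing concurrently, use
// set_destination_mt_safe() instead.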

// An interface for accessing/manipulating native mov reg, imm32/imm64
// instructions (used to manipulate inlined 32-bit/64-bit data, dll calls, etc.).
class NativeMovConstReg: public NativeInstruction {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_code         = 0xB8,
    instruction_size         = 1 + rex_size + wordSize,
    instruction_offset       = 0,
    data_offset              = 1 + rex_size,
    next_instruction_offset  = instruction_size,
    register_mask            = 0x07
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  intptr_t data() const                     { return ptr_at(data_offset); }
  void set_data(intptr_t x)                 { set_ptr_at(data_offset, x); }

  void verify();
  void print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

inline NativeMovConstReg* nativeMovConstReg_before(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}
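
// Example (illustrative only): patching the inlined constant of a
// mov reg, imm instruction whose start address is known:
//
//   NativeMovConstReg* mov = nativeMovConstReg_at(pc);
//   intptr_t old_value = mov->data();
//   mov->set_data(new_value);          // rewrites the imm32/imm64 field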

class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};

#ifndef AMD64

// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l] reg, [reg + offset]   (instruction_code_mem2reg)
//      mov[s/z]x[w/b] reg, [reg + offset]
//      fld_s  [reg + offset]
//      fld_d  [reg + offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte,word,long
// macros. For example: The load_unsigned_byte instruction generates
// an xor reg,reg inst prior to generating the movb instruction. This
// class must skip the xor instruction.
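
// Example (illustrative byte sequence): for load_unsigned_byte the generated
// code may be
//
//   33 C0                   xor    eax, eax
//   8A 85 08 00 00 00       movb   al, [ebp + 8]
//
// instruction_address() sees the leading 0x33 and returns the address of the
// 0x8A opcode, two bytes past the start; offset() then reads the disp32 that
// follows the ModR/M byte.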

class NativeMovRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code_xor             = 0x33,
    instruction_extended_prefix      = 0x0F,
    instruction_code_mem2reg_movzxb  = 0xB6,
    instruction_code_mem2reg_movsxb  = 0xBE,
    instruction_code_mem2reg_movzxw  = 0xB7,
    instruction_code_mem2reg_movsxw  = 0xBF,
    instruction_operandsize_prefix   = 0x66,
    instruction_code_reg2meml        = 0x89,
    instruction_code_mem2regl        = 0x8b,
    instruction_code_reg2memb        = 0x88,
    instruction_code_mem2regb        = 0x8a,
    instruction_code_float_s         = 0xd9,
    instruction_code_float_d         = 0xdd,
    instruction_code_long_volatile   = 0xdf,
    instruction_code_xmm_ss_prefix   = 0xf3,
    instruction_code_xmm_sd_prefix   = 0xf2,
    instruction_code_xmm_code        = 0x0f,
    instruction_code_xmm_load        = 0x10,
    instruction_code_xmm_store       = 0x11,
    instruction_code_xmm_lpd         = 0x12,

    instruction_size                 = 4,
    instruction_offset               = 0,
    data_offset                      = 2,
    next_instruction_offset          = 4
  };

  address instruction_address() const {
    if (*addr_at(instruction_offset)   == instruction_operandsize_prefix &&
        *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
      return addr_at(instruction_offset+1); // Not SSE instructions
    }
    else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
      return addr_at(instruction_offset+1);
    }
    else if (*addr_at(instruction_offset) == instruction_code_xor) {
      return addr_at(instruction_offset+2);
    }
    else return addr_at(instruction_offset);
  }

  address next_instruction_address() const {
    switch (*addr_at(instruction_offset)) {
    case instruction_operandsize_prefix:
      if (*addr_at(instruction_offset+1) == instruction_code_xmm_code)
        return instruction_address() + instruction_size; // SSE instructions
    case instruction_extended_prefix:
      return instruction_address() + instruction_size + 1;
    case instruction_code_reg2meml:
    case instruction_code_mem2regl:
    case instruction_code_reg2memb:
    case instruction_code_mem2regb:
    case instruction_code_xor:
      return instruction_address() + instruction_size + 2;
    default:
      return instruction_address() + instruction_size;
    }
  }

  int offset() const {
    if (*addr_at(instruction_offset)   == instruction_operandsize_prefix &&
        *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
      return int_at(data_offset+1); // Not SSE instructions
    }
    else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
      return int_at(data_offset+1);
    }
    else if (*addr_at(instruction_offset) == instruction_code_xor ||
             *addr_at(instruction_offset) == instruction_code_xmm_ss_prefix ||
             *addr_at(instruction_offset) == instruction_code_xmm_sd_prefix ||
             *addr_at(instruction_offset) == instruction_operandsize_prefix) {
      return int_at(data_offset+2);
    }
    else return int_at(data_offset);
  }

  void set_offset(int x) {
    if (*addr_at(instruction_offset)   == instruction_operandsize_prefix &&
        *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
      set_int_at(data_offset+1, x); // Not SSE instructions
    }
    else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
      set_int_at(data_offset+1, x);
    }
    else if (*addr_at(instruction_offset) == instruction_code_xor ||
             *addr_at(instruction_offset) == instruction_code_xmm_ss_prefix ||
             *addr_at(instruction_offset) == instruction_code_xmm_sd_prefix ||
             *addr_at(instruction_offset) == instruction_operandsize_prefix) {
      set_int_at(data_offset+2, x);
    }
    else set_int_at(data_offset, x);
  }

  void add_offset_in_bytes(int add_offset) { set_offset(offset() + add_offset); }
  void copy_instruction_to(address new_instruction_address);

  void verify();
  void print();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at(address address);
};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}
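
// Example (illustrative only): relocating a register-relative access by
// adjusting its 32-bit displacement in place:
//
//   NativeMovRegMem* mov = nativeMovRegMem_at(pc);
//   mov->add_offset_in_bytes(wordSize);  // e.g. account for an extra stack slot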

class NativeMovRegMemPatching: public NativeMovRegMem {
 private:
  friend NativeMovRegMemPatching* nativeMovRegMemPatching_at(address address) {
    NativeMovRegMemPatching* test = (NativeMovRegMemPatching*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};


// An interface for accessing/manipulating native leal instructions of the form:
//      leal reg, [reg + offset]

class NativeLoadAddress: public NativeMovRegMem {
 public:
  enum Intel_specific_constants {
    instruction_code = 0x8D
  };

  void verify();
  void print();

  // unit test stuff
  static void test() {}

 private:
  friend NativeLoadAddress* nativeLoadAddress_at(address address) {
    NativeLoadAddress* test = (NativeLoadAddress*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};

#endif // AMD64

// jump rel32off

class NativeJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code         = 0xe9,
    instruction_size         = 5,
    instruction_offset       = 0,
    data_offset              = 1,
    next_instruction_offset  = 5
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  address jump_destination() const {
    address dest = (int_at(data_offset)+next_instruction_address());
#ifdef AMD64
    // A jump to self is reported as -1; set_jump_destination() below encodes
    // -1 as a jump to self (offset -5), so the two conventions round-trip.
    dest = (dest == (address) this) ? (address) -1 : dest;
#endif // AMD64
    return dest;
  }

  void set_jump_destination(address dest) {
    intptr_t val = dest - next_instruction_address();
#ifdef AMD64
    if (dest == (address) -1) { // can't encode jump to -1
      val = -5; // jump to self
    } else {
      assert((labs(val) & 0xFFFFFFFF00000000) == 0,
             "must be 32bit offset");
    }
#endif // AMD64
    set_int_at(data_offset, (jint)val);
  }

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
  NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}
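
// Example (illustrative only): the jump is encoded as 0xE9 followed by a
// 32-bit displacement relative to the next instruction, so redirecting an
// existing jump looks like
//
//   NativeJump* jump = nativeJump_at(pc);
//   jump->set_jump_destination(new_target);  // writes new_target - (pc + 5)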

// Handles all kinds of jumps on Intel: long/far, conditional/unconditional.
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    // Most size/offset constants do not apply here, since the lengths and
    // offsets depend on the actual jump used.
    // Instruction codes:
    //   Unconditional jumps: 0xE9   (rel32off), 0xEB (rel8off)
    //   Conditional jumps:   0x0F8x (rel32off), 0x7x (rel8off)
    unconditional_long_jump  = 0xe9,
    unconditional_short_jump = 0xeb,
    instruction_size = 5
  };

  address instruction_address() const  { return addr_at(0); }
  address jump_destination() const;

  // Creation
  inline friend NativeGeneralJump* nativeGeneralJump_at(address address);

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  NativeGeneralJump* jump = (NativeGeneralJump*)(address);
  debug_only(jump->verify();)
  return jump;
}
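
// Example (illustrative only): planting an unconditional jump over existing
// code and reading it back through the accessor:
//
//   NativeGeneralJump::insert_unconditional(code_pos, stub_entry);
//   address target = nativeGeneralJump_at(code_pos)->jump_destination();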

class NativePopReg : public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code         = 0x58,
    instruction_size         = 1,
    instruction_offset       = 0,
    data_offset              = 1,
    next_instruction_offset  = 1
  };

  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};


class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code         = 0x0B0F, // Real byte order is: 0x0F, 0x0B
    instruction_size         = 2,
    instruction_offset       = 0,
    next_instruction_offset  = 2
  };

  // Insert an illegal opcode at the specified address
  static void insert(address code_pos);
};

// return instruction that does not pop any values off the stack
class NativeReturn: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code         = 0xC3,
    instruction_size         = 1,
    instruction_offset       = 0,
    next_instruction_offset  = 1
  };
};

// return instruction that does pop values off the stack
class NativeReturnX: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code         = 0xC2,
    instruction_size         = 2,
    instruction_offset       = 0,
    next_instruction_offset  = 2
  };
};

// Simple test vs memory
class NativeTstRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code_memXregl = 0x85
  };
};

inline bool NativeInstruction::is_illegal()    { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; }
inline bool NativeInstruction::is_call()       { return ubyte_at(0) == NativeCall::instruction_code; }
inline bool NativeInstruction::is_return()     { return ubyte_at(0) == NativeReturn::instruction_code ||
                                                        ubyte_at(0) == NativeReturnX::instruction_code; }
inline bool NativeInstruction::is_jump()       { return ubyte_at(0) == NativeJump::instruction_code ||
                                                        ubyte_at(0) == 0xEB; /* short jump */ }
inline bool NativeInstruction::is_cond_jump()  { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                        (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64
  return ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
         ubyte_at(1) == 0x05 && // 00 rax 101
         ((intptr_t) addr_at(6)) + int_at(2) == (intptr_t) os::get_polling_page();
#else
  return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2regl ||
           ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) &&
         (ubyte_at(1)&0xC7) == 0x05 && /* Mod R/M == disp32 */
         (os::is_poll_address((address)int_at(2)));
#endif // AMD64
}
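
// Example (illustrative only): on AMD64 the safepoint poll is a rip-relative
// "test eax, [polling_page]" and matches the byte pattern
//
//   85 05 dd dd dd dd       test   eax, [rip + disp32]
//
// where rip (the address of the next instruction, addr_at(6)) plus the disp32
// read at offset 2 must equal os::get_polling_page().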

inline bool NativeInstruction::is_mov_literal64() {
#ifdef AMD64
  return ((ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB) &&
          (ubyte_at(1) & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
#else
  return false;
#endif // AMD64
}
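
// Example (illustrative only): a 64-bit literal load such as
//
//   48 B8 dd dd dd dd dd dd dd dd    mov rax, imm64
//
// starts with REX.W (0x48) or REX.WB (0x49) followed by an opcode byte in the
// range 0xB8..0xBF, whose low three bits select the register; masking those
// bits out with register_mask leaves 0xB8.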