8233081: C1: PatchingStub for field access copies too much
Reviewed-by: thartmann, dlong
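
The PatchingStub for an access_field_id patch used to copy the complete
field-access instruction sequence into the stub, although that copy is never
executed, only patched. It is enough to copy up to the end of the 4-byte
offset that gets patched. This changeset removes the per-platform
NativeMovRegMem::next_instruction_address() and copy_instruction_to()
helpers and introduces num_bytes_to_end_of_patch(), which c1_CodeStubs.hpp
uses to shrink _bytes_to_copy, clamped to NativeGeneralJump::instruction_size
(see the note after the diff).

For illustration, a minimal standalone C++ model of the x86 logic changed
below. This is not HotSpot code: the example bytes are made up, and it
assumes an instruction with no prefix bytes (the real instruction_start()
skips those), so the mod/rm byte sits at insn[1].

    #include <cstdio>

    static const int data_offset = 2;  // opcode + mod/rm precede the 32-bit offset

    // Mirrors NativeMovRegMem::patch_offset(): mod/rm low bits 100 mean an SIB
    // byte follows, pushing the patched offset one byte further into the insn.
    static int patch_offset(const unsigned char* insn) {
      int off = data_offset;
      if ((insn[1] & 7) == 0x4) {
        off++;  // skip the SIB byte
      }
      return off;
    }

    // Mirrors num_bytes_to_end_of_patch(): the stub only needs the bytes up
    // to the end of the patched 4-byte offset, not the whole instruction.
    static int num_bytes_to_end_of_patch(const unsigned char* insn) {
      return patch_offset(insn) + 4;
    }

    int main() {
      const unsigned char mov_rbx[] = { 0x8B, 0x83, 0x78, 0x56, 0x34, 0x12 };       // mov eax, [rbx+disp32]
      const unsigned char mov_rsp[] = { 0x8B, 0x84, 0x24, 0x78, 0x56, 0x34, 0x12 }; // mov eax, [rsp+disp32], has SIB
      printf("no SIB: patch ends at byte %d\n", num_bytes_to_end_of_patch(mov_rbx)); // 6
      printf("SIB:    patch ends at byte %d\n", num_bytes_to_end_of_patch(mov_rsp)); // 7
      return 0;
    }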
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp Thu Oct 31 17:16:36 2019 +0100
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp Tue Nov 05 11:53:46 2019 +0100
@@ -287,8 +287,6 @@
//-------------------------------------------------------------------
-address NativeMovRegMem::instruction_address() const { return addr_at(instruction_offset); }
-
int NativeMovRegMem::offset() const {
address pc = instruction_address();
unsigned insn = *(unsigned*)pc;
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp Thu Oct 31 17:16:36 2019 +0100
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp Tue Nov 05 11:53:46 2019 +0100
@@ -381,11 +381,11 @@
public:
// helper
- int instruction_start() const;
+ int instruction_start() const { return instruction_offset; }

- address instruction_address() const;
+ address instruction_address() const { return addr_at(instruction_offset); }

- address next_instruction_address() const;
+ int num_bytes_to_end_of_patch() const { return instruction_offset + instruction_size; }
int offset() const;
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp Thu Oct 31 17:16:36 2019 +0100
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp Tue Nov 05 11:53:46 2019 +0100
@@ -349,6 +349,11 @@
// (field access patching is handled differently in that case)
class NativeMovRegMem: public NativeInstruction {
public:
+ enum arm_specific_constants {
+ instruction_size = 8
+ };
+
+ int num_bytes_to_end_of_patch() const { return instruction_size; }
int offset() const;
void set_offset(int x);
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp Thu Oct 31 17:16:36 2019 +0100
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp Tue Nov 05 11:53:46 2019 +0100
@@ -462,6 +462,8 @@
address instruction_address() const { return addr_at(0); }
+ int num_bytes_to_end_of_patch() const { return instruction_size; }
+
intptr_t offset() const {
#ifdef VM_LITTLE_ENDIAN
short *hi_ptr = (short*)(addr_at(0));
--- a/src/hotspot/cpu/s390/nativeInst_s390.hpp Thu Oct 31 17:16:36 2019 +0100
+++ b/src/hotspot/cpu/s390/nativeInst_s390.hpp Tue Nov 05 11:53:46 2019 +0100
@@ -535,6 +535,12 @@
inline NativeMovRegMem* nativeMovRegMem_at (address address);
class NativeMovRegMem: public NativeInstruction {
public:
+ enum z_specific_constants {
+ instruction_size = 12 // load_const used with access_field_id
+ };
+
+ int num_bytes_to_end_of_patch() const { return instruction_size; }
+
intptr_t offset() const {
return nativeMovConstReg_at(addr_at(0))->data();
}
--- a/src/hotspot/cpu/sparc/nativeInst_sparc.cpp Thu Oct 31 17:16:36 2019 +0100
+++ b/src/hotspot/cpu/sparc/nativeInst_sparc.cpp Tue Nov 05 11:53:46 2019 +0100
@@ -574,15 +574,6 @@
//-------------------------------------------------------------------
-void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
- Untested("copy_instruction_to");
- int instruction_size = next_instruction_address() - instruction_address();
- for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
- *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
- }
-}
-
-
void NativeMovRegMem::verify() {
NativeInstruction::verify();
// make sure code pattern is actually a "ld" or "st" of some sort.
--- a/src/hotspot/cpu/sparc/nativeInst_sparc.hpp Thu Oct 31 17:16:36 2019 +0100
+++ b/src/hotspot/cpu/sparc/nativeInst_sparc.hpp Tue Nov 05 11:53:46 2019 +0100
@@ -576,7 +576,8 @@
// sethi and the add. The nop is required to be in the delay slot of the call instruction
// which overwrites the sethi during patching.
class NativeMovConstRegPatching;
-inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);class NativeMovConstRegPatching: public NativeInstruction {
+inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);
+class NativeMovConstRegPatching: public NativeInstruction {
public:
enum Sparc_specific_constants {
sethi_offset = 0,
@@ -664,10 +665,13 @@
return (is_op(i0, Assembler::ldst_op));
}
- address instruction_address() const { return addr_at(0); }
- address next_instruction_address() const {
- return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
+ address instruction_address() const { return addr_at(0); }
+
+ int num_bytes_to_end_of_patch() const {
+ return is_immediate()? BytesPerInstWord :
+ NativeMovConstReg::instruction_size;
}
+
intptr_t offset() const {
return is_immediate()? inv_simm(long_at(0), offset_width) :
nativeMovConstReg_at(addr_at(0))->data();
@@ -684,8 +688,6 @@
set_offset (offset() + radd_offset);
}
- void copy_instruction_to(address new_instruction_address);
-
void verify();
void print ();
--- a/src/hotspot/cpu/x86/nativeInst_x86.cpp Thu Oct 31 17:16:36 2019 +0100
+++ b/src/hotspot/cpu/x86/nativeInst_x86.cpp Tue Nov 05 11:53:46 2019 +0100
@@ -355,60 +355,7 @@
return off;
}
-address NativeMovRegMem::instruction_address() const {
- return addr_at(instruction_start());
-}
-
-address NativeMovRegMem::next_instruction_address() const {
- address ret = instruction_address() + instruction_size;
- u_char instr_0 = *(u_char*) instruction_address();
- switch (instr_0) {
- case instruction_operandsize_prefix:
-
- fatal("should have skipped instruction_operandsize_prefix");
- break;
-
- case instruction_extended_prefix:
- fatal("should have skipped instruction_extended_prefix");
- break;
-
- case instruction_code_mem2reg_movslq: // 0x63
- case instruction_code_mem2reg_movzxb: // 0xB6
- case instruction_code_mem2reg_movsxb: // 0xBE
- case instruction_code_mem2reg_movzxw: // 0xB7
- case instruction_code_mem2reg_movsxw: // 0xBF
- case instruction_code_reg2mem: // 0x89 (q/l)
- case instruction_code_mem2reg: // 0x8B (q/l)
- case instruction_code_reg2memb: // 0x88
- case instruction_code_mem2regb: // 0x8a
-
- case instruction_code_lea: // 0x8d
-
- case instruction_code_float_s: // 0xd9 fld_s a
- case instruction_code_float_d: // 0xdd fld_d a
-
- case instruction_code_xmm_load: // 0x10
- case instruction_code_xmm_store: // 0x11
- case instruction_code_xmm_lpd: // 0x12
- {
- // If there is an SIB then instruction is longer than expected
- u_char mod_rm = *(u_char*)(instruction_address() + 1);
- if ((mod_rm & 7) == 0x4) {
- ret++;
- }
- }
- case instruction_code_xor:
- fatal("should have skipped xor lead in");
- break;
-
- default:
- fatal("not a NativeMovRegMem");
- }
- return ret;
-
-}
-
-int NativeMovRegMem::offset() const{
+int NativeMovRegMem::patch_offset() const {
int off = data_offset + instruction_start();
u_char mod_rm = *(u_char*)(instruction_address() + 1);
// nnnn(r12|rsp) isn't coded as simple mod/rm since that is
@@ -417,19 +364,7 @@
if ((mod_rm & 7) == 0x4) {
off++;
}
- return int_at(off);
-}
-
-void NativeMovRegMem::set_offset(int x) {
- int off = data_offset + instruction_start();
- u_char mod_rm = *(u_char*)(instruction_address() + 1);
- // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
- // the encoding to use an SIB byte. Which will have the nnnn
- // field off by one byte
- if ((mod_rm & 7) == 0x4) {
- off++;
- }
- set_int_at(off, x);
+ return off;
}
void NativeMovRegMem::verify() {
--- a/src/hotspot/cpu/x86/nativeInst_x86.hpp Thu Oct 31 17:16:36 2019 +0100
+++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp Tue Nov 05 11:53:46 2019 +0100
@@ -361,7 +361,6 @@
instruction_VEX_prefix_3bytes = Assembler::VEX_3bytes,
instruction_EVEX_prefix_4bytes = Assembler::EVEX_4bytes,
- instruction_size = 4,
instruction_offset = 0,
data_offset = 2,
next_instruction_offset = 4
@@ -370,15 +369,26 @@
// helper
int instruction_start() const;
- address instruction_address() const;
+ address instruction_address() const {
+ return addr_at(instruction_start());
+ }

- address next_instruction_address() const;
+ int num_bytes_to_end_of_patch() const {
+ return patch_offset() + sizeof(jint);
+ }

- int offset() const;
+ int offset() const {
+ return int_at(patch_offset());
+ }

- void set_offset(int x);
+ void set_offset(int x) {
+ set_int_at(patch_offset(), x);
+ }

- void add_offset_in_bytes(int add_offset) { set_offset ( ( offset() + add_offset ) ); }
+ void add_offset_in_bytes(int add_offset) {
+ int patch_off = patch_offset();
+ set_int_at(patch_off, int_at(patch_off) + add_offset);
+ }
void verify();
void print ();
@@ -387,6 +397,7 @@
static void test() {}
private:
+ int patch_offset() const;
inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};
--- a/src/hotspot/share/c1/c1_CodeStubs.hpp Thu Oct 31 17:16:36 2019 +0100
+++ b/src/hotspot/share/c1/c1_CodeStubs.hpp Tue Nov 05 11:53:46 2019 +0100
@@ -409,7 +409,7 @@
if (_id == PatchingStub::access_field_id) {
// embed a fixed offset to handle long patches which need to be offset by a word.
// the patching code will just add the field offset field to this offset so
- // that we can refernce either the high or low word of a double word field.
+ // that we can reference either the high or low word of a double word field.
int field_offset = 0;
switch (patch_code) {
case lir_patch_low: field_offset = lo_word_offset_in_bytes; break;
@@ -419,6 +419,8 @@
}
NativeMovRegMem* n_move = nativeMovRegMem_at(pc_start());
n_move->set_offset(field_offset);
+ // Copy will never get executed, so only copy the part which is required for patching.
+ _bytes_to_copy = MAX2(n_move->num_bytes_to_end_of_patch(), (int)NativeGeneralJump::instruction_size);
} else if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
assert(_obj != noreg, "must have register object for load_klass/load_mirror");
#ifdef ASSERT
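
Note on the c1_CodeStubs.hpp hunk: the MAX2 clamp exists because, while a
patch is in flight, the runtime writes a NativeGeneralJump over the start of
the patch site to redirect execution to the stub, so the saved region must
cover at least that jump even when the patched offset ends earlier. A
minimal sketch of the bound, assuming the x86 jump size of 5 bytes (jmp
rel32); the free-standing function name is made up for this sketch:

    #include <algorithm>

    const int jump_size = 5;  // assumed NativeGeneralJump::instruction_size on x86

    int bytes_to_copy(int num_bytes_to_end_of_patch) {
      // Cover the patched offset, but never save less than the jump that is
      // written over the start of the patch site during patching.
      return std::max(num_bytes_to_end_of_patch, jump_size);
    }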