author      kvn
date        Tue, 04 Sep 2018 12:44:02 -0700
changeset   51633:21154cb84d2a
parent      51632:ed04bc1ff453
child       51634:3f189f451ff1
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp  Tue Sep 04 12:44:02 2018 -0700
@@ -592,7 +592,7 @@
   // Required platform-specific helpers for Label::patch_instructions.
   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
   static int pd_patch_instruction_size(address branch, address target);
-  static void pd_patch_instruction(address branch, address target) {
+  static void pd_patch_instruction(address branch, address target, const char* file = NULL, int line = 0) {
     pd_patch_instruction_size(branch, target);
   }
   static address pd_call_destination(address branch) {
--- a/src/hotspot/cpu/arm/macroAssembler_arm.hpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.hpp  Tue Sep 04 12:44:02 2018 -0700
@@ -1276,7 +1276,7 @@
     inc_counter((address) counter_addr, tmpreg1, tmpreg2);
   }
 
-  void pd_patch_instruction(address branch, address target);
+  void pd_patch_instruction(address branch, address target, const char* file, int line);
 
   // Loading and storing values by size and signed-ness;
   // size must not exceed wordSize (i.e. 8-byte values are not supported on 32-bit ARM);
--- a/src/hotspot/cpu/arm/macroAssembler_arm.inline.hpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.inline.hpp  Tue Sep 04 12:44:02 2018 -0700
@@ -30,7 +30,7 @@
 #include "code/codeCache.hpp"
 #include "runtime/handles.inline.hpp"
 
-inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
+inline void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
   int instr = *(int*)branch;
   int new_offset = (int)(target - branch NOT_AARCH64(- 8));
   assert((new_offset & 3) == 0, "bad alignment");
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp  Tue Sep 04 12:44:02 2018 -0700
@@ -162,7 +162,7 @@
   // branch, jump
   //
 
-  inline void pd_patch_instruction(address branch, address target);
+  inline void pd_patch_instruction(address branch, address target, const char* file, int line);
   NOT_PRODUCT(static void pd_print_patched_instruction(address branch);)
 
   // Conditional far branch for destinations encodable in 24+2 bits.
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp  Tue Sep 04 12:44:02 2018 -0700
@@ -186,7 +186,7 @@
   load_const(d, obj_addr);
 }
 
-inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
+inline void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
   jint& stub_inst = *(jint*) branch;
   stub_inst = patched_branch(target - branch, stub_inst, 0);
 }
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp  Tue Sep 04 12:44:02 2018 -0700
@@ -1904,7 +1904,7 @@
 
 // Only called when binding labels (share/vm/asm/assembler.cpp)
 // Pass arguments as intended. Do not pre-calculate distance.
-void MacroAssembler::pd_patch_instruction(address branch, address target) {
+void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
   unsigned long stub_inst;
   int inst_len = get_instruction(branch, &stub_inst);
 
--- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp  Tue Sep 04 12:44:02 2018 -0700
@@ -360,7 +360,7 @@
 
   // Use one generic function for all branch patches.
   static unsigned long patched_branch(address dest_pos, unsigned long inst, address inst_pos);
-  void pd_patch_instruction(address branch, address target);
+  void pd_patch_instruction(address branch, address target, const char* file, int line);
 
   // Extract relative address from "relative" instructions.
   static long get_pcrel_offset(unsigned long inst);
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp  Tue Sep 04 12:44:02 2018 -0700
@@ -659,7 +659,7 @@
 
   // Required platform-specific helpers for Label::patch_instructions.
   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
-  void pd_patch_instruction(address branch, address target);
+  void pd_patch_instruction(address branch, address target, const char* file, int line);
 
   // sethi Macro handles optimizations and relocations
 private:
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp  Tue Sep 04 12:44:02 2018 -0700
@@ -38,7 +38,7 @@
 }
 
 
-inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
+inline void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
   jint& stub_inst = *(jint*) branch;
   stub_inst = patched_branch(target - branch, stub_inst, 0);
 }
--- a/src/hotspot/cpu/x86/assembler_x86.cpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp  Tue Sep 04 12:44:02 2018 -0700
@@ -2041,7 +2041,7 @@
   }
 }
 
-void Assembler::jccb(Condition cc, Label& L) {
+void Assembler::jccb_0(Condition cc, Label& L, const char* file, int line) {
   if (L.is_bound()) {
     const int short_size = 2;
     address entry = target(L);
@@ -2051,7 +2051,7 @@
     if (delta != 0) {
       dist += (dist < 0 ? (-delta) :delta);
     }
-    assert(is8bit(dist), "Dispacement too large for a short jmp");
+    assert(is8bit(dist), "Dispacement too large for a short jmp at %s:%d", file, line);
 #endif
     intptr_t offs = (intptr_t)entry - (intptr_t)pc();
     // 0111 tttn #8-bit disp
@@ -2059,7 +2059,7 @@
     emit_int8((offs - short_size) & 0xFF);
   } else {
     InstructionMark im(this);
-    L.add_patch_at(code(), locator());
+    L.add_patch_at(code(), locator(), file, line);
     emit_int8(0x70 | cc);
     emit_int8(0);
   }
@@ -2114,7 +2114,7 @@
   emit_data(disp, rspec.reloc(), call32_operand);
 }
 
-void Assembler::jmpb(Label& L) {
+void Assembler::jmpb_0(Label& L, const char* file, int line) {
   if (L.is_bound()) {
     const int short_size = 2;
     address entry = target(L);
@@ -2125,14 +2125,14 @@
     if (delta != 0) {
       dist += (dist < 0 ? (-delta) :delta);
     }
-    assert(is8bit(dist), "Dispacement too large for a short jmp");
+    assert(is8bit(dist), "Dispacement too large for a short jmp at %s:%d", file, line);
 #endif
     intptr_t offs = entry - pc();
     emit_int8((unsigned char)0xEB);
     emit_int8((offs - short_size) & 0xFF);
   } else {
     InstructionMark im(this);
-    L.add_patch_at(code(), locator());
+    L.add_patch_at(code(), locator(), file, line);
     emit_int8((unsigned char)0xEB);
     emit_int8(0);
   }
--- a/src/hotspot/cpu/x86/assembler_x86.hpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp  Tue Sep 04 12:44:02 2018 -0700
@@ -1302,7 +1302,11 @@
   // WARNING: be very careful using this for forward jumps. If the label is
   // not bound within an 8-bit offset of this instruction, a run-time error
   // will occur.
-  void jccb(Condition cc, Label& L);
+
+  // Use macro to record file and line number.
+  #define jccb(cc, L) jccb_0(cc, L, __FILE__, __LINE__)
+
+  void jccb_0(Condition cc, Label& L, const char* file, int line);
 
   void jmp(Address entry);    // pc <- entry
 
@@ -1315,7 +1319,11 @@
   // WARNING: be very careful using this for forward jumps. If the label is
   // not bound within an 8-bit offset of this instruction, a run-time error
   // will occur.
-  void jmpb(Label& L);
+
+  // Use macro to record file and line number.
+  #define jmpb(L) jmpb_0(L, __FILE__, __LINE__)
+
+  void jmpb_0(Label& L, const char* file, int line);
 
   void ldmxcsr( Address src );
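The macro wrappers above let every existing jccb/jmpb call site pass its own __FILE__/__LINE__ to the renamed worker without touching the callers. The following standalone C++ sketch (not HotSpot code; Label, jmpb_0 and the check are simplified stand-ins) illustrates the pattern:

// Minimal sketch of the call-site-recording macro used above: the macro
// forwards __FILE__/__LINE__ so the worker can report where the short
// branch was emitted when its displacement check fails.
#include <cstdio>

struct Label { bool bound = false; };

// Worker carries the caller's location; "_0" mirrors jccb_0/jmpb_0.
void jmpb_0(Label& l, const char* file, int line) {
  bool displacement_fits = l.bound;   // stand-in for the is8bit(dist) check
  if (!displacement_fits) {
    std::fprintf(stderr, "short jmp too far, emitted at %s:%d\n", file, line);
  }
}

// Existing call sites keep writing jmpb(L); the macro injects the location.
#define jmpb(L) jmpb_0(L, __FILE__, __LINE__)

int main() {
  Label l;   // unbound label, so the diagnostic fires with this file:line
  jmpb(l);
  return 0;
}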
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp  Tue Sep 04 12:44:02 2018 -0700
@@ -3605,7 +3605,7 @@
       }
     } else {
       __ testptr(tmp, tmp);
-      __ jccb(Assembler::notZero, update);
+      __ jcc(Assembler::notZero, update);
       __ stop("unexpect null obj");
 #endif
     }
@@ -3620,7 +3620,7 @@
         __ push(tmp);
         __ mov_metadata(tmp, exact_klass->constant_encoding());
         __ cmpptr(tmp, Address(rsp, 0));
-        __ jccb(Assembler::equal, ok);
+        __ jcc(Assembler::equal, ok);
         __ stop("exact klass and actual klass differ");
         __ bind(ok);
         __ pop(tmp);
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp  Tue Sep 04 12:44:02 2018 -0700
@@ -4166,19 +4166,22 @@
   if ((dst_enc < 16) && (nds_enc < 16)) {
     vandps(dst, nds, negate_field, vector_len);
   } else if ((src_enc < 16) && (dst_enc < 16)) {
+    // Use src scratch register
     evmovdqul(src, nds, Assembler::AVX_512bit);
     vandps(dst, src, negate_field, vector_len);
+  } else if (dst_enc < 16) {
+    evmovdqul(dst, nds, Assembler::AVX_512bit);
+    vandps(dst, dst, negate_field, vector_len);
+  } else if (nds_enc < 16) {
+    vandps(nds, nds, negate_field, vector_len);
+    evmovdqul(dst, nds, Assembler::AVX_512bit);
   } else if (src_enc < 16) {
    evmovdqul(src, nds, Assembler::AVX_512bit);
    vandps(src, src, negate_field, vector_len);
    evmovdqul(dst, src, Assembler::AVX_512bit);
-  } else if (dst_enc < 16) {
-    evmovdqul(src, xmm0, Assembler::AVX_512bit);
-    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    vandps(dst, xmm0, negate_field, vector_len);
-    evmovdqul(xmm0, src, Assembler::AVX_512bit);
  } else {
    if (src_enc != dst_enc) {
+      // Use src scratch register
      evmovdqul(src, xmm0, Assembler::AVX_512bit);
      evmovdqul(xmm0, nds, Assembler::AVX_512bit);
      vandps(xmm0, xmm0, negate_field, vector_len);
@@ -4201,17 +4204,19 @@
   if ((dst_enc < 16) && (nds_enc < 16)) {
     vandpd(dst, nds, negate_field, vector_len);
   } else if ((src_enc < 16) && (dst_enc < 16)) {
+    // Use src scratch register
     evmovdqul(src, nds, Assembler::AVX_512bit);
     vandpd(dst, src, negate_field, vector_len);
+  } else if (dst_enc < 16) {
+    evmovdqul(dst, nds, Assembler::AVX_512bit);
+    vandpd(dst, dst, negate_field, vector_len);
+  } else if (nds_enc < 16) {
+    vandpd(nds, nds, negate_field, vector_len);
+    evmovdqul(dst, nds, Assembler::AVX_512bit);
   } else if (src_enc < 16) {
     evmovdqul(src, nds, Assembler::AVX_512bit);
     vandpd(src, src, negate_field, vector_len);
     evmovdqul(dst, src, Assembler::AVX_512bit);
-  } else if (dst_enc < 16) {
-    evmovdqul(src, xmm0, Assembler::AVX_512bit);
-    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    vandpd(dst, xmm0, negate_field, vector_len);
-    evmovdqul(xmm0, src, Assembler::AVX_512bit);
   } else {
     if (src_enc != dst_enc) {
       evmovdqul(src, xmm0, Assembler::AVX_512bit);
@@ -4282,6 +4287,7 @@
       evmovdqul(nds, xmm0, Assembler::AVX_512bit);
       evmovdqul(xmm0, dst, Assembler::AVX_512bit);
       Assembler::vpaddb(xmm0, xmm0, src, vector_len);
+      evmovdqul(dst, xmm0, Assembler::AVX_512bit);
       evmovdqul(xmm0, nds, Assembler::AVX_512bit);
     }
   }
@@ -4330,7 +4336,7 @@
   } else if (dst_enc < 16) {
     Assembler::vpaddw(dst, dst, src, vector_len);
   } else if (nds_enc < 16) {
-    // implies dst_enc in upper bank with src as scratch
+    // implies dst_enc in upper bank with nds as scratch
     evmovdqul(nds, dst, Assembler::AVX_512bit);
     Assembler::vpaddw(nds, nds, src, vector_len);
     evmovdqul(dst, nds, Assembler::AVX_512bit);
@@ -4339,6 +4345,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpaddw(xmm0, xmm0, src, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -4522,6 +4529,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpmullw(xmm0, xmm0, src, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -4578,7 +4586,8 @@
     // worse case scenario, all regs in upper bank
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
-    Assembler::vpsubw(xmm0, xmm0, src, vector_len);
+    Assembler::vpsubb(xmm0, xmm0, src, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -4636,6 +4645,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpsubw(xmm0, xmm0, src, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -4649,7 +4659,7 @@
   } else if ((dst_enc < 16) && (shift_enc < 16)) {
     Assembler::vpsraw(dst, dst, shift, vector_len);
   } else if ((dst_enc < 16) && (nds_enc < 16)) {
-    // use nds_enc as scratch with shift
+    // use nds as scratch with shift
     evmovdqul(nds, shift, Assembler::AVX_512bit);
     Assembler::vpsraw(dst, dst, nds, vector_len);
   } else if ((shift_enc < 16) && (nds_enc < 16)) {
@@ -4664,7 +4674,7 @@
     Assembler::vpsraw(dst, dst, xmm0, vector_len);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else if (nds_enc < 16) {
-    // use nds as dest as temps
+    // use nds and dst as temps
     evmovdqul(nds, dst, Assembler::AVX_512bit);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, shift, Assembler::AVX_512bit);
@@ -4677,8 +4687,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, shift, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
-    Assembler::vpsllw(xmm0, xmm0, xmm1, vector_len);
-    evmovdqul(xmm1, dst, Assembler::AVX_512bit);
+    Assembler::vpsraw(xmm0, xmm0, xmm1, vector_len);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
     pop_zmm(xmm1);
@@ -4702,6 +4711,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpsraw(xmm0, xmm0, shift, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -4715,7 +4725,7 @@
   } else if ((dst_enc < 16) && (shift_enc < 16)) {
     Assembler::vpsrlw(dst, dst, shift, vector_len);
   } else if ((dst_enc < 16) && (nds_enc < 16)) {
-    // use nds_enc as scratch with shift
+    // use nds as scratch with shift
     evmovdqul(nds, shift, Assembler::AVX_512bit);
     Assembler::vpsrlw(dst, dst, nds, vector_len);
   } else if ((shift_enc < 16) && (nds_enc < 16)) {
@@ -4730,7 +4740,7 @@
     Assembler::vpsrlw(dst, dst, xmm0, vector_len);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else if (nds_enc < 16) {
-    // use nds as dest as temps
+    // use nds and dst as temps
     evmovdqul(nds, dst, Assembler::AVX_512bit);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, shift, Assembler::AVX_512bit);
@@ -4743,8 +4753,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, shift, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
-    Assembler::vpsllw(xmm0, xmm0, xmm1, vector_len);
-    evmovdqul(xmm1, dst, Assembler::AVX_512bit);
+    Assembler::vpsrlw(xmm0, xmm0, xmm1, vector_len);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
     pop_zmm(xmm1);
@@ -4768,6 +4777,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpsrlw(xmm0, xmm0, shift, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -4781,7 +4791,7 @@
   } else if ((dst_enc < 16) && (shift_enc < 16)) {
     Assembler::vpsllw(dst, dst, shift, vector_len);
   } else if ((dst_enc < 16) && (nds_enc < 16)) {
-    // use nds_enc as scratch with shift
+    // use nds as scratch with shift
     evmovdqul(nds, shift, Assembler::AVX_512bit);
     Assembler::vpsllw(dst, dst, nds, vector_len);
   } else if ((shift_enc < 16) && (nds_enc < 16)) {
@@ -4796,7 +4806,7 @@
     Assembler::vpsllw(dst, dst, xmm0, vector_len);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else if (nds_enc < 16) {
-    // use nds as dest as temps
+    // use nds and dst as temps
     evmovdqul(nds, dst, Assembler::AVX_512bit);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, shift, Assembler::AVX_512bit);
@@ -4810,7 +4820,6 @@
     evmovdqul(xmm1, shift, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpsllw(xmm0, xmm0, xmm1, vector_len);
-    evmovdqul(xmm1, dst, Assembler::AVX_512bit);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
     pop_zmm(xmm1);
@@ -4834,6 +4843,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpsllw(xmm0, xmm0, shift, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -7130,7 +7140,7 @@
 
   bind(RET_NOT_FOUND);
   movl(result, -1);
-  jmpb(CLEANUP);
+  jmp(CLEANUP);
 
   bind(FOUND_SUBSTR);
   // Compute start addr of substr
@@ -7148,7 +7158,7 @@
   addl(tmp, cnt2);
   // Found result if we matched whole substring.
   cmpl(tmp, stride);
-  jccb(Assembler::lessEqual, RET_FOUND);
+  jcc(Assembler::lessEqual, RET_FOUND);
 
   // Repeat search for small substring (<= 8 chars)
   // from new point 'str1' without reloading substring.
@@ -7248,7 +7258,7 @@
     jcc(Assembler::carryClear, FOUND_CHAR);
     addptr(result, 32);
     subl(tmp, 2*stride);
-    jccb(Assembler::notZero, SCAN_TO_16_CHAR_LOOP);
+    jcc(Assembler::notZero, SCAN_TO_16_CHAR_LOOP);
     jmp(SCAN_TO_8_CHAR);
     bind(SCAN_TO_8_CHAR_INIT);
     movdl(vec1, ch);
@@ -7278,7 +7288,7 @@
     jcc(Assembler::carryClear, FOUND_CHAR);
     addptr(result, 16);
     subl(tmp, stride);
-    jccb(Assembler::notZero, SCAN_TO_8_CHAR_LOOP);
+    jcc(Assembler::notZero, SCAN_TO_8_CHAR_LOOP);
   bind(SCAN_TO_CHAR);
   testl(cnt1, cnt1);
   jcc(Assembler::zero, RET_NOT_FOUND);
@@ -7857,7 +7867,7 @@
     // Compare 16-byte vectors
     andl(result, 0x0000000f);  // tail count (in bytes)
     andl(len, 0xfffffff0);   // vector count (in bytes)
-    jccb(Assembler::zero, COMPARE_TAIL);
+    jcc(Assembler::zero, COMPARE_TAIL);
 
     lea(ary1, Address(ary1, len, Address::times_1));
     negptr(len);
@@ -7869,12 +7879,12 @@
     bind(COMPARE_WIDE_VECTORS);
     movdqu(vec1, Address(ary1, len, Address::times_1));
     ptest(vec1, vec2);
-    jccb(Assembler::notZero, TRUE_LABEL);
+    jcc(Assembler::notZero, TRUE_LABEL);
     addptr(len, 16);
     jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
 
     testl(result, result);
-    jccb(Assembler::zero, FALSE_LABEL);
+    jcc(Assembler::zero, FALSE_LABEL);
 
     movdqu(vec1, Address(ary1, result, Address::times_1, -16));
     ptest(vec1, vec2);
@@ -9069,7 +9079,7 @@
     jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found
     addq(result, 32);
     subq(length, 32);
-    jccb(Assembler::greaterEqual, VECTOR32_LOOP);
+    jcc(Assembler::greaterEqual, VECTOR32_LOOP);
     addq(length, 32);
     jcc(Assembler::equal, SAME_TILL_END); //falling through if less than 32 bytes left
     //close the branch here.
@@ -9140,24 +9150,24 @@
   load_unsigned_byte(tmp2, Address(objb, result));
   xorl(tmp1, tmp2);
   testl(tmp1, tmp1);
-  jccb(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
+  jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
   decq(length);
-  jccb(Assembler::zero, SAME_TILL_END);
+  jcc(Assembler::zero, SAME_TILL_END);
   incq(result);
   load_unsigned_byte(tmp1, Address(obja, result));
   load_unsigned_byte(tmp2, Address(objb, result));
   xorl(tmp1, tmp2);
   testl(tmp1, tmp1);
-  jccb(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
+  jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
   decq(length);
-  jccb(Assembler::zero, SAME_TILL_END);
+  jcc(Assembler::zero, SAME_TILL_END);
   incq(result);
   load_unsigned_byte(tmp1, Address(obja, result));
   load_unsigned_byte(tmp2, Address(objb, result));
   xorl(tmp1, tmp2);
   testl(tmp1, tmp1);
-  jccb(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
-  jmpb(SAME_TILL_END);
+  jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
+  jmp(SAME_TILL_END);
 
   if (UseAVX >= 2) {
     bind(VECTOR32_NOT_EQUAL);
@@ -9168,7 +9178,7 @@
     bsfq(tmp1, tmp1);
     addq(result, tmp1);
     shrq(result);
-    jmpb(DONE);
+    jmp(DONE);
   }
 
   bind(VECTOR16_NOT_EQUAL);
@@ -10590,7 +10600,7 @@
     andl(len, 0xfffffff0);    // vector count (in chars)
     andl(result, 0x0000000f);    // tail count (in chars)
     testl(len, len);
-    jccb(Assembler::zero, copy_16);
+    jcc(Assembler::zero, copy_16);
 
     // compress 16 chars per iter
     movdl(tmp1Reg, tmp5);
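A recurring fix in the "all registers in the upper bank" paths above is the added evmovdqul(dst, xmm0, ...): the operation runs in a borrowed xmm0 scratch register, and the result previously never made it back to dst before xmm0 was restored. A standalone sketch (plain ints, not AVX-512; purely illustrative) of that ordering:

#include <cassert>

int add_via_scratch(int dst, int src, int& xmm0 /* borrowed scratch */) {
  int nds = xmm0;      // evmovdqul(nds, xmm0, ...)  save scratch
  xmm0 = dst;          // evmovdqul(xmm0, dst, ...)  move operand into scratch
  xmm0 = xmm0 + src;   // Assembler::vpaddw(xmm0, xmm0, src, ...)
  dst = xmm0;          // evmovdqul(dst, xmm0, ...)  the previously missing write-back
  xmm0 = nds;          // evmovdqul(xmm0, nds, ...)  restore scratch
  return dst;
}

int main() {
  int scratch = 7;
  assert(add_via_scratch(2, 3, scratch) == 5);  // result reaches dst
  assert(scratch == 7);                         // scratch is restored
  return 0;
}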
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp  Tue Sep 04 12:44:02 2018 -0700
@@ -99,7 +99,7 @@
 
   // Required platform-specific helpers for Label::patch_instructions.
   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
-  void pd_patch_instruction(address branch, address target) {
+  void pd_patch_instruction(address branch, address target, const char* file, int line) {
     unsigned char op = branch[0];
     assert(op == 0xE8 /* call */ ||
         op == 0xE9 /* jmp */ ||
@@ -113,7 +113,7 @@
       // short offset operators (jmp and jcc)
       char* disp = (char*) &branch[1];
       int imm8 = target - (address) &disp[1];
-      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
+      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d", file, line);
       *disp = imm8;
     } else {
       int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
--- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp  Tue Sep 04 12:44:02 2018 -0700
@@ -275,7 +275,7 @@
   if (EnableJVMCI) {
     Label L;
     __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
-    __ jccb(Assembler::zero, L);
+    __ jcc(Assembler::zero, L);
     __ stop("unexpected pending monitor in deopt entry");
     __ bind(L);
   }
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp  Tue Sep 04 12:44:02 2018 -0700
@@ -4080,7 +4080,7 @@
       // make sure rdx was multiple of 8
       Label L;
       // Ignore partial flag stall after shrl() since it is debug VM
-      __ jccb(Assembler::carryClear, L);
+      __ jcc(Assembler::carryClear, L);
       __ stop("object size is not multiple of 2 - adjust this code");
       __ bind(L);
       // rdx must be > 0, no extra check needed here
--- a/src/hotspot/cpu/x86/x86.ad  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/x86/x86.ad  Tue Sep 04 12:44:02 2018 -0700
@@ -1252,8 +1252,8 @@
 
 #ifdef _LP64
 static uint size_deopt_handler() {
-  // three 5 byte instructions
-  return 15;
+  // three 5 byte instructions plus one move for unreachable address.
+  return 15+3;
 }
 #else
 static uint size_deopt_handler() {
@@ -1322,7 +1322,7 @@
 #endif
   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 
-  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
+  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow %d", (__ offset() - offset));
   __ end_a_stub();
   return offset;
 }
@@ -5645,7 +5645,7 @@
 %}
 
 instruct vadd4B_mem_evex_special(vecS dst, vecS src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 4);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 4);
   match(Set dst (AddVB dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddb $dst,$src,$mem\t! add packed4B" %}
@@ -5723,7 +5723,7 @@
 %}
 
 instruct vadd8B_mem_evex_special(vecD dst, vecD src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 8);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 8);
   match(Set dst (AddVB dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddb $dst,$src,$mem\t! add packed8B" %}
@@ -5801,7 +5801,7 @@
 %}
 
 instruct vadd16B_mem_evex_special(vecX dst, vecX src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 16);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 16);
   match(Set dst (AddVB dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddb $dst,$src,$mem\t! add packed16B" %}
@@ -5869,7 +5869,7 @@
 %}
 
 instruct vadd32B_mem_evex_special(vecY dst, vecY src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 32);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 32);
   match(Set dst (AddVB dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddb $dst,$src,$mem\t! add packed32B" %}
@@ -5970,7 +5970,7 @@
 %}
 
 instruct vadd2S_mem_evex_special(vecS dst, vecS src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 2);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 2);
   match(Set dst (AddVS dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddw $dst,$src,$mem\t! add packed2S" %}
@@ -6048,7 +6048,7 @@
 %}
 
 instruct vadd4S_mem_evex_special(vecD dst, vecD src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 4);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 4);
   match(Set dst (AddVS dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddw $dst,$src,$mem\t! add packed4S" %}
@@ -6126,7 +6126,7 @@
 %}
 
 instruct vadd8S_mem_evex_special(vecX dst, vecX src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 8);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 8);
  match(Set dst (AddVS dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddw $dst,$src,$mem\t! add packed8S" %}
@@ -6194,7 +6194,7 @@
 %}
 
 instruct vadd16S_mem_evex_special(vecY dst, vecY src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 16);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 16);
   match(Set dst (AddVS dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddw $dst,$src,$mem\t! add packed16S" %}
--- a/src/hotspot/cpu/zero/assembler_zero.cpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/zero/assembler_zero.cpp  Tue Sep 04 12:44:02 2018 -0700
@@ -49,7 +49,7 @@
 }
 #endif
 
-void Assembler::pd_patch_instruction(address branch, address target) {
+void Assembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
   ShouldNotCallThis();
 }
 
--- a/src/hotspot/cpu/zero/assembler_zero.hpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/cpu/zero/assembler_zero.hpp  Tue Sep 04 12:44:02 2018 -0700
@@ -36,7 +36,7 @@
   Assembler(CodeBuffer* code) : AbstractAssembler(code) {}
 
  public:
-  void pd_patch_instruction(address branch, address target);
+  void pd_patch_instruction(address branch, address target, const char* file, int line);
 };
 
 class MacroAssembler : public Assembler {
--- a/src/hotspot/share/asm/assembler.cpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/share/asm/assembler.cpp  Tue Sep 04 12:44:02 2018 -0700
@@ -151,12 +151,16 @@
   } // end (UseStackBanging)
 }
 
-void Label::add_patch_at(CodeBuffer* cb, int branch_loc) {
+void Label::add_patch_at(CodeBuffer* cb, int branch_loc, const char* file, int line) {
   assert(_loc == -1, "Label is unbound");
   // Don't add patch locations during scratch emit.
   if (cb->insts()->scratch_emit()) { return; }
   if (_patch_index < PatchCacheSize) {
     _patches[_patch_index] = branch_loc;
+#ifdef ASSERT
+    _lines[_patch_index] = line;
+    _files[_patch_index] = file;
+#endif
   } else {
     if (_patch_overflow == NULL) {
       _patch_overflow = cb->create_patch_overflow();
@@ -174,10 +178,16 @@
   while (_patch_index > 0) {
     --_patch_index;
     int branch_loc;
+    int line = 0;
+    const char* file = NULL;
     if (_patch_index >= PatchCacheSize) {
       branch_loc = _patch_overflow->pop();
     } else {
       branch_loc = _patches[_patch_index];
+#ifdef ASSERT
+      line = _lines[_patch_index];
+      file = _files[_patch_index];
+#endif
     }
     int branch_sect = CodeBuffer::locator_sect(branch_loc);
     address branch = cb->locator_address(branch_loc);
@@ -201,7 +211,7 @@
 #endif //ASSERT
 
     // Push the target offset into the branch instruction.
-    masm->pd_patch_instruction(branch, target);
+    masm->pd_patch_instruction(branch, target, file, line);
   }
 }
--- a/src/hotspot/share/asm/assembler.hpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/share/asm/assembler.hpp  Tue Sep 04 12:44:02 2018 -0700
@@ -73,7 +73,7 @@
  */
 class Label {
  private:
-  enum { PatchCacheSize = 4 };
+  enum { PatchCacheSize = 4 debug_only( +4 ) };
 
   // _loc encodes both the binding state (via its sign)
   // and the binding locator (via its value) of a label.
@@ -98,6 +98,11 @@
 
   // The label will be bound to a location near its users.
   bool _is_near;
+#ifdef ASSERT
+  // Sourcre file and line location of jump instruction
+  int _lines[PatchCacheSize];
+  const char* _files[PatchCacheSize];
+#endif
 
  public:
 
 /**
@@ -141,7 +146,7 @@
    * @param cb the code buffer being patched
    * @param branch_loc the locator of the branch instruction in the code buffer
    */
-  void add_patch_at(CodeBuffer* cb, int branch_loc);
+  void add_patch_at(CodeBuffer* cb, int branch_loc, const char* file = NULL, int line = 0);
 
   /**
    * Iterate over the list of patches, resolving the instructions
@@ -447,7 +452,7 @@
    * @param branch the location of the instruction to patch
    * @param masm the assembler which generated the branch
    */
-  void pd_patch_instruction(address branch, address target);
+  void pd_patch_instruction(address branch, address target, const char* file, int line);
 };
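In debug builds the patch cache grows by four entries and gains two parallel arrays that remember the source file and line of each queued branch, so patch_instructions() can hand that location to pd_patch_instruction(). A rough standalone sketch of that bookkeeping (simplified assumptions; not the real HotSpot Label or debug_only macro):

#include <cstdio>

#ifdef ASSERT
#  define debug_only(x) x
#else
#  define debug_only(x)
#endif

struct ToyLabel {
  enum { PatchCacheSize = 4 debug_only( +4 ) };  // bigger cache in debug builds

  int _patches[PatchCacheSize];                  // queued branch locators
  int _patch_index = 0;
#ifdef ASSERT
  int         _lines[PatchCacheSize];            // source line of each queued branch
  const char* _files[PatchCacheSize];            // source file of each queued branch
#endif

  void add_patch_at(int branch_loc, const char* file = nullptr, int line = 0) {
    (void)file; (void)line;                      // unused in product-style builds
    if (_patch_index < PatchCacheSize) {
      _patches[_patch_index] = branch_loc;
#ifdef ASSERT
      _lines[_patch_index] = line;               // recorded alongside the locator
      _files[_patch_index] = file;
#endif
    }
    ++_patch_index;
  }
};

int main() {
  ToyLabel l;
  l.add_patch_at(42, __FILE__, __LINE__);        // macro-expanded call sites do this
  std::printf("queued %d patch(es)\n", l._patch_index);
  return 0;
}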
--- a/src/hotspot/share/opto/compile.cpp  Tue Sep 04 18:32:28 2018 +0100
+++ b/src/hotspot/share/opto/compile.cpp  Tue Sep 04 12:44:02 2018 -0700
@@ -544,7 +544,9 @@
 
     ResourceMark rm;
     _scratch_const_size = const_size;
-    int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size);
+    int locs_size = sizeof(relocInfo) * MAX_locs_size;
+    int slop = 2 * CodeSection::end_slop(); // space between sections
+    int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size + slop + locs_size);
     blob = BufferBlob::create("Compile::scratch_buffer", size);
     // Record the buffer blob for next time.
     set_scratch_buffer_blob(blob);
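The scratch buffer is enlarged so it also has room for relocation records and the slop left between code sections. A back-of-the-envelope sketch of the sizing; every constant below is an illustrative assumption, not the real HotSpot value:

#include <cstdio>

int main() {
  const int MAX_inst_size      = 2048;  // assumed
  const int MAX_stubs_size     = 128;   // assumed
  const int MAX_locs_size      = 128;   // assumed
  const int sizeof_relocInfo   = 2;     // assumed size of one relocation unit
  const int end_slop           = 32;    // assumed CodeSection::end_slop()
  const int scratch_const_size = 64;    // assumed

  int locs_size = sizeof_relocInfo * MAX_locs_size;  // room for relocation records
  int slop      = 2 * end_slop;                      // space between sections
  int size      = MAX_inst_size + MAX_stubs_size + scratch_const_size + slop + locs_size;
  std::printf("scratch buffer size = %d bytes\n", size);
  return 0;
}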