--- a/src/hotspot/cpu/aarch64/aarch64.ad Fri Jun 28 11:26:07 2019 +0100
+++ b/src/hotspot/cpu/aarch64/aarch64.ad Fri Jun 28 17:10:22 2019 +0300
@@ -1761,6 +1761,17 @@
// branch if we need to invalidate the method later
__ nop();
+ if (C->clinit_barrier_on_entry()) {
+ assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
+
+ Label L_skip_barrier;
+
+ __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
+ __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
+ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+ __ bind(L_skip_barrier);
+ }
+
int bangsize = C->bang_size_in_bytes();
if (C->need_stack_bang(bangsize) && UseStackBanging)
__ generate_stack_overflow_check(bangsize);
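[The added prologue code is a class-initialization entry barrier: a compiled static method may only be entered once its holder class is fully initialized, or by the thread that is running the holder's <clinit>. A minimal standalone C++ model of that decision, using illustrative stand-in types rather than HotSpot's real layouts:

    #include <cstdio>

    // Stand-ins for the VM state the barrier inspects; field names are
    // illustrative, not HotSpot's actual InstanceKlass layout.
    enum InitState { being_initialized, fully_initialized };
    struct FakeKlass {
      InitState init_state;
      long      init_thread;  // thread running <clinit>, if any
    };

    // The emitted barrier allows entry on either fast path; otherwise it
    // jumps to the handle_wrong_method stub, which re-resolves the call.
    bool barrier_allows_entry(const FakeKlass* k, long current_thread) {
      if (k->init_state == fully_initialized) return true; // fast path 1
      if (k->init_thread == current_thread)   return true; // fast path 2
      return false;                                        // slow path
    }

    int main() {
      FakeKlass k = { being_initialized, /*init_thread=*/7 };
      printf("%d\n", barrier_allows_entry(&k, 7)); // 1: initializer enters
      printf("%d\n", barrier_allows_entry(&k, 8)); // 0: re-dispatch
    }

The same two checks are emitted by MacroAssembler::clinit_barrier below; C2, C1, and the adapters all funnel their slow path through get_handle_wrong_method_stub().]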
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp Fri Jun 28 11:26:07 2019 +0100
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp Fri Jun 28 17:10:22 2019 +0300
@@ -317,7 +317,15 @@
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {
- ShouldNotReachHere(); // not implemented
+ assert(VM_Version::supports_fast_class_init_checks(), "sanity");
+ assert(!method->holder()->is_not_initialized(), "initialization should have been started");
+
+ Label L_skip_barrier;
+
+ __ mov_metadata(rscratch2, method->holder()->constant_encoding());
+ __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier /*L_fast_path*/);
+ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+ __ bind(L_skip_barrier);
}
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp Fri Jun 28 11:26:07 2019 +0100
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp Fri Jun 28 17:10:22 2019 +0300
@@ -331,11 +331,6 @@
void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
- // If we have to make this method not-entrant we'll overwrite its
- // first instruction with a jump. For this action to be legal we
- // must ensure that this first instruction is a B, BL, NOP, BKPT,
- // SVC, HVC, or SMC. Make it a NOP.
- nop();
assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
// Make sure there is enough stack space for this method's activation.
// Note that we do this before doing an enter().
@@ -355,6 +350,11 @@
void C1_MacroAssembler::verified_entry() {
+ // If we have to make this method not-entrant we'll overwrite its
+ // first instruction with a jump. For this action to be legal we
+ // must ensure that this first instruction is a B, BL, NOP, BKPT,
+ // SVC, HVC, or SMC. Make it a NOP.
+ nop();
}
void C1_MacroAssembler::load_parameter(int offset_in_words, Register reg) {
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp Fri Jun 28 11:26:07 2019 +0100
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp Fri Jun 28 17:10:22 2019 +0300
@@ -288,6 +288,18 @@
ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
+void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
+ Register method,
+ Register cache) {
+ const int method_offset = in_bytes(
+ ConstantPoolCache::base_offset() +
+ ((byte_no == TemplateTable::f2_byte)
+ ? ConstantPoolCacheEntry::f2_offset()
+ : ConstantPoolCacheEntry::f1_offset()));
+
+ ldr(method, Address(cache, method_offset)); // Method* (f1 or f2, per byte_no)
+}
+
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass.
//
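[byte_no selects between the cache entry's f1 and f2 slots, depending on where the resolving bytecode stores its Method* (invokestatic, the caller added in this patch, resolves through f1). A sketch of the offset computation, with made-up constants; the real values come from the ConstantPoolCache and ConstantPoolCacheEntry layouts:

    #include <cassert>
    #include <cstddef>

    // Illustrative values only; HotSpot derives these from the real
    // ConstantPoolCache / ConstantPoolCacheEntry layouts.
    const int    f1_byte = 1, f2_byte = 2;          // TemplateTable byte_no
    const size_t base_offset = 16;
    const size_t f1_offset = 8, f2_offset = 16;

    size_t resolved_method_offset(int byte_no) {
      assert(byte_no == f1_byte || byte_no == f2_byte);
      return base_offset + (byte_no == f2_byte ? f2_offset : f1_offset);
    }
]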
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp Fri Jun 28 11:26:07 2019 +0100
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp Fri Jun 28 17:10:22 2019 +0300
@@ -124,6 +124,8 @@
// load cpool->resolved_klass_at(index);
void load_resolved_klass_at_offset(Register cpool, Register index, Register klass, Register temp);
+ void load_resolved_method_at_index(int byte_no, Register method, Register cache);
+
void pop_ptr(Register r = r0);
void pop_i(Register r = r0);
void pop_l(Register r = r0);
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Fri Jun 28 11:26:07 2019 +0100
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Fri Jun 28 17:10:22 2019 +0300
@@ -1307,6 +1307,35 @@
bind(L_fallthrough);
}
+void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) {
+ assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");
+ assert_different_registers(klass, rthread, scratch);
+
+ Label L_fallthrough;
+ if (L_fast_path == NULL) {
+ L_fast_path = &L_fallthrough;
+ } else if (L_slow_path == NULL) {
+ L_slow_path = &L_fallthrough;
+ }
+ // Fast path check: class is fully initialized
+ ldrb(scratch, Address(klass, InstanceKlass::init_state_offset()));
+ subs(zr, scratch, InstanceKlass::fully_initialized);
+ br(Assembler::EQ, *L_fast_path);
+
+ // Fast path check: current thread is initializer thread
+ ldr(scratch, Address(klass, InstanceKlass::init_thread_offset()));
+ cmp(rthread, scratch);
+
+ if (L_slow_path == &L_fallthrough) {
+ br(Assembler::EQ, *L_fast_path);
+ bind(*L_slow_path);
+ } else if (L_fast_path == &L_fallthrough) {
+ br(Assembler::NE, *L_slow_path);
+ bind(*L_fast_path);
+ } else {
+ Unimplemented();
+ }
+}
void MacroAssembler::verify_oop(Register reg, const char* s) {
if (!VerifyOops) return;
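[clinit_barrier may be given the fast-path label or the slow-path label, but not both: whichever argument is NULL becomes the fall-through case, and the branch polarity of the final thread compare flips accordingly. A compilable model of that contract, where the boolean parameter stands in for the two fast-path tests:

    #include <cassert>
    #include <cstdio>

    // Models clinit_barrier's label contract: exactly one label may be
    // supplied; the missing one falls through, mirroring the NULL
    // handling above. Supplying both is Unimplemented() in the real code.
    void barrier_shape(bool fast, bool have_fast, bool have_slow) {
      assert(have_fast || have_slow);
      if (have_fast && !have_slow) {
        if (fast) { printf("b.eq L_fast_path\n"); return; }
        printf("fall through to slow path\n");
      } else if (have_slow && !have_fast) {
        if (!fast) { printf("b.ne L_slow_path\n"); return; }
        printf("fall through to fast path\n");
      } else {
        assert(false && "Unimplemented");
      }
    }
]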
@@ -3683,6 +3712,12 @@
bs->obj_equals(this, obj1, obj2);
}
+void MacroAssembler::load_method_holder(Register holder, Register method) {
+ ldr(holder, Address(method, Method::const_offset())); // ConstMethod*
+ ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool*
+ ldr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
+}
+
void MacroAssembler::load_klass(Register dst, Register src) {
if (UseCompressedClassPointers) {
ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
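[load_method_holder factors out a three-load metadata walk that previously appeared inline (see the templateTable change below): Method* to ConstMethod* to ConstantPool* to the holding InstanceKlass*. The chain, modeled with illustrative stand-in structs rather than HotSpot's real class layouts:

    // Stand-ins for the metadata chain the three ldr instructions walk.
    struct FakeInstanceKlass {};
    struct FakeConstantPool  { FakeInstanceKlass* pool_holder;  };
    struct FakeConstMethod   { FakeConstantPool*  constants;    };
    struct FakeMethod        { FakeConstMethod*   const_method; };

    // Method* -> ConstMethod* -> ConstantPool* -> InstanceKlass*
    FakeInstanceKlass* method_holder(const FakeMethod* m) {
      return m->const_method->constants->pool_holder;
    }
]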
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Fri Jun 28 11:26:07 2019 +0100
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Fri Jun 28 17:10:22 2019 +0300
@@ -788,6 +788,8 @@
// C 'boolean' to Java boolean: x == 0 ? 0 : 1
void c2bool(Register x);
+ void load_method_holder(Register holder, Register method);
+
// oop manipulations
void load_klass(Register dst, Register src);
void store_klass(Register dst, Register src);
@@ -926,6 +928,11 @@
Register temp_reg,
Label& L_success);
+ void clinit_barrier(Register klass,
+ Register scratch,
+ Label* L_fast_path = NULL,
+ Label* L_slow_path = NULL);
+
Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp Fri Jun 28 11:26:07 2019 +0100
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp Fri Jun 28 17:10:22 2019 +0300
@@ -799,6 +799,22 @@
}
#endif
+ // Class initialization barrier for static methods
+ if (VM_Version::supports_fast_class_init_checks()) {
+ Label L_skip_barrier;
+
+ { // Bypass the barrier for non-static methods
+ __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
+ __ andsw(zr, rscratch1, JVM_ACC_STATIC);
+ __ br(Assembler::EQ, L_skip_barrier); // non-static
+ }
+
+ __ load_method_holder(rscratch2, rmethod);
+ __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
+ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+ __ bind(L_skip_barrier);
+ }
+
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
__ flush();
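[In the c2i adapter the callee is only known at runtime, so the barrier is guarded by a dynamic staticness test: per the comments in the hunk, only static methods need the barrier here, and non-static methods bypass it. A sketch of the guard (JVM_ACC_STATIC is the standard access-flag bit):

    #include <cstdint>

    const uint32_t JVM_ACC_STATIC = 0x0008; // ACC_STATIC per the JVM spec

    // Mirrors the andsw + br(EQ) sequence: a zero result means the
    // method is non-static and the barrier is skipped entirely.
    bool needs_entry_barrier(uint32_t access_flags) {
      return (access_flags & JVM_ACC_STATIC) != 0;
    }
]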
@@ -1580,6 +1596,15 @@
// SVC, HVC, or SMC. Make it a NOP.
__ nop();
+ if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+ Label L_skip_barrier;
+ __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
+ __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
+ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+
+ __ bind(L_skip_barrier);
+ }
+
// Generate stack overflow check
if (UseStackBanging) {
__ bang_stack_with_offset(JavaThread::stack_shadow_zone_size());
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Fri Jun 28 11:26:07 2019 +0100
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Fri Jun 28 17:10:22 2019 +0300
@@ -2323,7 +2323,7 @@
const Register temp = r19;
assert_different_registers(Rcache, index, temp);
- Label resolved;
+ Label resolved, clinit_barrier_slow;
Bytecodes::Code code = bytecode();
switch (code) {
@@ -2338,6 +2338,8 @@
__ br(Assembler::EQ, resolved);
// resolve first time through
+ // Class initialization barrier slow path lands here as well.
+ __ bind(clinit_barrier_slow);
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
__ mov(temp, (int) code);
__ call_VM(noreg, entry, temp);
@@ -2347,6 +2349,13 @@
// n.b. unlike x86 Rcache is now rcpool plus the indexed offset
// so all clients of this method must be modified accordingly
__ bind(resolved);
+
+ // Class initialization barrier for static methods
+ if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+ __ load_resolved_method_at_index(byte_no, temp, Rcache);
+ __ load_method_holder(temp, temp);
+ __ clinit_barrier(temp, rscratch1, NULL, &clinit_barrier_slow);
+ }
}
// The Rcache and index registers must be set before call
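[In the interpreter the barrier's slow path deliberately reuses the resolution call: when an invokestatic's holder is still being initialized by another thread, control branches back to clinit_barrier_slow, and the runtime call (InterpreterRuntime::resolve_from_cache) is where the thread then waits for initialization to complete or throw. A compilable control-flow model, with the waiting simulated by a counter:

    #include <cstdio>

    // Simulated state: 'resolved' mimics the cache check, 'steps_left'
    // mimics how long another thread keeps running <clinit>.
    struct Sim { bool resolved; int steps_left; };

    void runtime_resolve(Sim* s) {           // resolve_from_cache stand-in
      s->resolved = true;
      if (s->steps_left > 0) s->steps_left--;  // "waited" on <clinit>
    }

    void resolve_with_barrier(Sim* s, bool is_invokestatic) {
      if (!s->resolved) {
      slow:                                  // __ bind(clinit_barrier_slow)
        runtime_resolve(s);
      }
      // __ bind(resolved)
      if (is_invokestatic && s->steps_left > 0) goto slow; // barrier slow path
      printf("dispatch\n");
    }

    int main() {
      Sim s = { false, 2 };
      resolve_with_barrier(&s, true);
    }
]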
@@ -3418,9 +3427,8 @@
__ profile_virtual_call(r3, r13, r19);
// Get declaring interface class from method, and itable index
- __ ldr(r0, Address(rmethod, Method::const_offset()));
- __ ldr(r0, Address(r0, ConstMethod::constants_offset()));
- __ ldr(r0, Address(r0, ConstantPool::pool_holder_offset_in_bytes()));
+
+ __ load_method_holder(r0, rmethod);
__ ldrw(rmethod, Address(rmethod, Method::itable_index_offset()));
__ subw(rmethod, rmethod, Method::itable_index_max);
__ negw(rmethod, rmethod);
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp Fri Jun 28 11:26:07 2019 +0100
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp Fri Jun 28 17:10:22 2019 +0300
@@ -124,6 +124,7 @@
static int dcache_line_size() {
return (1 << ((_psr_info.ctr_el0 >> 16) & 0x0f)) * 4;
}
+ static bool supports_fast_class_init_checks() { return true; }
};
#endif // CPU_AARCH64_VM_VERSION_AARCH64_HPP