--- a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp Tue Jul 30 09:56:18 2019 -0400
+++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp Tue Jul 30 17:54:53 2019 +0200
@@ -116,3 +116,9 @@
__ verify_oop(value);
__ bind(done);
}
+
+void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
+ Register obj, Register tmp, Label& slowpath) {
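+  // Default version: strip the weak tag and load through the handle.
+  // GC-specific barrier set assemblers may override this and divert to slowpath.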
+ __ clrrdi(dst, obj, JNIHandles::weak_tag_size);
+ __ ld(dst, 0, dst); // Resolve (untagged) jobject.
+}
--- a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.hpp Tue Jul 30 09:56:18 2019 -0400
+++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.hpp Tue Jul 30 17:54:53 2019 +0200
@@ -49,6 +49,9 @@
virtual void resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2, bool needs_frame);
+ virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register dst, Register jni_env,
+ Register obj, Register tmp, Label& slowpath);
+
virtual void barrier_stubs_init() {}
};
--- a/src/hotspot/cpu/ppc/jniFastGetField_ppc.cpp Tue Jul 30 09:56:18 2019 -0400
+++ b/src/hotspot/cpu/ppc/jniFastGetField_ppc.cpp Tue Jul 30 17:54:53 2019 +0200
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,16 +24,138 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/safepoint.hpp"
+#define __ masm->
+
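+// Size of the code buffer allocated for each generated accessor stub.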
+#define BUFFER_SIZE 48*BytesPerInstWord
+
+
+// Common register usage:
+// R3/F1: result
+// R3_ARG1: jni env
+// R4_ARG2: obj
+// R5_ARG3: jfield id
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
- // We don't have fast jni accessors.
- return (address) -1;
+ const char *name;
+ switch (type) {
+ case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
+ case T_BYTE: name = "jni_fast_GetByteField"; break;
+ case T_CHAR: name = "jni_fast_GetCharField"; break;
+ case T_SHORT: name = "jni_fast_GetShortField"; break;
+ case T_INT: name = "jni_fast_GetIntField"; break;
+ case T_LONG: name = "jni_fast_GetLongField"; break;
+ case T_FLOAT: name = "jni_fast_GetFloatField"; break;
+ case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
+ default: ShouldNotReachHere();
+ name = NULL; // unreachable
+ }
+ ResourceMark rm;
+ BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
+ CodeBuffer cbuf(blob);
+ MacroAssembler* masm = new MacroAssembler(&cbuf);
+ address fast_entry = __ function_entry();
+
+ Label slow;
+
+ const Register Rcounter_addr = R6_ARG4,
+ Rcounter = R7_ARG5,
+ Robj = R8_ARG6,
+ Rtmp = R9_ARG7;
+ const int counter_offs = __ load_const_optimized(Rcounter_addr,
+ SafepointSynchronize::safepoint_counter_addr(),
+ R0, true);
+
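+  // The fast path is only valid while no safepoint is in progress:
+  // an odd safepoint counter means a safepoint is active, so take the slow path.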
+ __ ld(Rcounter, counter_offs, Rcounter_addr);
+ __ andi_(R0, Rcounter, 1);
+ __ bne(CCR0, slow);
+
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ // Field may be volatile.
+ __ fence();
+ } else {
+ // Using acquire to order wrt. JVMTI check and load of result.
+    __ isync(); // order wrt. following load(s)
+ }
+
+ if (JvmtiExport::can_post_field_access()) {
+ // Check to see if a field access watch has been set before we
+ // take the fast path.
+ int fac_offs = __ load_const_optimized(Rtmp, JvmtiExport::get_field_access_count_addr(),
+ R0, true);
+ __ lwa(Rtmp, fac_offs, Rtmp);
+ __ cmpwi(CCR0, Rtmp, 0);
+ __ bne(CCR0, slow);
+ }
+
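+  // Resolve the jobject in R4_ARG2 to a raw oop in Robj without entering the VM.
+  // A GC-specific barrier implementation may divert to the slow path here.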
+ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->try_resolve_jobject_in_native(masm, Robj, R3_ARG1, R4_ARG2, Rtmp, slow);
+
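+  // The jfieldID encodes the instance field offset in its upper bits (the low 2 bits are tag bits).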
+ __ srwi(Rtmp, R5_ARG3, 2); // offset
+
+ assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
+ speculative_load_pclist[count] = __ pc(); // Used by the segfault handler
+ bool is_fp = false;
+ switch (type) {
+ case T_BOOLEAN: __ lbzx(Rtmp, Rtmp, Robj); break;
+ case T_BYTE: __ lbzx(Rtmp, Rtmp, Robj); __ extsb(Rtmp, Rtmp); break;
+ case T_CHAR: __ lhzx(Rtmp, Rtmp, Robj); break;
+ case T_SHORT: __ lhax(Rtmp, Rtmp, Robj); break;
+ case T_INT: __ lwax(Rtmp, Rtmp, Robj); break;
+ case T_LONG: __ ldx( Rtmp, Rtmp, Robj); break;
+ case T_FLOAT: __ lfsx(F1_RET, Rtmp, Robj); is_fp = true; break;
+ case T_DOUBLE: __ lfdx(F1_RET, Rtmp, Robj); is_fp = true; break;
+ default: ShouldNotReachHere();
+ }
+
+ // Order preceding load(s) wrt. succeeding check (LoadStore for volatile field).
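+  // twi_0 creates the data dependency for integer results; for FP results, comparing the
+  // loaded value with itself and branching serves the same purpose, so the isync below
+  // acts as an acquire barrier in both cases.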
+ if (is_fp) {
+ Label next;
+ __ fcmpu(CCR0, F1_RET, F1_RET);
+ __ bne(CCR0, next);
+ __ bind(next);
+ } else {
+ __ twi_0(Rtmp);
+ }
+ __ isync();
+
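+  // Re-read the safepoint counter: if it changed, a safepoint (and possibly a GC that
+  // moved the object) may have intervened, so discard the speculative load.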
+ __ ld(R0, counter_offs, Rcounter_addr);
+ __ cmpd(CCR0, R0, Rcounter);
+ __ bne(CCR0, slow);
+
+ if (!is_fp) {
+ __ mr(R3_RET, Rtmp);
+ }
+ __ blr();
+
+ slowcase_entry_pclist[count++] = __ pc();
+ __ bind(slow);
+ address slow_case_addr;
+ switch (type) {
+ case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
+ case T_BYTE: slow_case_addr = jni_GetByteField_addr(); break;
+ case T_CHAR: slow_case_addr = jni_GetCharField_addr(); break;
+ case T_SHORT: slow_case_addr = jni_GetShortField_addr(); break;
+ case T_INT: slow_case_addr = jni_GetIntField_addr(); break;
+ case T_LONG: slow_case_addr = jni_GetLongField_addr(); break;
+ case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
+ case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
+ default: ShouldNotReachHere();
+ slow_case_addr = NULL; // unreachable
+ }
+ __ load_const_optimized(R12, slow_case_addr, R0);
+ __ call_c_and_return_to_caller(R12); // tail call
+
+ __ flush();
+
+ return fast_entry;
}
address JNI_FastGetField::generate_fast_get_boolean_field() {
@@ -57,19 +179,13 @@
}
address JNI_FastGetField::generate_fast_get_long_field() {
- // We don't have fast jni accessors.
- return (address) -1;
-}
-
-address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
- // We don't have fast jni accessors.
- return (address) -1;
+ return generate_fast_get_int_field0(T_LONG);
}
address JNI_FastGetField::generate_fast_get_float_field() {
- return generate_fast_get_float_field0(T_FLOAT);
+ return generate_fast_get_int_field0(T_FLOAT);
}
address JNI_FastGetField::generate_fast_get_double_field() {
- return generate_fast_get_float_field0(T_DOUBLE);
+ return generate_fast_get_int_field0(T_DOUBLE);
}
--- a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp Tue Jul 30 09:56:18 2019 -0400
+++ b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp Tue Jul 30 17:54:53 2019 +0200
@@ -111,3 +111,9 @@
__ verify_oop(value);
__ bind(Ldone);
}
+
+void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
+ Register obj, Register tmp, Label& slowpath) {
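+  // Default version: strip the weak tag and load through the handle in place.
+  // GC-specific barrier set assemblers may override this and divert to slowpath.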
+ __ z_nill(obj, ~JNIHandles::weak_tag_mask);
+ __ z_lg(obj, 0, obj); // Resolve (untagged) jobject.
+}
--- a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp Tue Jul 30 09:56:18 2019 -0400
+++ b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp Tue Jul 30 17:54:53 2019 +0200
@@ -46,6 +46,9 @@
virtual void resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2);
+ virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
+ Register obj, Register tmp, Label& slowpath);
+
virtual void barrier_stubs_init() {}
};
--- a/src/hotspot/cpu/s390/jniFastGetField_s390.cpp Tue Jul 30 09:56:18 2019 -0400
+++ b/src/hotspot/cpu/s390/jniFastGetField_s390.cpp Tue Jul 30 17:54:53 2019 +0200
@@ -24,8 +24,13 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
+#include "runtime/safepoint.hpp"
// TSO ensures that loads are blocking and ordered with respect to
// earlier loads, so we don't need LoadLoad membars.
@@ -34,9 +39,103 @@
#define BUFFER_SIZE 30*sizeof(jint)
+// Common register usage:
+// Z_RET/Z_FRET: result
+// Z_ARG1: jni env
+// Z_ARG2: obj
+// Z_ARG3: jfield id
+
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
- // Don't use fast jni accessors.
- return (address) -1;
+ const char *name;
+ switch (type) {
+ case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
+ case T_BYTE: name = "jni_fast_GetByteField"; break;
+ case T_CHAR: name = "jni_fast_GetCharField"; break;
+ case T_SHORT: name = "jni_fast_GetShortField"; break;
+ case T_INT: name = "jni_fast_GetIntField"; break;
+ case T_LONG: name = "jni_fast_GetLongField"; break;
+ case T_FLOAT: name = "jni_fast_GetFloatField"; break;
+ case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
+ default: ShouldNotReachHere();
+ name = NULL; // unreachable
+ }
+ ResourceMark rm;
+ BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
+ CodeBuffer cbuf(blob);
+ MacroAssembler* masm = new MacroAssembler(&cbuf);
+ address fast_entry = __ pc();
+
+ Label slow;
+
+ // We can only kill the remaining volatile registers.
+ const Register Rcounter = Z_ARG4,
+ Robj = Z_R1_scratch,
+ Rtmp = Z_R0_scratch;
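+  // The fast path is only valid while no safepoint is in progress (even counter value).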
+ __ load_const_optimized(Robj, SafepointSynchronize::safepoint_counter_addr());
+ __ z_lg(Rcounter, Address(Robj));
+ __ z_tmll(Rcounter, 1);
+ __ z_brnaz(slow);
+
+ if (JvmtiExport::can_post_field_access()) {
+ // Check to see if a field access watch has been set before we
+ // take the fast path.
+ __ load_const_optimized(Robj, JvmtiExport::get_field_access_count_addr());
+ __ z_lt(Robj, Address(Robj));
+ __ z_brne(slow);
+ }
+
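+  // Copy obj so the argument register stays intact for the slow path, then resolve the
+  // jobject to a raw oop in place. A GC-specific barrier may divert to the slow path.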
+ __ z_lgr(Robj, Z_ARG2);
+ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->try_resolve_jobject_in_native(masm, Z_ARG1, Robj, Rtmp, slow);
+
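+  // The jfieldID encodes the field offset in its upper bits (the low 2 bits are tag bits);
+  // add the offset to the resolved oop to form the field address.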
+ __ z_srlg(Rtmp, Z_ARG3, 2); // offset
+ __ z_agr(Robj, Rtmp);
+
+ assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
+ speculative_load_pclist[count] = __ pc(); // Used by the segfault handler
+ bool is_fp = false;
+ switch (type) {
+ case T_BOOLEAN: __ z_llgc(Rtmp, Address(Robj)); break;
+ case T_BYTE: __ z_lgb( Rtmp, Address(Robj)); break;
+ case T_CHAR: __ z_llgh(Rtmp, Address(Robj)); break;
+ case T_SHORT: __ z_lgh( Rtmp, Address(Robj)); break;
+ case T_INT: __ z_lgf( Rtmp, Address(Robj)); break;
+ case T_LONG: __ z_lg( Rtmp, Address(Robj)); break;
+ case T_FLOAT: __ mem2freg_opt(Z_FRET, Address(Robj), false); is_fp = true; break;
+ case T_DOUBLE: __ mem2freg_opt(Z_FRET, Address(Robj), true ); is_fp = true; break;
+ default: ShouldNotReachHere();
+ }
+
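+  // Re-read the safepoint counter: if it changed, a safepoint may have intervened and
+  // the object may have moved, so take the slow path.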
+ __ load_const_optimized(Robj, SafepointSynchronize::safepoint_counter_addr());
+ __ z_cg(Rcounter, Address(Robj));
+ __ z_brne(slow);
+
+ if (!is_fp) {
+ __ z_lgr(Z_RET, Rtmp);
+ }
+ __ z_br(Z_R14);
+
+ slowcase_entry_pclist[count++] = __ pc();
+ __ bind(slow);
+ address slow_case_addr;
+ switch (type) {
+ case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
+ case T_BYTE: slow_case_addr = jni_GetByteField_addr(); break;
+ case T_CHAR: slow_case_addr = jni_GetCharField_addr(); break;
+ case T_SHORT: slow_case_addr = jni_GetShortField_addr(); break;
+ case T_INT: slow_case_addr = jni_GetIntField_addr(); break;
+ case T_LONG: slow_case_addr = jni_GetLongField_addr(); break;
+ case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break;
+ case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
+ default: ShouldNotReachHere();
+ slow_case_addr = NULL; // unreachable
+ }
+ __ load_const_optimized(Robj, slow_case_addr);
+ __ z_br(Robj); // tail call
+
+ __ flush();
+
+ return fast_entry;
}
address JNI_FastGetField::generate_fast_get_boolean_field() {
@@ -60,19 +159,13 @@
}
address JNI_FastGetField::generate_fast_get_long_field() {
- // Don't use fast jni accessors.
- return (address) -1;
-}
-
-address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
- // Don't use fast jni accessors.
- return (address) -1;
+ return generate_fast_get_int_field0(T_LONG);
}
address JNI_FastGetField::generate_fast_get_float_field() {
- return generate_fast_get_float_field0(T_FLOAT);
+ return generate_fast_get_int_field0(T_FLOAT);
}
address JNI_FastGetField::generate_fast_get_double_field() {
- return generate_fast_get_float_field0(T_DOUBLE);
+ return generate_fast_get_int_field0(T_DOUBLE);
}
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp Tue Jul 30 09:56:18 2019 -0400
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp Tue Jul 30 17:54:53 2019 +0200
@@ -477,6 +477,15 @@
return 1;
}
}
+
+ // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
+ // and the heap gets shrunk before the field access.
+ if ((sig == SIGSEGV) || (sig == SIGBUS)) {
+ address addr = JNI_FastGetField::find_slowcase_pc(pc);
+ if (addr != (address)-1) {
+ stub = addr;
+ }
+ }
}
run_stub:
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp Tue Jul 30 09:56:18 2019 -0400
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp Tue Jul 30 17:54:53 2019 +0200
@@ -502,6 +502,15 @@
return true;
}
}
+
+ // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
+ // and the heap gets shrunk before the field access.
+ if ((sig == SIGSEGV) || (sig == SIGBUS)) {
+ address addr = JNI_FastGetField::find_slowcase_pc(pc);
+ if (addr != (address)-1) {
+ stub = addr;
+ }
+ }
}
if (stub != NULL) {
--- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp Tue Jul 30 09:56:18 2019 -0400
+++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp Tue Jul 30 17:54:53 2019 +0200
@@ -479,6 +479,15 @@
return true;
}
}
+
+ // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
+ // and the heap gets shrunk before the field access.
+ if ((sig == SIGSEGV) || (sig == SIGBUS)) {
+ address addr = JNI_FastGetField::find_slowcase_pc(pc);
+ if (addr != (address)-1) {
+ stub = addr;
+ }
+ }
}
if (stub != NULL) {