8172144: AArch64: Implement "JEP 270: Reserved Stack Areas for Critical Sections"
Reviewed-by: fparain
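
For context, JEP 270 keeps a small reserved area just above the yellow zone of each Java thread stack. When a method annotated with @jdk.internal.vm.annotation.ReservedStackAccess bangs into it, the VM unprotects the area so the critical section can run to completion, records a watermark in the thread's reserved_stack_activation field, and delivers a delayed StackOverflowError once the stack has unwound past that watermark. The following is a rough, standalone C++ model of that bookkeeping (names are illustrative, not HotSpot's):

    #include <cstdint>
    #include <cstdio>

    struct ThreadModel {
      uintptr_t stack_base;                // highest stack address (stacks grow down)
      uintptr_t reserved_stack_activation; // watermark; == stack_base while armed
      bool      reserved_zone_armed;
    };

    // Signal-handler side: hand the reserved pages to an annotated method and
    // remember the frame (sp) that must unwind before the error is delivered.
    void grant_reserved_zone(ThreadModel& t, uintptr_t activation_sp) {
      t.reserved_zone_armed = false;
      t.reserved_stack_activation = activation_sp;
    }

    // Method-exit side, mirroring reserved_stack_check(): once sp is back at or
    // above the watermark, re-arm the zone and report the delayed overflow.
    bool check_on_method_exit(ThreadModel& t, uintptr_t sp) {
      if (sp < t.reserved_stack_activation) return false; // still in the critical section
      t.reserved_zone_armed = true;
      t.reserved_stack_activation = t.stack_base;
      return true; // caller now throws the delayed StackOverflowError
    }

    int main() {
      ThreadModel t{0x7f0000200000, 0x7f0000200000, true};
      grant_reserved_zone(t, 0x7f00001f8000);
      std::printf("deeper frame exits:    throw=%d\n", check_on_method_exit(t, 0x7f00001f7000));
      std::printf("annotated frame exits: throw=%d\n", check_on_method_exit(t, 0x7f00001f8000));
    }
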
--- a/hotspot/src/cpu/aarch64/vm/aarch64.ad Wed Jan 11 12:47:16 2017 +0100
+++ b/hotspot/src/cpu/aarch64/vm/aarch64.ad Wed Jan 11 15:09:58 2017 +0000
@@ -3008,6 +3008,10 @@
__ notify(Assembler::method_reentry);
}
+ if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
+ __ reserved_stack_check();
+ }
+
if (do_polling() && C->is_method_compilation()) {
__ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
}
--- a/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Wed Jan 11 12:47:16 2017 +0100
+++ b/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Wed Jan 11 15:09:58 2017 +0000
@@ -532,8 +532,14 @@
void LIR_Assembler::return_op(LIR_Opr result) {
assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
+
// Pop the stack before the safepoint code
__ remove_frame(initial_frame_size_in_bytes());
+
+ if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
+ __ reserved_stack_check();
+ }
+
address polling_page(os::get_polling_page());
__ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
__ ret(lr);
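
Both compiled return paths (the C2 epilogue in aarch64.ad above and C1's return_op here) follow the same pattern: pop the frame first, so sp already holds the caller's value when reserved_stack_check() compares it against the watermark, and emit the check only when the method was flagged by has_reserved_stack_access(), so ordinary methods pay nothing. A rough sketch of that emission pattern (illustrative stand-ins, not HotSpot's interfaces):

    #include <cstdio>

    // Stand-in for the macro assembler; each method just logs what the
    // generated epilogue would contain.
    struct EmitterModel {
      void remove_frame()         { std::puts("  remove frame (sp -> caller)"); }
      void reserved_stack_check() { std::puts("  reserved stack check"); }
      void safepoint_poll()       { std::puts("  read polling page"); }
      void ret()                  { std::puts("  ret lr"); }
    };

    void emit_return_epilogue(EmitterModel& masm,
                              int stack_reserved_pages,    // -XX:StackReservedPages
                              bool has_reserved_stack_access) {
      masm.remove_frame();                                 // frame is gone first
      if (stack_reserved_pages > 0 && has_reserved_stack_access) {
        masm.reserved_stack_check();                       // compares the caller's sp
      }
      masm.safepoint_poll();                               // poll stays after the check
      masm.ret();
    }

    int main() {
      EmitterModel m;
      std::puts("plain method:");     emit_return_epilogue(m, 1, false);
      std::puts("annotated method:"); emit_return_epilogue(m, 1, true);
    }
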
--- a/hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp Wed Jan 11 12:47:16 2017 +0100
+++ b/hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp Wed Jan 11 15:09:58 2017 +0000
@@ -629,6 +629,7 @@
DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
DESCRIBE_FP_OFFSET(interpreter_frame_method);
DESCRIBE_FP_OFFSET(interpreter_frame_mdp);
+ DESCRIBE_FP_OFFSET(interpreter_frame_mirror);
DESCRIBE_FP_OFFSET(interpreter_frame_cache);
DESCRIBE_FP_OFFSET(interpreter_frame_locals);
DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
--- a/hotspot/src/cpu/aarch64/vm/frame_aarch64.hpp Wed Jan 11 12:47:16 2017 +0100
+++ b/hotspot/src/cpu/aarch64/vm/frame_aarch64.hpp Wed Jan 11 15:09:58 2017 +0000
@@ -46,6 +46,9 @@
// [pointer to locals    ] = locals()          locals_offset
// [constant pool cache  ] = cache()           cache_offset
+// [klass of method      ] = mirror()          mirror_offset
+// [padding              ]
+
// [methodData           ] = mdp()             mdx_offset
// [methodOop            ] = method()          method_offset
--- a/hotspot/src/cpu/aarch64/vm/globalDefinitions_aarch64.hpp Wed Jan 11 12:47:16 2017 +0100
+++ b/hotspot/src/cpu/aarch64/vm/globalDefinitions_aarch64.hpp Wed Jan 11 15:09:58 2017 +0000
@@ -53,4 +53,6 @@
// evidence that it's worth doing.
#define DEOPTIMIZE_WHEN_PATCHING
+#define SUPPORT_RESERVED_STACK_AREA
+
#endif // CPU_AARCH64_VM_GLOBALDEFINITIONS_AARCH64_HPP
--- a/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp Wed Jan 11 12:47:16 2017 +0100
+++ b/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp Wed Jan 11 15:09:58 2017 +0000
@@ -47,7 +47,7 @@
#define DEFAULT_STACK_YELLOW_PAGES (2)
#define DEFAULT_STACK_RED_PAGES (1)
#define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))
-#define DEFAULT_STACK_RESERVED_PAGES (0)
+#define DEFAULT_STACK_RESERVED_PAGES (1)
#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
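
Changing DEFAULT_STACK_RESERVED_PAGES from 0 to 1 is what actually arms the feature on AArch64: one reserved page now sits directly above the yellow zone at the low end of every Java thread stack. A rough sketch of the layout arithmetic (page size and addresses are illustrative; HotSpot derives the real values from os::vm_page_size() and the -XX:Stack*Pages flags):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t stack_end = 0x7f0000000000;  // lowest address of the stack
      const uintptr_t page      = 4096;
      const int red = 1, yellow = 2, reserved = 1; // the AArch64 defaults above

      const uintptr_t red_base      = stack_end;                     // fatal overflow
      const uintptr_t yellow_base   = red_base    + red * page;      // recoverable overflow
      const uintptr_t reserved_base = yellow_base + yellow * page;   // JEP 270 area
      const uintptr_t usable_base   = reserved_base + reserved * page;

      std::printf("red:      [0x%" PRIxPTR ", 0x%" PRIxPTR ")\n", red_base, yellow_base);
      std::printf("yellow:   [0x%" PRIxPTR ", 0x%" PRIxPTR ")\n", yellow_base, reserved_base);
      std::printf("reserved: [0x%" PRIxPTR ", 0x%" PRIxPTR ")\n", reserved_base, usable_base);
    }
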
--- a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp Wed Jan 11 12:47:16 2017 +0100
+++ b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp Wed Jan 11 15:09:58 2017 +0000
@@ -619,6 +619,22 @@
// get sender esp
ldr(esp,
Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
+ if (StackReservedPages > 0) {
+ // testing if reserved zone needs to be re-enabled
+ Label no_reserved_zone_enabling;
+
+ ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
+ cmp(esp, rscratch1);
+ br(Assembler::LS, no_reserved_zone_enabling);
+
+ call_VM_leaf(
+ CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
+ call_VM(noreg, CAST_FROM_FN_PTR(address,
+ InterpreterRuntime::throw_delayed_StackOverflowError));
+ should_not_reach_here();
+
+ bind(no_reserved_zone_enabling);
+ }
// remove frame anchor
leave();
// If we're returning to interpreted code we will shortly be
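
The interpreter variant runs inside remove_activation(), after the sender's esp has been reloaded, so it compares the caller's expression stack pointer rather than the machine sp; and because the frame anchor is still intact (leave() only runs afterwards), the throw path can use call_VM. A rough C++ model of the two-step slow path, re-arm via a leaf call, then throw through the VM (illustrative names):

    #include <cstdint>
    #include <cstdio>
    #include <stdexcept>

    struct ThreadModel { bool reserved_zone_armed = false; };

    // Leaf call: no safepoint, just re-protects the reserved pages.
    void enable_stack_reserved_zone(ThreadModel& t) { t.reserved_zone_armed = true; }

    // Full VM call: builds and throws the delayed error; never returns here.
    [[noreturn]] void throw_delayed_stack_overflow() {
      throw std::runtime_error("delayed StackOverflowError");
    }

    void interpreter_exit_check(ThreadModel& t, uintptr_t sender_esp, uintptr_t watermark) {
      // br(LS, ...) skips when esp <= watermark, so the slow path runs for esp > watermark.
      if (sender_esp > watermark) {
        enable_stack_reserved_zone(t);
        throw_delayed_stack_overflow();
      }
    }

    int main() {
      ThreadModel t;
      try {
        interpreter_exit_check(t, 0x2000, 0x1000);
      } catch (const std::exception& e) {
        std::printf("zone re-armed=%d, caught: %s\n", t.reserved_zone_armed, e.what());
      }
    }
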
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Wed Jan 11 12:47:16 2017 +0100
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Wed Jan 11 15:09:58 2017 +0000
@@ -402,6 +402,30 @@
}
}
+void MacroAssembler::reserved_stack_check() {
+ // testing if reserved zone needs to be enabled
+ Label no_reserved_zone_enabling;
+
+ ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
+ cmp(sp, rscratch1);
+ br(Assembler::LO, no_reserved_zone_enabling);
+
+ enter(); // LR and FP are live.
+ lea(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone));
+ mov(c_rarg0, rthread);
+ blr(rscratch1);
+ leave();
+
+ // We have already removed our own frame.
+ // throw_delayed_StackOverflowError will think that it's been
+ // called by our caller.
+ lea(rscratch1, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
+ br(rscratch1);
+ should_not_reach_here();
+
+ bind(no_reserved_zone_enabling);
+}
+
int MacroAssembler::biased_locking_enter(Register lock_reg,
Register obj_reg,
Register swap_reg,
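
reserved_stack_check() is reached after compiled code has already popped its frame, so LR and FP carry the caller's values: the slow path wraps the C call in enter()/leave() to give it a frame, then enters the delayed-overflow stub with a plain branch (br, not blr) so that LR still names the method's caller and the stub attributes the error there. In portable terms the trick is a tail call; a rough model (illustrative names):

    #include <cstdio>
    #include <cstdlib>

    // Stand-in for StubRoutines::throw_delayed_StackOverflowError_entry(): in
    // HotSpot it raises the error using the return address still sitting in LR,
    // i.e. the caller of the method that just exited.
    [[noreturn]] void throw_delayed_stack_overflow_stub() {
      std::puts("delayed StackOverflowError, attributed to the caller");
      std::exit(0);
    }

    [[noreturn]] void reserved_stack_check_slow_path() {
      // enter(); enable_stack_reserved_zone(thread); leave();  (modeled away here)
      // Tail position: no frame of ours remains, matching "br rscratch1".
      throw_delayed_stack_overflow_stub();
    }

    int main() { reserved_stack_check_slow_path(); }
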
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Wed Jan 11 12:47:16 2017 +0100
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Wed Jan 11 15:09:58 2017 +0000
@@ -957,6 +957,9 @@
// stack overflow + shadow pages. Also, clobbers tmp
void bang_stack_size(Register size, Register tmp);
+ // Check for reserved stack access in method being exited (for JIT)
+ void reserved_stack_check();
+
virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
Register tmp,
int offset);
--- a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp Wed Jan 11 12:47:16 2017 +0100
+++ b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp Wed Jan 11 15:09:58 2017 +0000
@@ -4676,8 +4676,11 @@
StubRoutines::_throw_StackOverflowError_entry =
generate_throw_exception("StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address,
- SharedRuntime::
- throw_StackOverflowError));
+ SharedRuntime::throw_StackOverflowError));
+ StubRoutines::_throw_delayed_StackOverflowError_entry =
+ generate_throw_exception("delayed StackOverflowError throw_exception",
+ CAST_FROM_FN_PTR(address,
+ SharedRuntime::throw_delayed_StackOverflowError));
if (UseCRC32Intrinsics) {
// set table address before stub generation which use it
StubRoutines::_crc_table_adr = (address)StubRoutines::aarch64::_crc_table;
--- a/hotspot/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp Wed Jan 11 12:47:16 2017 +0100
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp Wed Jan 11 15:09:58 2017 +0000
@@ -87,6 +87,7 @@
#define SPELL_REG_FP "rbp"
#else
#define REG_FP 29
+#define REG_LR 30
#define SPELL_REG_SP "sp"
#define SPELL_REG_FP "x29"
@@ -182,6 +183,46 @@
return frame(sp, fp, epc.pc());
}
+bool os::Linux::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+ address pc = (address) os::Linux::ucontext_get_pc(uc);
+ if (Interpreter::contains(pc)) {
+ // The interpreter performs stack banging after the fixed frame header has
+ // been generated, while the compilers perform it before. To maintain
+ // semantic consistency between interpreted and compiled frames, the
+ // method returns the Java sender of the current frame.
+ *fr = os::fetch_frame_from_context(uc);
+ if (!fr->is_first_java_frame()) {
+ assert(fr->safe_for_sender(thread), "Safety check");
+ *fr = fr->java_sender();
+ }
+ } else {
+ // more complex case: compiled code
+ assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+ CodeBlob* cb = CodeCache::find_blob(pc);
+ if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+ // Not sure where the pc points to; fall back to the default
+ // stack overflow handling
+ return false;
+ } else {
+ // In compiled code, the stack banging is performed before LR
+ // has been saved in the frame. LR is live, and SP and FP
+ // belong to the caller.
+ intptr_t* fp = os::Linux::ucontext_get_fp(uc);
+ intptr_t* sp = os::Linux::ucontext_get_sp(uc);
+ address pc = (address)(uc->uc_mcontext.regs[REG_LR]
+ - NativeInstruction::instruction_size);
+ *fr = frame(sp, fp, pc);
+ if (!fr->is_java_frame()) {
+ assert(fr->safe_for_sender(thread), "Safety check");
+ assert(!fr->is_first_frame(), "Safety check");
+ *fr = fr->java_sender();
+ }
+ }
+ }
+ assert(fr->is_java_frame(), "Safety check");
+ return true;
+}
+
// By default, gcc always saves frame pointer rfp on this stack. This
// may get turned off by -fomit-frame-pointer.
frame os::get_sender_for_C_frame(frame* fr) {
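
The reconstruction above works because at the banging instruction the callee's prologue has not yet stored LR and FP: SP and FP in the signal context still belong to the caller, and LR holds the return address, so LR minus one 4-byte AArch64 instruction is the call site inside the caller. A small model of that arithmetic (addresses illustrative):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    struct FrameModel { uintptr_t sp, fp, pc; };

    // SP/FP in the signal context belong to the caller; LR is the return
    // address, so LR - 4 is the call instruction inside the caller.
    FrameModel frame_at_banging_point(uintptr_t ctx_sp, uintptr_t ctx_fp, uintptr_t ctx_lr) {
      const uintptr_t kInstrSize = 4;  // every AArch64 instruction is 4 bytes
      return FrameModel{ctx_sp, ctx_fp, ctx_lr - kInstrSize};
    }

    int main() {
      FrameModel fr = frame_at_banging_point(0x7f00001f9000, 0x7f00001f9040, 0x400128);
      std::printf("reconstructed pc (call site): 0x%" PRIxPTR "\n", fr.pc);
    }
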
@@ -313,6 +354,24 @@
if (thread->in_stack_yellow_reserved_zone(addr)) {
- thread->disable_stack_yellow_reserved_zone();
if (thread->thread_state() == _thread_in_Java) {
+ if (thread->in_stack_reserved_zone(addr)) {
+ frame fr;
+ if (os::Linux::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+ assert(fr.is_java_frame(), "Must be a Java frame");
+ frame activation =
+ SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+ if (activation.sp() != NULL) {
+ thread->disable_stack_reserved_zone();
+ if (activation.is_interpreted_frame()) {
+ thread->set_reserved_stack_activation((address)(
+ activation.fp() + frame::interpreter_frame_initial_sp_offset));
+ } else {
+ thread->set_reserved_stack_activation((address)activation.unextended_sp());
+ }
+ return 1;
+ }
+ }
+ }
// Throw a stack overflow exception. Guard pages will be reenabled
// while unwinding the stack.
+ thread->disable_stack_yellow_reserved_zone();
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
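
Putting the handler together: when the fault address lies in the reserved zone and look_for_reserved_stack_annotated_method() finds a @ReservedStackAccess frame, the zone is disabled, the watermark is set to that activation's sp (or the fixed fp-relative offset for interpreted frames), and returning 1 re-executes the faulting bang, which now succeeds; otherwise handling falls through to the ordinary stack overflow path. A condensed C++ model of just that decision (frame walking elided, names illustrative):

    #include <cstdint>
    #include <cstdio>

    struct ActivationModel {
      bool      found_annotated; // an @ReservedStackAccess frame was on the stack
      uintptr_t sp;              // that frame's sp, the new watermark
    };

    enum class Outcome { RetryInstruction, ThrowStackOverflow };

    Outcome handle_reserved_zone_fault(const ActivationModel& a,
                                       bool& zone_armed, uintptr_t& watermark) {
      if (a.found_annotated) {
        zone_armed = false;                // disable_stack_reserved_zone()
        watermark  = a.sp;                 // set_reserved_stack_activation()
        return Outcome::RetryInstruction;  // "return 1": re-run the bang, now it succeeds
      }
      return Outcome::ThrowStackOverflow;  // normal yellow-zone handling
    }

    int main() {
      bool armed = true; uintptr_t mark = 0;
      Outcome o = handle_reserved_zone_fault({true, 0x7f00001f8000}, armed, mark);
      std::printf("retry=%d armed=%d\n", o == Outcome::RetryInstruction, armed);
    }
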
--- a/hotspot/test/runtime/ReservedStack/ReservedStackTest.java Wed Jan 11 12:47:16 2017 +0100
+++ b/hotspot/test/runtime/ReservedStack/ReservedStackTest.java Wed Jan 11 15:09:58 2017 +0000
@@ -200,7 +200,8 @@
boolean supportedPlatform =
Platform.isAix() ||
(Platform.isLinux() &&
- (Platform.isPPC() || Platform.isS390x() || Platform.isX64() || Platform.isX86())) ||
+ (Platform.isPPC() || Platform.isS390x() || Platform.isX64() ||
+ Platform.isX86() || Platform.isAArch64())) ||
Platform.isOSX() ||
Platform.isSolaris();
if (supportedPlatform && !result.contains("PASSED")) {