--- a/src/hotspot/cpu/s390/frame_s390.cpp Tue Oct 09 14:30:06 2018 +0200
+++ b/src/hotspot/cpu/s390/frame_s390.cpp Tue Oct 09 15:06:27 2018 +0200
@@ -53,14 +53,135 @@
bool frame::safe_for_sender(JavaThread *thread) {
bool safe = false;
- address cursp = (address)sp();
- address curfp = (address)fp();
- if ((cursp != NULL && curfp != NULL &&
- (cursp <= thread->stack_base() && cursp >= thread->stack_base() - thread->stack_size())) &&
- (curfp <= thread->stack_base() && curfp >= thread->stack_base() - thread->stack_size())) {
- safe = true;
+ address sp = (address)_sp;
+ address fp = (address)_fp;
+ address unextended_sp = (address)_unextended_sp;
+
+ // Consider stack guards when trying to determine "safe" stack pointers
+ static size_t stack_guard_size = os::uses_stack_guard_pages() ?
+ JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_reserved_zone_size() : 0;
+ size_t usable_stack_size = thread->stack_size() - stack_guard_size;
+
+ // sp must be within the usable part of the stack (not in guards)
+ bool sp_safe = (sp < thread->stack_base()) &&
+ (sp >= thread->stack_base() - usable_stack_size);
+
+ if (!sp_safe) {
+ return false;
+ }
+
+ // Unextended sp must be within the stack
+ bool unextended_sp_safe = (unextended_sp < thread->stack_base());
+
+ if (!unextended_sp_safe) {
+ return false;
}
- return safe;
+
+ // An fp must be within the stack and above (but not equal to) sp.
+ bool fp_safe = (fp <= thread->stack_base()) && (fp > sp);
+ // An interpreter fp must be within the stack and above (but not equal to) sp.
+ // Moreover, the distance between fp and sp must be at least the size of the z_ijava_state structure.
+ bool fp_interp_safe = (fp <= thread->stack_base()) && (fp > sp) &&
+ ((fp - sp) >= z_ijava_state_size);
+
+ // We know sp/unextended_sp are safe, only fp is questionable here
+
+ // If the current frame is known to the code cache then we can attempt to
+ // construct the sender and do some validation of it. This goes a long way
+ // toward eliminating issues when we get into frame construction code.
+
+ if (_cb != NULL) {
+ // Entry frame checks
+ if (is_entry_frame()) {
+ // An entry frame must have a valid fp.
+ return fp_safe && is_entry_frame_valid(thread);
+ }
+
+ // Now check if the frame is complete and the test is
+ // reliable. Unfortunately we can only check frame completeness for
+ // runtime stubs. Other generic buffer blobs are more
+ // problematic so we just assume they are OK. Adapter blobs never have a
+ // complete frame and are never OK. nmethods should be OK on s390.
+ if (!_cb->is_frame_complete_at(_pc)) {
+ if (_cb->is_adapter_blob() || _cb->is_runtime_stub()) {
+ return false;
+ }
+ }
+
+ // Could just be some random pointer within the codeBlob.
+ if (!_cb->code_contains(_pc)) {
+ return false;
+ }
+
+ if (is_interpreted_frame() && !fp_interp_safe) {
+ return false;
+ }
+
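+ // Read the sender's sp and pc from the ABI area located at fp.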
+ z_abi_160* sender_abi = (z_abi_160*) fp;
+ intptr_t* sender_sp = (intptr_t*) sender_abi->callers_sp;
+ address sender_pc = (address) sender_abi->return_pc;
+
+ // We must always be able to find a recognizable pc.
+ CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
+ if (sender_blob == NULL) {
+ return false;
+ }
+
+ // Could be a zombie or unloaded method.
+ if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
+ return false;
+ }
+
+ // It should be safe to construct the sender though it might not be valid.
+
+ frame sender(sender_sp, sender_pc);
+
+ // Do we have a valid fp?
+ address sender_fp = (address) sender.fp();
+
+ // sender_fp must be within the stack and above (but not
+ // equal to) the current frame's fp.
+ if (sender_fp > thread->stack_base() || sender_fp <= fp) {
+ return false;
+ }
+
+ // If the potential sender is the interpreter then we can do some more checking.
+ if (Interpreter::contains(sender_pc)) {
+ return sender.is_interpreted_frame_valid(thread);
+ }
+
+ // Could just be some random pointer within the codeBlob.
+ if (!sender.cb()->code_contains(sender_pc)) {
+ return false;
+ }
+
+ // We should never be able to see an adapter if the current frame is something from the code cache.
+ if (sender_blob->is_adapter_blob()) {
+ return false;
+ }
+
+ if (sender.is_entry_frame()) {
+ return sender.is_entry_frame_valid(thread);
+ }
+
+ // Frame size is always greater than zero. If the sender frame size is zero or less,
+ // something is really weird and we had better give up.
+ if (sender_blob->frame_size() <= 0) {
+ return false;
+ }
+
+ return true;
+ }
+
+ // Must be a native-compiled frame. Since the sender will try to use fp to find
+ // linkages, it must be safe.
+
+ if (!fp_safe) {
+ return false;
+ }
+
+ return true;
}
bool frame::is_interpreted_frame() const {
--- a/src/hotspot/cpu/s390/frame_s390.hpp Tue Oct 09 14:30:06 2018 +0200
+++ b/src/hotspot/cpu/s390/frame_s390.hpp Tue Oct 09 15:06:27 2018 +0200
@@ -491,9 +491,12 @@
static int interpreter_frame_interpreterstate_size_in_bytes();
static int interpreter_frame_monitor_size_in_bytes();
+
+ // template interpreter state
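+ // Variant of ijava_state() that does not assert the magic number.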
+ inline z_ijava_state* ijava_state_unchecked() const;
+
private:
- // template interpreter state
inline z_ijava_state* ijava_state() const;
// Where z_ijava_state.monitors is saved.
--- a/src/hotspot/cpu/s390/frame_s390.inline.hpp Tue Oct 09 14:30:06 2018 +0200
+++ b/src/hotspot/cpu/s390/frame_s390.inline.hpp Tue Oct 09 15:06:27 2018 +0200
@@ -77,8 +77,13 @@
#endif
// template interpreter state
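+// Like ijava_state(), but without asserting that the magic number is present.
+// Used by the profiling code, which may encounter frames that are not yet fully set up.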
+inline frame::z_ijava_state* frame::ijava_state_unchecked() const {
+ z_ijava_state* state = (z_ijava_state*) ((uintptr_t)fp() - z_ijava_state_size);
+ return state;
+}
+
inline frame::z_ijava_state* frame::ijava_state() const {
- z_ijava_state* state = (z_ijava_state*) ((uintptr_t)fp() - z_ijava_state_size);
+ z_ijava_state* state = ijava_state_unchecked();
assert(state->magic == (intptr_t) frame::z_istate_magic_number,
"wrong z_ijava_state in interpreter frame (no magic found)");
return state;
--- a/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp Tue Oct 09 14:30:06 2018 +0200
+++ b/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp Tue Oct 09 15:06:27 2018 +0200
@@ -42,10 +42,51 @@
}
bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
- ucontext_t* uc = (ucontext_t*) ucontext;
- *fr_addr = frame((intptr_t*)uc->uc_mcontext.gregs[15/*REG_SP*/],
+ assert(this->is_Java_thread(), "must be JavaThread");
+
+ // If we have a last_Java_frame, then we should use it even if
+ // isInJava == true. It should be more reliable than ucontext info.
+ if (has_last_Java_frame() && frame_anchor()->walkable()) {
+ *fr_addr = pd_last_frame();
+ return true;
+ }
+
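+ // No walkable last_Java_frame; if the thread was executing Java code,
+ // try to construct a frame from the ucontext.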
+ if (isInJava) {
+ ucontext_t* uc = (ucontext_t*) ucontext;
+ frame ret_frame((intptr_t*)uc->uc_mcontext.gregs[15/*Z_SP*/],
(address)uc->uc_mcontext.psw.addr);
- return true;
+
+ if (ret_frame.pc() == NULL) {
+ // ucontext wasn't useful
+ return false;
+ }
+
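+ // For interpreted frames, validate the method pointer and the bcp saved in the
+ // interpreter state. If the saved bcp does not point into the method's bytecodes,
+ // fall back to the bcp register; give up if neither is usable.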
+ if (ret_frame.is_interpreted_frame()) {
+ frame::z_ijava_state* istate = ret_frame.ijava_state_unchecked();
+ if (!((Method*)(istate->method))->is_metaspace_object()) {
+ return false;
+ }
+ uint64_t reg_bcp = uc->uc_mcontext.gregs[13/*Z_BCP*/];
+ uint64_t istate_bcp = istate->bcp;
+ uint64_t code_start = (uint64_t)(((Method*)(istate->method))->code_base());
+ uint64_t code_end = (uint64_t)(((Method*)istate->method)->code_base() + ((Method*)istate->method)->code_size());
+ if (istate_bcp >= code_start && istate_bcp < code_end) {
+ // The saved bcp is valid; leave it untouched.
+ } else if (reg_bcp >= code_start && reg_bcp < code_end) {
+ istate->bcp = reg_bcp;
+ } else {
+ return false;
+ }
+ }
+ if (!ret_frame.safe_for_sender(this)) {
+ // nothing else to try if the frame isn't good
+ return false;
+ }
+ *fr_addr = ret_frame;
+ return true;
+ }
+ // nothing else to try
+ return false;
}
// Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Linux/S390x.