@@ -142,10 +142,46 @@
   intptr_t* fp;
   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
   return frame(sp, epc.pc());
 }
 
+bool os::Linux::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Linux::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    // The interpreter performs stack banging after the fixed frame header has
+    // been generated, while the compilers perform it before. To maintain
+    // semantic consistency between interpreted and compiled frames, this
+    // method returns the Java sender of the current frame.
+    *fr = os::fetch_frame_from_context(uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // More complex case: compiled code.
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to; fall back to the default
+      // stack overflow handling. In compiled code, we bang before
+      // the frame is complete.
+      return false;
+    } else {
+      intptr_t* fp = os::Linux::ucontext_get_fp(uc);
+      intptr_t* sp = os::Linux::ucontext_get_sp(uc);
+      *fr = frame(sp, (address)*sp);
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        assert(!fr->is_first_frame(), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
 frame os::get_sender_for_C_frame(frame* fr) {
   if (*fr->sp() == 0) {
     // fr is the last C frame.
     return frame();
   }
@@ -277,17 +313,35 @@
 
     // Check if fault address is within thread stack.
     if (thread->on_local_stack(addr)) {
       // stack overflow
       if (thread->in_stack_yellow_reserved_zone(addr)) {
-        thread->disable_stack_yellow_reserved_zone();
         if (thread->thread_state() == _thread_in_Java) {
+          if (thread->in_stack_reserved_zone(addr)) {
+            frame fr;
+            if (os::Linux::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+              assert(fr.is_java_frame(), "Must be a Java frame");
+              frame activation =
+                SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+              if (activation.sp() != NULL) {
+                thread->disable_stack_reserved_zone();
+                if (activation.is_interpreted_frame()) {
+                  thread->set_reserved_stack_activation((address)activation.fp());
+                } else {
+                  thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                }
+                return 1;
+              }
+            }
+          }
           // Throw a stack overflow exception.
           // Guard pages will be reenabled while unwinding the stack.
+          thread->disable_stack_yellow_reserved_zone();
           stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
         } else {
           // Thread was in the vm or native code. Return and try to finish.
+          thread->disable_stack_yellow_reserved_zone();
           return 1;
         }
       } else if (thread->in_stack_red_zone(addr)) {
         // Fatal red zone violation. Disable the guard pages and fall through
         // to handle_unexpected_exception way down below.
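Aside (editor's illustration, not part of the patch): the new handler path rebuilds a frame for the stack-banging point from the raw sp and pc captured in the signal context, then walks to its Java sender before searching for a method annotated for reserved-stack access. As a standalone sketch of the underlying frame-linkage idea, namely that an activation's frame address and its caller's resume pc are observable at runtime, the toy below uses the GCC/Clang builtins. It assumes a non-inlined, frame-pointer-preserving build (e.g. g++ -O0 -fno-omit-frame-pointer) and is unrelated to HotSpot's frame class; all names in it are illustrative only.

#include <cstdio>

// Toy only: at a given point in a call chain, observe the current frame
// address and the saved return pc (the caller's resume address). The patch
// above recovers the analogous sp/pc pair from the ucontext delivered with
// the SIGSEGV raised by the stack bang.
__attribute__((noinline)) static void banging_point() {
  void* frame_addr = __builtin_frame_address(0);   // this activation's frame
  void* caller_pc  = __builtin_return_address(0);  // pc execution returns to
  std::printf("frame = %p, caller pc = %p\n", frame_addr, caller_pc);
}

__attribute__((noinline)) static void caller() {
  banging_point();
}

int main() {
  caller();
  return 0;
}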