305 |
304 |
  // Debug-only hook, invoked when stack walking decodes an invalid
  // method/bci (declaration only; defined out of line).
  DEBUG_ONLY(void found_bad_method_frame() const;)

 public:
  // Constructor
  // Binds the stream to 'thread' and constructs the RegisterMap used for
  // sender-frame traversal.
  // NOTE(review): the 'false' argument presumably disables register-map
  // updating -- confirm against the RegisterMap constructor.
  vframeStreamCommon(JavaThread* thread) : _reg_map(thread, false) {
    _thread = thread;
  }
  // Accessors

  // Method and bci of the current Java-level frame (for compiled frames
  // these are filled from the decoded scope, so they may describe an
  // inlined scope rather than the physical frame's outermost method).
  Method* method() const { return _method; }
  int bci() const { return _bci; }

  // Identity and pc of the current physical frame.
  intptr_t* frame_id() const { return _frame.id(); }
  address frame_pc() const { return _frame.pc(); }

  // Code blob of the current physical frame.
  CodeBlob* cb() const { return _frame.cb(); }
  // Narrowing accessor: only legal when the current blob is compiled code,
  // as the assert enforces.
  CompiledMethod* nm() const {
    assert( cb() != NULL && cb()->is_compiled(), "usage");
    return (CompiledMethod*) cb();
  }

  // Frame type
  bool is_interpreted_frame() const { return _frame.is_interpreted_frame(); }
  bool is_entry_frame() const { return _frame.is_entry_frame(); }
329 |
326 |
330 // Iteration |
327 // Iteration |
331 void next() { |
328 inline void next(); |
332 // handle frames with inlining |
|
333 if (_mode == compiled_mode && fill_in_compiled_inlined_sender()) return; |
|
334 |
|
335 // handle general case |
|
336 do { |
|
337 _frame = _frame.sender(&_reg_map); |
|
338 } while (!fill_from_frame()); |
|
339 } |
|
  // Presumably a security-traversal-aware variant of next(); declaration
  // only -- see the out-of-line definition for the skipping rules.
  void security_next();

  // True once iteration has walked off the end of the usable stack.
  bool at_end() const { return _mode == at_end_mode; }
344 // Implements security traversal. Skips depth no. of frame including |
333 // Implements security traversal. Skips depth no. of frame including |
351 }; |
340 }; |
352 |
341 |
353 class vframeStream : public vframeStreamCommon { |
342 class vframeStream : public vframeStreamCommon { |
354 public: |
343 public: |
355 // Constructors |
344 // Constructors |
356 vframeStream(JavaThread* thread, bool stop_at_java_call_stub = false) |
345 vframeStream(JavaThread* thread, bool stop_at_java_call_stub = false); |
357 : vframeStreamCommon(thread) { |
|
358 _stop_at_java_call_stub = stop_at_java_call_stub; |
|
359 |
|
360 if (!thread->has_last_Java_frame()) { |
|
361 _mode = at_end_mode; |
|
362 return; |
|
363 } |
|
364 |
|
365 _frame = _thread->last_frame(); |
|
366 while (!fill_from_frame()) { |
|
367 _frame = _frame.sender(&_reg_map); |
|
368 } |
|
369 } |
|
370 |
346 |
371 // top_frame may not be at safepoint, start with sender |
347 // top_frame may not be at safepoint, start with sender |
372 vframeStream(JavaThread* thread, frame top_frame, bool stop_at_java_call_stub = false); |
348 vframeStream(JavaThread* thread, frame top_frame, bool stop_at_java_call_stub = false); |
373 }; |
349 }; |
374 |
350 |
375 |
|
376 inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() { |
|
377 if (_sender_decode_offset == DebugInformationRecorder::serialized_null) { |
|
378 return false; |
|
379 } |
|
380 fill_from_compiled_frame(_sender_decode_offset); |
|
381 return true; |
|
382 } |
|
383 |
|
384 |
|
// Decode the scope description at 'decode_offset' in the current compiled
// method and fill _method/_bci/_sender_decode_offset from it.  Offsets that
// fail the range check (as can be fed in by asynchronous profilers) fall
// back to a native-style fill so that no wild data is ever dereferenced.
inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
  _mode = compiled_mode;

  // Range check to detect ridiculous offsets.
  if (decode_offset == DebugInformationRecorder::serialized_null ||
      decode_offset < 0 ||
      decode_offset >= nm()->scopes_data_size()) {
    // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
    // If we read nmethod::scopes_data at serialized_null (== 0)
    // or if read some at other invalid offset, invalid values will be decoded.
    // Based on these values, invalid heap locations could be referenced
    // that could lead to crashes in product mode.
    // Therefore, do not use the decode offset if invalid, but fill the frame
    // as if it were a native compiled frame (no Java-level assumptions).
#ifdef ASSERT
    if (WizardMode) {
      ttyLocker ttyl;
      tty->print_cr("Error in fill_from_frame: pc_desc for "
                    INTPTR_FORMAT " not found or invalid at %d",
                    p2i(_frame.pc()), decode_offset);
      nm()->print();
      nm()->method()->print_codes();
      nm()->print_code();
      nm()->print_pcs();
    }
    found_bad_method_frame();
#endif
    // Provide a cheap fallback in product mode. (See comment above.)
    fill_from_compiled_native_frame();
    return;
  }

  // Decode first part of scopeDesc
  DebugInfoReadStream buffer(nm(), decode_offset);
  _sender_decode_offset = buffer.read_int();
  _method = buffer.read_method();
  _bci = buffer.read_bci();

  assert(_method->is_method(), "checking type of decoded method");
}
425 |
|
// The native frames are handled specially. We do not rely on ScopeDesc info
// since the pc might not be exact due to the _last_native_pc trick.
// Fills the stream with the native method itself: no inlined sender scope
// (serialized_null) and a synthetic bci of 0.
inline void vframeStreamCommon::fill_from_compiled_native_frame() {
  _mode = compiled_mode;
  _sender_decode_offset = DebugInformationRecorder::serialized_null;
  _method = nm()->method();
  _bci = 0;
}
434 |
|
// Attempt to fill the stream from the current physical frame.  Returns true
// when the frame yielded Java-level information or iteration has reached the
// end of the stack; returns false when the caller should advance to the
// sender frame and retry.
inline bool vframeStreamCommon::fill_from_frame() {
  // Interpreted frame
  if (_frame.is_interpreted_frame()) {
    fill_from_interpreter_frame();
    return true;
  }

  // Compiled frame

  if (cb() != NULL && cb()->is_compiled()) {
    if (nm()->is_native_method()) {
      // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick.
      fill_from_compiled_native_frame();
    } else {
      PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
      int decode_offset;
      if (pc_desc == NULL) {
        // Should not happen, but let fill_from_compiled_frame handle it.

        // If we are trying to walk the stack of a thread that is not
        // at a safepoint (like AsyncGetCallTrace would do) then this is an
        // acceptable result. [ This is assuming that safe_for_sender
        // is so bullet proof that we can trust the frames it produced. ]
        //
        // So if we see that the thread is not safepoint safe
        // then simply produce the method and a bci of zero
        // and skip the possibility of decoding any inlining that
        // may be present. That is far better than simply stopping (or
        // asserting). If however the thread is safepoint safe this
        // is the sign of a compiler bug and we'll let
        // fill_from_compiled_frame handle it.


        JavaThreadState state = _thread->thread_state();

        // in_Java should be good enough to test safepoint safety
        // if state were say in_Java_trans then we'd expect that
        // the pc would have already been slightly adjusted to
        // one that would produce a pcDesc since the trans state
        // would be one that might in fact anticipate a safepoint

        if (state == _thread_in_Java ) {
          // This will get a method a zero bci and no inlining.
          // Might be nice to have a unique bci to signify this
          // particular case but for now zero will do.

          fill_from_compiled_native_frame();

          // There is something to be said for setting the mode to
          // at_end_mode to prevent trying to walk further up the
          // stack. There is evidence that if we walk any further
          // that we could produce a bad stack chain. However until
          // we see evidence that allowing this causes us to find
          // frames bad enough to cause segv's or assertion failures
          // we don't do it as while we may get a bad call chain the
          // probability is much higher (several magnitudes) that we
          // get good data.

          return true;
        }
        decode_offset = DebugInformationRecorder::serialized_null;
      } else {
        decode_offset = pc_desc->scope_decode_offset();
      }
      fill_from_compiled_frame(decode_offset);
    }
    return true;
  }

  // End of stack?
  if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
    _mode = at_end_mode;
    return true;
  }

  return false;
}
512 |
|
513 |
|
514 inline void vframeStreamCommon::fill_from_interpreter_frame() { |
|
515 Method* method = _frame.interpreter_frame_method(); |
|
516 address bcp = _frame.interpreter_frame_bcp(); |
|
517 int bci = method->validate_bci_from_bcp(bcp); |
|
518 // 6379830 AsyncGetCallTrace sometimes feeds us wild frames. |
|
519 // AsyncGetCallTrace interrupts the VM asynchronously. As a result |
|
520 // it is possible to access an interpreter frame for which |
|
521 // no Java-level information is yet available (e.g., becasue |
|
522 // the frame was being created when the VM interrupted it). |
|
523 // In this scenario, pretend that the interpreter is at the point |
|
524 // of entering the method. |
|
525 if (bci < 0) { |
|
526 DEBUG_ONLY(found_bad_method_frame();) |
|
527 bci = 0; |
|
528 } |
|
529 _mode = interpreted_mode; |
|
530 _method = method; |
|
531 _bci = bci; |
|
532 } |
|
533 |
|
534 #endif // SHARE_VM_RUNTIME_VFRAME_HPP |
351 #endif // SHARE_VM_RUNTIME_VFRAME_HPP |