|
1 /* |
|
2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. |
|
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
|
4 * |
|
5 * This code is free software; you can redistribute it and/or modify it |
|
6 * under the terms of the GNU General Public License version 2 only, as |
|
7 * published by the Free Software Foundation. |
|
8 * |
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 * version 2 for more details (a copy is included in the LICENSE file that |
|
13 * accompanied this code). |
|
14 * |
|
15 * You should have received a copy of the GNU General Public License version |
|
16 * 2 along with this work; if not, write to the Free Software Foundation, |
|
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 * |
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
|
20 * or visit www.oracle.com if you need additional information or have any |
|
21 * questions. |
|
22 * |
|
23 */ |
|
24 |
|
25 #ifndef SHARE_VM_RUNTIME_VFRAME_HPP |
|
26 #define SHARE_VM_RUNTIME_VFRAME_HPP |
|
27 |
|
28 #include "code/debugInfo.hpp" |
|
29 #include "code/debugInfoRec.hpp" |
|
30 #include "code/location.hpp" |
|
31 #include "oops/oop.hpp" |
|
32 #include "runtime/frame.hpp" |
|
33 #include "runtime/frame.inline.hpp" |
|
34 #include "runtime/stackValue.hpp" |
|
35 #include "runtime/stackValueCollection.hpp" |
|
36 #include "utilities/growableArray.hpp" |
|
37 |
|
38 // vframes are virtual stack frames representing source level activations. |
|
39 // A single frame may hold several source level activations in the case of |
|
// optimized code. The debugging information stored with the optimized code enables
|
41 // us to unfold a frame as a stack of vframes. |
|
42 // A cVFrame represents an activation of a non-java method. |
|
43 |
|
44 // The vframe inheritance hierarchy: |
|
45 // - vframe |
|
46 // - javaVFrame |
|
47 // - interpretedVFrame |
|
48 // - compiledVFrame ; (used for both compiled Java methods and native stubs) |
|
49 // - externalVFrame |
|
50 // - entryVFrame ; special frame created when calling Java from C |
|
51 |
|
52 // - BasicLock |
|
53 |
|
// A vframe is a virtual stack frame representing a single source-level
// activation.  It is a ResourceObj; instances are only valid within a
// ResourceMark scope.
class vframe: public ResourceObj {
 protected:
  frame        _fr;      // Raw frame behind the virtual frame.
  RegisterMap  _reg_map; // Register map for the raw frame (used to handle callee-saved registers).
  JavaThread*  _thread;  // The thread owning the raw frame.

  vframe(const frame* fr, const RegisterMap* reg_map, JavaThread* thread);
  vframe(const frame* fr, JavaThread* thread);
 public:
  // Factory method for creating vframes; inspects the raw frame and returns
  // the appropriate concrete subclass.
  static vframe* new_vframe(const frame* f, const RegisterMap *reg_map, JavaThread* thread);

  // Accessors
  frame              fr()           const { return _fr;      }
  CodeBlob*          cb()           const { return _fr.cb(); }
  CompiledMethod*    nm()           const {
    assert( cb() != NULL && cb()->is_compiled(), "usage");
    return (CompiledMethod*) cb();
  }

  // ???? Does this need to be a copy?
  frame*             frame_pointer()      { return &_fr;      }
  const RegisterMap* register_map() const { return &_reg_map; }
  JavaThread*        thread()       const { return _thread;   }

  // Returns the sender vframe
  virtual vframe* sender() const;

  // Returns the next javaVFrame on the stack (skipping all other kinds of frame)
  javaVFrame *java_sender() const;

  // Answers if this is the top vframe in the frame, i.e., if the sender vframe
  // is in the caller frame
  virtual bool is_top() const { return true; }

  // Returns top vframe within same frame (see is_top())
  virtual vframe* top() const;

  // Type testing operations
  virtual bool is_entry_frame()       const { return false; }
  virtual bool is_java_frame()        const { return false; }
  virtual bool is_interpreted_frame() const { return false; }
  virtual bool is_compiled_frame()    const { return false; }

#ifndef PRODUCT
  // printing operations
  virtual void print_value() const;
  virtual void print();
#endif
};
|
104 |
|
105 |
|
// A javaVFrame is an activation of a Java method (interpreted or compiled).
class javaVFrame: public vframe {
 public:
  // JVM state
  virtual Method*                      method()      const = 0;
  virtual int                          bci()         const = 0;
  virtual StackValueCollection*        locals()      const = 0;
  virtual StackValueCollection*        expressions() const = 0;
  // the order returned by monitors() is from oldest -> youngest (see bug 4418568)
  virtual GrowableArray<MonitorInfo*>* monitors()    const = 0;

  // Debugging support via JVMTI.
  // NOTE that this is not guaranteed to give correct results for compiled vframes.
  // Deoptimize first if necessary.
  virtual void set_locals(StackValueCollection* values) const = 0;

  // Test operation
  bool is_java_frame() const { return true; }

 protected:
  javaVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : vframe(fr, reg_map, thread) {}
  javaVFrame(const frame* fr, JavaThread* thread) : vframe(fr, thread) {}

 public:
  // casting
  static javaVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_java_frame(), "must be java frame");
    return (javaVFrame*) vf;
  }

  // Return an array of monitors locked by this frame in the youngest to oldest order
  GrowableArray<MonitorInfo*>* locked_monitors();

  // printing used during stack dumps and diagnostics
  static void print_locked_object_class_name(outputStream* st, Handle obj, const char* lock_state);
  void print_lock_info_on(outputStream* st, int frame_count);
  void print_lock_info(int frame_count) { print_lock_info_on(tty, frame_count); }

#ifndef PRODUCT
 public:
  // printing operations
  void print();
  void print_value() const;
  void print_activation(int index) const;

  // verify operations
  virtual void verify() const;

  // Structural compare
  bool structural_compare(javaVFrame* other);
#endif
  friend class vframe;
};
|
158 |
|
// An interpretedVFrame is an activation running in the interpreter.
class interpretedVFrame: public javaVFrame {
 public:
  // JVM state
  Method*                      method()      const;
  int                          bci()         const;
  StackValueCollection*        locals()      const;
  StackValueCollection*        expressions() const;
  GrowableArray<MonitorInfo*>* monitors()    const;

  void set_locals(StackValueCollection* values) const;

  // Test operation
  bool is_interpreted_frame() const { return true; }

 protected:
  interpretedVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : javaVFrame(fr, reg_map, thread) {};

 public:
  // Accessors for Byte Code Pointer
  u_char* bcp() const;
  void set_bcp(u_char* bcp);

  // casting
  static interpretedVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_interpreted_frame(), "must be interpreted frame");
    return (interpretedVFrame*) vf;
  }

 private:
  static const int bcp_offset;
  intptr_t* locals_addr_at(int offset) const;
  StackValueCollection* stack_data(bool expressions) const;
  // returns where the parameters start relative to the frame pointer
  int start_of_parameters() const;

#ifndef PRODUCT
 public:
  // verify operations
  void verify() const;
#endif
  friend class vframe;
};
|
201 |
|
202 |
|
// An externalVFrame is an activation of a non-Java method (see the class
// hierarchy comment above: "cVFrame" activations).
class externalVFrame: public vframe {
 protected:
  externalVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : vframe(fr, reg_map, thread) {}

#ifndef PRODUCT
 public:
  // printing operations
  void print_value() const;
  void print();
#endif
  friend class vframe;
};
|
215 |
|
// An entryVFrame is the special frame created when calling Java from C.
class entryVFrame: public externalVFrame {
 public:
  bool is_entry_frame() const { return true; }

 protected:
  entryVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread);

 public:
  // casting
  static entryVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_entry_frame(), "must be entry frame");
    return (entryVFrame*) vf;
  }

#ifndef PRODUCT
 public:
  // printing
  void print_value() const;
  void print();
#endif
  friend class vframe;
};
|
238 |
|
239 |
|
// A MonitorInfo is a ResourceObj that describes the pair:
|
241 // 1) the owner of the monitor |
|
242 // 2) the monitor lock |
|
243 class MonitorInfo : public ResourceObj { |
|
244 private: |
|
245 oop _owner; // the object owning the monitor |
|
246 BasicLock* _lock; |
|
247 oop _owner_klass; // klass (mirror) if owner was scalar replaced |
|
248 bool _eliminated; |
|
249 bool _owner_is_scalar_replaced; |
|
250 public: |
|
251 // Constructor |
|
252 MonitorInfo(oop owner, BasicLock* lock, bool eliminated, bool owner_is_scalar_replaced) { |
|
253 if (!owner_is_scalar_replaced) { |
|
254 _owner = owner; |
|
255 _owner_klass = NULL; |
|
256 } else { |
|
257 assert(eliminated, "monitor should be eliminated for scalar replaced object"); |
|
258 _owner = NULL; |
|
259 _owner_klass = owner; |
|
260 } |
|
261 _lock = lock; |
|
262 _eliminated = eliminated; |
|
263 _owner_is_scalar_replaced = owner_is_scalar_replaced; |
|
264 } |
|
265 // Accessors |
|
266 oop owner() const { |
|
267 assert(!_owner_is_scalar_replaced, "should not be called for scalar replaced object"); |
|
268 return _owner; |
|
269 } |
|
270 oop owner_klass() const { |
|
271 assert(_owner_is_scalar_replaced, "should not be called for not scalar replaced object"); |
|
272 return _owner_klass; |
|
273 } |
|
274 BasicLock* lock() const { return _lock; } |
|
275 bool eliminated() const { return _eliminated; } |
|
276 bool owner_is_scalar_replaced() const { return _owner_is_scalar_replaced; } |
|
277 }; |
|
278 |
|
// Common state and inlined walking machinery for vframe streams.  A stream
// iterates over the virtual (source-level) frames of a thread's stack,
// unfolding inlined scopes of compiled frames as it goes.
class vframeStreamCommon : StackObj {
 protected:
  // common
  frame        _frame;   // Current raw frame.
  JavaThread*  _thread;  // Thread whose stack is being walked.
  RegisterMap  _reg_map; // Register map used to find senders of _frame.
  enum { interpreted_mode, compiled_mode, at_end_mode } _mode;

  int _sender_decode_offset; // Debug-info offset of the inlined caller scope,
                             // or serialized_null if there is none.

  // Cached information for the current vframe
  Method* _method;
  int     _bci;

  // Should VM activations be ignored or not
  bool _stop_at_java_call_stub;

  bool fill_in_compiled_inlined_sender();
  void fill_from_compiled_frame(int decode_offset);
  void fill_from_compiled_native_frame();

  void fill_from_interpreter_frame();
  bool fill_from_frame();

  // Helper routine for security_get_caller_frame
  void skip_prefixed_method_and_wrappers();

  DEBUG_ONLY(void found_bad_method_frame() const;)

 public:
  // Constructor.  NOTE: only _thread and _reg_map are initialized here;
  // concrete subclasses (see vframeStream below) are responsible for setting
  // _stop_at_java_call_stub, _frame, and _mode before iteration starts.
  vframeStreamCommon(JavaThread* thread) : _reg_map(thread, false) {
    _thread = thread;
  }

  // Accessors
  Method*   method()   const { return _method;     }
  int       bci()      const { return _bci;        }
  intptr_t* frame_id() const { return _frame.id(); }
  address   frame_pc() const { return _frame.pc(); }

  CodeBlob*       cb() const { return _frame.cb(); }
  CompiledMethod* nm() const {
    assert( cb() != NULL && cb()->is_compiled(), "usage");
    return (CompiledMethod*) cb();
  }

  // Frame type
  bool is_interpreted_frame() const { return _frame.is_interpreted_frame(); }
  bool is_entry_frame()       const { return _frame.is_entry_frame(); }

  // Iteration: advance to the next virtual frame.
  void next() {
    // handle frames with inlining: stay in the same raw frame, move to the
    // inlined caller scope if there is one
    if (_mode == compiled_mode && fill_in_compiled_inlined_sender()) return;

    // handle general case: walk raw sender frames until one yields a vframe
    do {
      _frame = _frame.sender(&_reg_map);
    } while (!fill_from_frame());
  }
  void security_next();

  bool at_end() const { return _mode == at_end_mode; }

  // Implements security traversal. Skips depth no. of frames including
  // special security frames and prefixed native methods
  void security_get_caller_frame(int depth);

  // Helper routine for JVM_LatestUserDefinedLoader -- needed for 1.4
  // reflection implementation
  void skip_reflection_related_frames();
};
|
352 |
|
353 class vframeStream : public vframeStreamCommon { |
|
354 public: |
|
355 // Constructors |
|
356 vframeStream(JavaThread* thread, bool stop_at_java_call_stub = false) |
|
357 : vframeStreamCommon(thread) { |
|
358 _stop_at_java_call_stub = stop_at_java_call_stub; |
|
359 |
|
360 if (!thread->has_last_Java_frame()) { |
|
361 _mode = at_end_mode; |
|
362 return; |
|
363 } |
|
364 |
|
365 _frame = _thread->last_frame(); |
|
366 while (!fill_from_frame()) { |
|
367 _frame = _frame.sender(&_reg_map); |
|
368 } |
|
369 } |
|
370 |
|
371 // top_frame may not be at safepoint, start with sender |
|
372 vframeStream(JavaThread* thread, frame top_frame, bool stop_at_java_call_stub = false); |
|
373 }; |
|
374 |
|
375 |
|
376 inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() { |
|
377 if (_sender_decode_offset == DebugInformationRecorder::serialized_null) { |
|
378 return false; |
|
379 } |
|
380 fill_from_compiled_frame(_sender_decode_offset); |
|
381 return true; |
|
382 } |
|
383 |
|
384 |
|
// Decode the scope at decode_offset in the current compiled method and cache
// its method/bci; invalid offsets fall back to a native-style fill.
inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
  _mode = compiled_mode;

  // Range check to detect ridiculous offsets.
  if (decode_offset == DebugInformationRecorder::serialized_null ||
      decode_offset < 0 ||
      decode_offset >= nm()->scopes_data_size()) {
    // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
    // If we read nmethod::scopes_data at serialized_null (== 0)
    // or at some other invalid offset, invalid values will be decoded.
    // Based on these values, invalid heap locations could be referenced
    // that could lead to crashes in product mode.
    // Therefore, do not use the decode offset if invalid, but fill the frame
    // as if it were a native compiled frame (no Java-level assumptions).
#ifdef ASSERT
    if (WizardMode) {
      ttyLocker ttyl;
      tty->print_cr("Error in fill_from_frame: pc_desc for "
                    INTPTR_FORMAT " not found or invalid at %d",
                    p2i(_frame.pc()), decode_offset);
      nm()->print();
      nm()->method()->print_codes();
      nm()->print_code();
      nm()->print_pcs();
    }
    found_bad_method_frame();
#endif
    // Provide a cheap fallback in product mode. (See comment above.)
    fill_from_compiled_native_frame();
    return;
  }

  // Decode first part of scopeDesc; the stream layout is
  // sender-decode-offset, method, bci -- reads must stay in this order.
  DebugInfoReadStream buffer(nm(), decode_offset);
  _sender_decode_offset = buffer.read_int();
  _method               = buffer.read_method();
  _bci                  = buffer.read_bci();

  assert(_method->is_method(), "checking type of decoded method");
}
|
425 |
|
426 // The native frames are handled specially. We do not rely on ScopeDesc info |
|
427 // since the pc might not be exact due to the _last_native_pc trick. |
|
428 inline void vframeStreamCommon::fill_from_compiled_native_frame() { |
|
429 _mode = compiled_mode; |
|
430 _sender_decode_offset = DebugInformationRecorder::serialized_null; |
|
431 _method = nm()->method(); |
|
432 _bci = 0; |
|
433 } |
|
434 |
|
// Set up the stream state for the current raw _frame.  Returns true if the
// frame yielded a vframe (or ended the walk); false tells the caller to
// advance to the sender frame and try again.
inline bool vframeStreamCommon::fill_from_frame() {
  // Interpreted frame
  if (_frame.is_interpreted_frame()) {
    fill_from_interpreter_frame();
    return true;
  }

  // Compiled frame

  if (cb() != NULL && cb()->is_compiled()) {
    if (nm()->is_native_method()) {
      // Do not rely on scopeDesc since the pc might be unprecise due to the _last_native_pc trick.
      fill_from_compiled_native_frame();
    } else {
      PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
      int decode_offset;
      if (pc_desc == NULL) {
        // Should not happen, but let fill_from_compiled_frame handle it.

        // If we are trying to walk the stack of a thread that is not
        // at a safepoint (like AsyncGetCallTrace would do) then this is an
        // acceptable result. [ This is assuming that safe_for_sender
        // is so bullet proof that we can trust the frames it produced. ]
        //
        // So if we see that the thread is not safepoint safe
        // then simply produce the method and a bci of zero
        // and skip the possibility of decoding any inlining that
        // may be present. That is far better than simply stopping (or
        // asserting). If however the thread is safepoint safe this
        // is the sign of a compiler bug and we'll let
        // fill_from_compiled_frame handle it.


        JavaThreadState state = _thread->thread_state();

        // in_Java should be good enough to test safepoint safety;
        // if state were say in_Java_trans then we'd expect that
        // the pc would have already been slightly adjusted to
        // one that would produce a pcDesc since the trans state
        // would be one that might in fact anticipate a safepoint

        if (state == _thread_in_Java ) {
          // This will get the method, a zero bci, and no inlining.
          // Might be nice to have a unique bci to signify this
          // particular case but for now zero will do.

          fill_from_compiled_native_frame();

          // There is something to be said for setting the mode to
          // at_end_mode to prevent trying to walk further up the
          // stack. There is evidence that if we walk any further
          // that we could produce a bad stack chain. However until
          // we see evidence that allowing this causes us to find
          // frames bad enough to cause segv's or assertion failures
          // we don't do it as while we may get a bad call chain the
          // probability is much higher (several magnitudes) that we
          // get good data.

          return true;
        }
        decode_offset = DebugInformationRecorder::serialized_null;
      } else {
        decode_offset = pc_desc->scope_decode_offset();
      }
      fill_from_compiled_frame(decode_offset);
    }
    return true;
  }

  // End of stack?
  if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
    _mode = at_end_mode;
    return true;
  }

  return false;
}
|
512 |
|
513 |
|
514 inline void vframeStreamCommon::fill_from_interpreter_frame() { |
|
515 Method* method = _frame.interpreter_frame_method(); |
|
516 address bcp = _frame.interpreter_frame_bcp(); |
|
517 int bci = method->validate_bci_from_bcp(bcp); |
|
518 // 6379830 AsyncGetCallTrace sometimes feeds us wild frames. |
|
519 // AsyncGetCallTrace interrupts the VM asynchronously. As a result |
|
520 // it is possible to access an interpreter frame for which |
|
521 // no Java-level information is yet available (e.g., becasue |
|
522 // the frame was being created when the VM interrupted it). |
|
523 // In this scenario, pretend that the interpreter is at the point |
|
524 // of entering the method. |
|
525 if (bci < 0) { |
|
526 DEBUG_ONLY(found_bad_method_frame();) |
|
527 bci = 0; |
|
528 } |
|
529 _mode = interpreted_mode; |
|
530 _method = method; |
|
531 _bci = bci; |
|
532 } |
|
533 |
|
534 #endif // SHARE_VM_RUNTIME_VFRAME_HPP |