hotspot/src/cpu/x86/vm/frame_x86.cpp
changeset 354 3b42d6fdcb82
parent 1 489c9b5090e2
child 670 ddf3e9583f2f
comparing 353:346ac690301f with 354:3b42d6fdcb82
@@ -35,43 +35,185 @@
 
 bool frame::safe_for_sender(JavaThread *thread) {
   address   sp = (address)_sp;
   address   fp = (address)_fp;
   address   unextended_sp = (address)_unextended_sp;
-  bool sp_safe = (sp != NULL &&
-                 (sp <= thread->stack_base()) &&
-                 (sp >= thread->stack_base() - thread->stack_size()));
-  bool unextended_sp_safe = (unextended_sp != NULL &&
-                 (unextended_sp <= thread->stack_base()) &&
-                 (unextended_sp >= thread->stack_base() - thread->stack_size()));
-  bool fp_safe = (fp != NULL &&
-                 (fp <= thread->stack_base()) &&
-                 (fp >= thread->stack_base() - thread->stack_size()));
-  if (sp_safe && unextended_sp_safe && fp_safe) {
+  // sp must be within the stack
+  bool sp_safe = (sp <= thread->stack_base()) &&
+                 (sp >= thread->stack_base() - thread->stack_size());
+
+  if (!sp_safe) {
+    return false;
+  }
+
+  // unextended sp must be within the stack and above or equal to sp
+  bool unextended_sp_safe = (unextended_sp <= thread->stack_base()) &&
+                            (unextended_sp >= sp);
+
+  if (!unextended_sp_safe) {
+    return false;
+  }
+
+  // an fp must be within the stack and above (but not equal) sp
+  bool fp_safe = (fp <= thread->stack_base()) && (fp > sp);
+
+  // We know sp/unextended_sp are safe; only fp is questionable here
+
+  // If the current frame is known to the code cache then we can attempt
+  // to construct the sender and do some validation of it. This goes a long way
+  // toward eliminating issues when we get into frame construction code
+
+  if (_cb != NULL ) {
+
+    // First check if frame is complete and tester is reliable
     // Unfortunately we can only check frame complete for runtime stubs and nmethod
     // other generic buffer blobs are more problematic so we just assume they are
     // ok. adapter blobs never have a frame complete and are never ok.
-    if (_cb != NULL && !_cb->is_frame_complete_at(_pc)) {
+
+    if (!_cb->is_frame_complete_at(_pc)) {
       if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
         return false;
       }
     }
+    // Entry frame checks
+    if (is_entry_frame()) {
+      // an entry frame must have a valid fp.
+
+      if (!fp_safe) return false;
+
+      // Validate the JavaCallWrapper an entry frame must have
+
+      address jcw = (address)entry_frame_call_wrapper();
+
+      bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > fp);
+
+      return jcw_safe;
+
+    }
+
+    intptr_t* sender_sp = NULL;
+    address   sender_pc = NULL;
+
+    if (is_interpreted_frame()) {
+      // fp must be safe
+      if (!fp_safe) {
+        return false;
+      }
+
+      sender_pc = (address) this->fp()[return_addr_offset];
+      sender_sp = (intptr_t*) addr_at(sender_sp_offset);
+
+    } else {
+      // must be some sort of compiled/runtime frame
+      // fp does not have to be safe (although it could be checked for c1?)
+
+      sender_sp = _unextended_sp + _cb->frame_size();
+      // On Intel the return_address is always the word on the stack
+      sender_pc = (address) *(sender_sp-1);
+    }
+
+    // We must always be able to find a recognizable pc
+    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
+    if (sender_pc == NULL ||  sender_blob == NULL) {
+      return false;
+    }
+
+
+    // If the potential sender is the interpreter then we can do some more checking
+    if (Interpreter::contains(sender_pc)) {
+
+      // ebp is always saved in a recognizable place in any code we generate. However
+      // only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved ebp
+      // is really a frame pointer.
+
+      intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
+      bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);
+
+      if (!saved_fp_safe) {
+        return false;
+      }
+
+      // construct the potential sender
+
+      frame sender(sender_sp, saved_fp, sender_pc);
+
+      return sender.is_interpreted_frame_valid(thread);
+
+    }
+
+    // Could just be some random pointer within the codeBlob
+
+    if (!sender_blob->instructions_contains(sender_pc)) return false;
+
+    // We should never be able to see an adapter if the current frame is something from code cache
+
+    if ( sender_blob->is_adapter_blob()) {
+      return false;
+    }
+
+    // Could be the call_stub
+
+    if (StubRoutines::returns_to_call_stub(sender_pc)) {
+      intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
+      bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);
+
+      if (!saved_fp_safe) {
+        return false;
+      }
+
+      // construct the potential sender
+
+      frame sender(sender_sp, saved_fp, sender_pc);
+
+      // Validate the JavaCallWrapper an entry frame must have
+      address jcw = (address)sender.entry_frame_call_wrapper();
+
+      bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > (address)sender.fp());
+
+      return jcw_safe;
+    }
+
+    // If the frame size is 0 something is bad because every nmethod has a non-zero frame size
+    // because the return address counts against the callee's frame.
+
+    if (sender_blob->frame_size() == 0) {
+      assert(!sender_blob->is_nmethod(), "should count return address at least");
+      return false;
+    }
+
+    // We should never be able to see anything here except an nmethod. If something in the
+    // code cache (current frame) is called by an entity within the code cache that entity
+    // should not be anything but the call stub (already covered), the interpreter (already covered)
+    // or an nmethod.
+
+    assert(sender_blob->is_nmethod(), "Impossible call chain");
+
+    // Could put some more validation for the potential non-interpreted sender
+    // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
+
+    // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb
+
+    // We've validated the potential sender that would be created
     return true;
   }
-  // Note: fp == NULL is not really a prerequisite for this to be safe to
-  // walk for c2. However we've modified the code such that if we get
-  // a failure with fp != NULL that we then try with FP == NULL.
-  // This is basically to mimic what a last_frame would look like if
-  // c2 had generated it.
-  if (sp_safe && unextended_sp_safe && fp == NULL) {
-    // frame must be complete if fp == NULL as fp == NULL is only sensible
-    // if we are looking at a nmethod and frame complete assures us of that.
-    if (_cb != NULL && _cb->is_frame_complete_at(_pc) && _cb->is_compiled_by_c2()) {
-        return true;
-    }
-  }
-  return false;
+
+  // Must be native-compiled frame. Since sender will try and use fp to find
+  // linkages it must be safe
+
+  if (!fp_safe) {
+    return false;
+  }
+
+  // Will the pc we fetch be non-zero (which we'll find at the oldest frame)
+
+  if ( (address) this->fp()[return_addr_offset] == NULL) return false;
+
+
+  // could try and do some more potential verification of native frame if we could think of some...
+
+  return true;
+
 }
 
 
 void frame::patch_pc(Thread* thread, address pc) {
   if (TracePcPatching) {
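
The new prologue above replaces the single combined test with an early-exit bounds check per register: the stack grows downward from thread->stack_base() over thread->stack_size() bytes, so a candidate sp must lie in [stack_base - stack_size, stack_base], and fp is only trusted if it additionally sits strictly above sp. A minimal standalone sketch of that invariant (StackBounds, sp_plausible and fp_plausible are hypothetical names, not HotSpot code):

  #include <cstddef>
  #include <cstdint>
  #include <iostream>

  // Hypothetical stand-in for thread->stack_base()/stack_size().
  struct StackBounds {
    const uint8_t* base;   // highest address of the stack (it grows downward)
    size_t         size;   // total size in bytes
  };

  // sp is plausible if it lies within [base - size, base].
  static bool sp_plausible(const StackBounds& s, const uint8_t* sp) {
    return sp <= s.base && sp >= s.base - s.size;
  }

  // fp must be within the stack and strictly above sp, mirroring
  // "(fp <= thread->stack_base()) && (fp > sp)" in the patch.
  static bool fp_plausible(const StackBounds& s, const uint8_t* sp, const uint8_t* fp) {
    return fp <= s.base && fp > sp;
  }

  int main() {
    uint8_t stack[4096];
    StackBounds s{stack + sizeof(stack), sizeof(stack)};
    const uint8_t* sp = stack + 1024;
    const uint8_t* fp = stack + 1040;   // caller data lives above sp
    std::cout << sp_plausible(s, sp) << ' ' << fp_plausible(s, sp, fp) << '\n';  // 1 1
    std::cout << fp_plausible(s, sp, sp) << '\n';                                // 0: fp == sp rejected
  }

The strict fp > sp requirement is also why the entry-frame and interpreted-frame paths above only need to test fp_safe before dereferencing fp()[return_addr_offset].
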
@@ -290,11 +432,11 @@
 
 void frame::pd_gc_epilog() {
   // nothing done here now
 }
 
-bool frame::is_interpreted_frame_valid() const {
+bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
 // QQQ
 #ifdef CC_INTERP
 #else
   assert(is_interpreted_frame(), "Not an interpreted frame");
   // These are reasonable sanity checks
@@ -310,13 +452,49 @@
   // These are hacks to keep us out of trouble.
   // The problem with these is that they mask other problems
   if (fp() <= sp()) {        // this attempts to deal with unsigned comparison above
     return false;
   }
-  if (fp() - sp() > 4096) {  // stack frames shouldn't be large.
-    return false;
-  }
+
+  // do some validation of frame elements
+
+  // first the method
+
+  methodOop m = *interpreter_frame_method_addr();
+
+  // validate the method we'd find in this potential sender
+  if (!Universe::heap()->is_valid_method(m)) return false;
+
+  // stack frames shouldn't be much larger than max_stack elements
+
+  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) {
+    return false;
+  }
+
+  // validate bci/bcx
+
+  intptr_t  bcx    = interpreter_frame_bcx();
+  if (m->validate_bci_from_bcx(bcx) < 0) {
+    return false;
+  }
+
+  // validate constantPoolCacheOop
+
+  constantPoolCacheOop cp = *interpreter_frame_cache_addr();
+
+  if (cp == NULL ||
+      !Space::is_aligned(cp) ||
+      !Universe::heap()->is_permanent((void*)cp)) return false;
+
+  // validate locals
+
+  address locals =  (address) *interpreter_frame_locals_addr();
+
+  if (locals > thread->stack_base() || locals < (address) fp()) return false;
+
+  // We'd have to be pretty unlucky to be misled at this point
+
 #endif // CC_INTERP
   return true;
 }
 
 BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
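
The rewritten check above drops the flat 4096-byte ceiling and instead bounds fp() - sp() by 1024 bytes of slack plus the method's own max_stack expression-stack slots, before cross-checking the bcx, constant pool cache and locals pointers. A rough sketch of just the size bound, assuming one expression-stack slot is one machine word (frame_size_plausible, kStackElementSize and kSlackBytes are illustrative names; the real values come from Interpreter::stackElementSize() and the methodOop):

  #include <cstddef>
  #include <cstdint>
  #include <iostream>

  // Assumed constants; in HotSpot these come from the interpreter and the method.
  const size_t kStackElementSize = sizeof(intptr_t);  // bytes per expression-stack slot (assumed)
  const size_t kSlackBytes       = 1024;              // fixed allowance used in the patch

  // Mirrors "fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()".
  static bool frame_size_plausible(const uint8_t* fp, const uint8_t* sp, int max_stack) {
    size_t limit = kSlackBytes + static_cast<size_t>(max_stack) * kStackElementSize;
    return static_cast<size_t>(fp - sp) <= limit;
  }

  int main() {
    uint8_t stack[8192];
    const uint8_t* sp = stack;
    std::cout << frame_size_plausible(sp + 1200, sp, 10) << '\n';  // 0: too large for max_stack == 10
    std::cout << frame_size_plausible(sp + 1200, sp, 64) << '\n';  // 1: within 1024 + 64 slots
  }
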