hotspot/src/share/vm/runtime/deoptimization.cpp
       
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_deoptimization.cpp.incl"

bool DeoptimizationMarker::_is_active = false;

Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
                                         int  caller_adjustment,
                                         int  number_of_frames,
                                         intptr_t* frame_sizes,
                                         address* frame_pcs,
                                         BasicType return_type) {
  _size_of_deoptimized_frame = size_of_deoptimized_frame;
  _caller_adjustment         = caller_adjustment;
  _number_of_frames          = number_of_frames;
  _frame_sizes               = frame_sizes;
  _frame_pcs                 = frame_pcs;
  _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2);
  _return_type               = return_type;
  // PD (x86 only)
  _counter_temp              = 0;
  _initial_fp                = 0;
  _unpack_kind               = 0;
  _sender_sp_temp            = 0;

  _total_frame_sizes         = size_of_frames();
}
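
// Note: the register block reserves two intptr_t slots per register (see
// value_addr_at below), presumably so a double-word value fits in a single
// register's save area.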
       
Deoptimization::UnrollBlock::~UnrollBlock() {
  FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
  FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
  FREE_C_HEAP_ARRAY(intptr_t, _register_block);
}


intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
  assert(register_number < RegisterMap::reg_count, "checking register number");
  return &_register_block[register_number * 2];
}


int Deoptimization::UnrollBlock::size_of_frames() const {
  // Account first for the adjustment of the initial frame
  int result = _caller_adjustment;
  for (int index = 0; index < number_of_frames(); index++) {
    result += frame_sizes()[index];
  }
  return result;
}


void Deoptimization::UnrollBlock::print() {
  ttyLocker ttyl;
  tty->print_cr("UnrollBlock");
  tty->print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
  tty->print(   "  frame_sizes: ");
  for (int index = 0; index < number_of_frames(); index++) {
    tty->print("%d ", frame_sizes()[index]);
  }
  tty->cr();
}
       

// In order to make fetch_unroll_info work properly with escape
// analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
// ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
// of previously eliminated objects occurs in realloc_objects, which is
// called from the method fetch_unroll_info_helper below.
JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread))
  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.

  // fetch_unroll_info() is called at the beginning of the deoptimization
  // handler. Note this fact before we start generating temporary frames
  // that can confuse an asynchronous stack walker. This counter is
  // decremented at the end of unpack_frames().
  thread->inc_in_deopt_handler();

  return fetch_unroll_info_helper(thread);
JRT_END


// This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread) {

  // Note: there is a safepoint safety issue here. No matter whether we enter
  // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
  // the vframeArray is created.

  // Allocate our special deoptimization ResourceMark
  DeoptResourceMark* dmark = new DeoptResourceMark(thread);
  assert(thread->deopt_mark() == NULL, "Pending deopt!");
  thread->set_deopt_mark(dmark);

  frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
  RegisterMap map(thread, true);
  RegisterMap dummy_map(thread, false);
  // Now get the deoptee with a valid map
  frame deoptee = stub_frame.sender(&map);

  // Create a growable array of VFrames where each VFrame represents an inlined
  // Java frame.  This storage is allocated with the usual system arena.
  assert(deoptee.is_compiled_frame(), "Wrong frame type");
  GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
  vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
  while (!vf->is_top()) {
    assert(vf->is_compiled_frame(), "Wrong frame type");
    chunk->push(compiledVFrame::cast(vf));
    vf = vf->sender();
  }
  assert(vf->is_compiled_frame(), "Wrong frame type");
  chunk->push(compiledVFrame::cast(vf));
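
  // Note: chunk is ordered youngest (innermost) scope first: the loop above
  // walks outward via vf->sender() until is_top() reports the outermost
  // scope of this physical frame, which is pushed last.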
       
#ifdef COMPILER2
  // Reallocate the non-escaping objects and restore their fields. Then
  // relock objects if synchronization on them was eliminated.
  if (DoEscapeAnalysis && EliminateAllocations) {
    GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
    bool reallocated = false;
    if (objects != NULL) {
      JRT_BLOCK
        reallocated = realloc_objects(thread, &deoptee, objects, THREAD);
      JRT_END
    }
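    // Note: reallocation can allocate Java objects (and hence safepoint/GC),
    // which is why it runs inside a JRT_BLOCK above, before the
    // No_Safepoint_Verifier below is armed.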
       
    if (reallocated) {
      reassign_fields(&deoptee, &map, objects);
#ifndef PRODUCT
      if (TraceDeoptimization) {
        ttyLocker ttyl;
        tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
        print_objects(objects);
      }
#endif
    }
    for (int i = 0; i < chunk->length(); i++) {
      GrowableArray<MonitorValue*>* monitors = chunk->at(i)->scope()->monitors();
      if (monitors != NULL) {
        relock_objects(&deoptee, &map, monitors);
#ifndef PRODUCT
        if (TraceDeoptimization) {
          ttyLocker ttyl;
          tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
          for (int j = 0; j < monitors->length(); j++) {
            MonitorValue* mv = monitors->at(j);
            if (mv->eliminated()) {
              StackValue* owner = StackValue::create_stack_value(&deoptee, &map, mv->owner());
              tty->print_cr("     object <" INTPTR_FORMAT "> locked", owner->get_obj()());
            }
          }
        }
#endif
      }
    }
  }
#endif // COMPILER2
       
  // Ensure that no safepoint is taken after pointers have been stored
  // in fields of rematerialized objects.  If a safepoint occurs from here on
  // out the java state residing in the vframeArray will be missed.
  No_Safepoint_Verifier no_safepoint;

  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk);

  assert(thread->vframe_array_head() == NULL, "Pending deopt!");
  thread->set_vframe_array_head(array);

  // Now that the vframeArray has been created, if we have any deferred local
  // writes added by jvmti then we can free up that structure as the data is
  // now in the vframeArray.
  if (thread->deferred_locals() != NULL) {
    GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
    int i = 0;
    do {
      // Because of inlining we could have multiple vframes for a single frame
      // and several of the vframes could have deferred writes. Find them all.
      if (list->at(i)->id() == array->original().id()) {
        jvmtiDeferredLocalVariableSet* dlv = list->at(i);
        list->remove_at(i);
        // individual jvmtiDeferredLocalVariableSet are CHeapObj's
        delete dlv;
      } else {
        i++;
      }
    } while ( i < list->length() );
    if (list->length() == 0) {
      thread->set_deferred_locals(NULL);
      // free the list and elements back to C heap.
      delete list;
    }
  }
       
  // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
  CodeBlob* cb = stub_frame.cb();
  // Verify we have the right vframeArray
  assert(cb->frame_size() >= 0, "Unexpected frame size");
  intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();

#ifdef ASSERT
  assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking");
  Events::log("fetch unroll sp " INTPTR_FORMAT, unpack_sp);
#endif
  // This is a guarantee instead of an assert because if vframe doesn't match
  // we will unpack the wrong deoptimized frame and wind up in strange places
  // where it will be very difficult to figure out what went wrong. Better
  // to die an early death here than some very obscure death later when the
  // trail is cold.
  // Note: on ia64 this guarantee can be fooled by frames with no memory stack
  // in that it will fail to detect a problem when there is one. This needs
  // more work in the tiger timeframe.
  guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");

  int number_of_frames = array->frames();

  // Compute the vframes' sizes.  Note that frame_sizes[] entries are ordered from outermost to innermost
  // virtual activation, which is the reverse of the elements in the vframes array.
  intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames);
  // +1 because we always have an interpreter return address for the final slot.
  address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1);
  int callee_parameters = 0;
  int callee_locals = 0;
  int popframe_extra_args = 0;
  // Create an interpreter return address for the stub to use as its return
  // address so the skeletal frames are perfectly walkable
  frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);

  // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
  // activation be put back on the expression stack of the caller for reexecution
  if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
  }
       
  //
  // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
  // frame_sizes/frame_pcs[1] next oldest frame (int)
  // frame_sizes/frame_pcs[n] youngest frame (int)
  //
  // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
  // owns the space for the return address to its caller).  Confusing ain't it.
  //
  // The vframe array can address vframes with indices running from
  // 0.._frames-1. Index 0 is the youngest frame and _frames - 1 is the oldest (root) frame.
  // When we create the skeletal frames we need the oldest frame to be in the zero slot
  // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk,
  // which is why things look a little strange in this loop.
  //
  for (int index = 0; index < array->frames(); index++) {
    // frame[number_of_frames - 1 ] = on_stack_size(youngest)
    // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
    // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
                                                                                                    callee_locals,
                                                                                                    index == 0,
                                                                                                    popframe_extra_args);
    // This pc doesn't have to be perfect, just good enough to identify the frame
    // as interpreted so the skeleton frame will be walkable.
    // The correct pc will be set when the skeleton frame is completely filled out;
    // the final pc we store in the loop is wrong and will be overwritten below.
    frame_pcs[number_of_frames - 1 - index] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;

    callee_parameters = array->element(index)->method()->size_of_parameters();
    callee_locals = array->element(index)->method()->max_locals();
    popframe_extra_args = 0;
  }
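
  // A sketch of the mapping: with three virtual frames, index 0 (youngest)
  // fills frame_sizes[number_of_frames - 1], index 1 fills the middle slot,
  // and index 2 (oldest) fills frame_sizes[0], giving the oldest-first
  // layout described above.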
       
  // Compute whether the root vframe returns a float or double value.
  BasicType return_type;
  {
    HandleMark hm;
    methodHandle method(thread, array->element(0)->method());
    Bytecode_invoke* invoke = Bytecode_invoke_at_check(method, array->element(0)->bci());
    return_type = (invoke != NULL) ? invoke->result_type(thread) : T_ILLEGAL;
  }
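
  // Note: if the root vframe is not stopped at an invoke (for example at a
  // synchronization entry), there is no call result to preserve, which is
  // recorded as T_ILLEGAL.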
       
  // Compute information for handling adapters and adjusting the frame size of the caller.
  int caller_adjustment = 0;

  // Find the current pc for the sender of the deoptee. Since the sender may have been deoptimized
  // itself since the deoptee vframeArray was created, we must get a fresh value of the pc rather
  // than simply use array->sender.pc(). This requires us to walk the current set of frames.
  frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
  deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller

  // Compute the amount the oldest interpreter frame will have to adjust
  // its caller's stack by. If the caller is a compiled frame then
  // we pretend that the callee has no parameters so that the
  // extension counts for the full amount of locals and not just
  // locals-parms. This is because without a c2i adapter the parm
  // area as created by the compiled frame will not be usable by
  // the interpreter. (Depending on the calling convention there
  // may not even be enough space).

  // QQQ I'd rather see this pushed down into last_frame_adjust
  // and have it take the sender (aka caller).

  if (deopt_sender.is_compiled_frame()) {
    caller_adjustment = last_frame_adjust(0, callee_locals);
  } else if (callee_locals > callee_parameters) {
    // The caller frame may need extending to accommodate
    // non-parameter locals of the first unpacked interpreted frame.
    // Compute that adjustment.
    caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
  }
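
  // A sketch of the two cases: an interpreted caller already reserves space
  // for the callee's parameters, so only (callee_locals - callee_parameters)
  // words of extension are needed; a compiled caller provides a parameter
  // area the interpreter cannot reuse, hence the full callee_locals.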
       
  // If the sender is deoptimized then we must retrieve the address of the handler,
  // since the frame will "magically" show the original pc before the deopt
  // and we'd undo the deopt.
  frame_pcs[0] = deopt_sender.raw_pc();

  assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");

  UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
                                      caller_adjustment * BytesPerWord,
                                      number_of_frames,
                                      frame_sizes,
                                      frame_pcs,
                                      return_type);
#if defined(IA32) || defined(AMD64)
  // We need a way to pass fp to the unpacking code so the skeletal frames
  // come out correct. This is only needed for x86 because of c2 using ebp
  // as an allocatable register. So this update is useless (and harmless)
  // on the other platforms. It would be nice to do this in a different
  // way but even the old style deoptimization had a problem with deriving
  // this value. NEEDS_CLEANUP
  // Note: now that c1 is using c2's deopt blob we must do this on all
  // x86 based platforms
  intptr_t** fp_addr = (intptr_t**) (((address)info) + info->initial_fp_offset_in_bytes());
  *fp_addr = array->sender().fp(); // was adapter_caller
#endif /* IA32 || AMD64 */

  if (array->frames() > 1) {
    if (VerifyStack && TraceDeoptimization) {
      tty->print_cr("Deoptimizing method containing inlining");
    }
  }

  array->set_unroll_block(info);
  return info;
}
       
// Called to clean up deoptimization data structures: in the normal case after
// unpacking to the stack, and when a stack overflow error occurs.
void Deoptimization::cleanup_deopt_info(JavaThread *thread,
                                        vframeArray *array) {

  // Get array if coming from exception
  if (array == NULL) {
    array = thread->vframe_array_head();
  }
  thread->set_vframe_array_head(NULL);

  // Free the previous UnrollBlock
  vframeArray* old_array = thread->vframe_array_last();
  thread->set_vframe_array_last(array);

  if (old_array != NULL) {
    UnrollBlock* old_info = old_array->unroll_block();
    old_array->set_unroll_block(NULL);
    delete old_info;
    delete old_array;
  }

  // Deallocate any resources created in this routine and any ResourceObjs
  // allocated inside the vframeArray (StackValueCollections)
  delete thread->deopt_mark();
  thread->set_deopt_mark(NULL);

  if (JvmtiExport::can_pop_frame()) {
#ifndef CC_INTERP
    // Regardless of whether we entered this routine with the pending
    // popframe condition bit set, we should always clear it now
    thread->clear_popframe_condition();
#else
    // The C++ interpreter will clear has_pending_popframe when it enters
    // with method_resume. For deopt_resume2 we clear it now.
    if (thread->popframe_forcing_deopt_reexecution())
        thread->clear_popframe_condition();
#endif /* CC_INTERP */
  }

  // unpack_frames() is called at the end of the deoptimization handler
  // and (in C2) at the end of the uncommon trap handler. Note this fact
  // so that an asynchronous stack walker can work again. This counter is
  // incremented at the beginning of fetch_unroll_info() and (in C2) at
  // the beginning of uncommon_trap().
  thread->dec_in_deopt_handler();
}
       

// Return BasicType of value being returned
JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))

  // We are already active in the special DeoptResourceMark, so any ResourceObjs
  // we allocate will be freed at the end of the routine.

  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.
  ResetNoHandleMark rnhm; // No-op in release/product versions
  HandleMark hm;

  frame stub_frame = thread->last_frame();

  // Since the frame to unpack is the top frame of this thread, the vframe_array_head
  // must point to the vframeArray for the unpack frame.
  vframeArray* array = thread->vframe_array_head();

#ifndef PRODUCT
  if (TraceDeoptimization) {
    tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d", thread, array, exec_mode);
  }
#endif

  UnrollBlock* info = array->unroll_block();

  // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
  array->unpack_to_stack(stub_frame, exec_mode);

  BasicType bt = info->return_type();

  // If we have an exception pending, claim that the return type is an oop
  // so the deopt_blob does not overwrite the exception_oop.
  if (exec_mode == Unpack_exception)
    bt = T_OBJECT;

  // Cleanup thread deopt data
  cleanup_deopt_info(thread, array);
       
#ifndef PRODUCT
  if (VerifyStack) {
    ResourceMark res_mark;

    // Verify that the just-unpacked frames match the interpreter's
    // notions of expression stack and locals
    vframeArray* cur_array = thread->vframe_array_last();
    RegisterMap rm(thread, false);
    rm.set_include_argument_oops(false);
    bool is_top_frame = true;
    int callee_size_of_parameters = 0;
    int callee_max_locals = 0;
    for (int i = 0; i < cur_array->frames(); i++) {
      vframeArrayElement* el = cur_array->element(i);
      frame* iframe = el->iframe();
      guarantee(iframe->is_interpreted_frame(), "Wrong frame type");

      // Get the oop map for this bci
      InterpreterOopMap mask;
      int cur_invoke_parameter_size = 0;
      bool try_next_mask = false;
      int next_mask_expression_stack_size = -1;
      int top_frame_expression_stack_adjustment = 0;
      methodHandle mh(thread, iframe->interpreter_frame_method());
      OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
      BytecodeStream str(mh);
      str.set_start(iframe->interpreter_frame_bci());
      int max_bci = mh->code_size();
      // Get to the next bytecode if possible
      assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
      // Check to see if we can grab the number of outgoing arguments
      // at an uncommon trap for an invoke (where the compiler
      // generates debug info before the invoke has executed)
      Bytecodes::Code cur_code = str.next();
      if (cur_code == Bytecodes::_invokevirtual ||
          cur_code == Bytecodes::_invokespecial ||
          cur_code == Bytecodes::_invokestatic  ||
          cur_code == Bytecodes::_invokeinterface) {
        Bytecode_invoke* invoke = Bytecode_invoke_at(mh, iframe->interpreter_frame_bci());
        symbolHandle signature(thread, invoke->signature());
        ArgumentSizeComputer asc(signature);
        cur_invoke_parameter_size = asc.size();
        if (cur_code != Bytecodes::_invokestatic) {
          // Add in receiver
          ++cur_invoke_parameter_size;
        }
      }
      if (str.bci() < max_bci) {
        Bytecodes::Code bc = str.next();
        if (bc >= 0) {
          // The interpreter oop map generator reports results before
          // the current bytecode has executed except in the case of
          // calls. It seems to be hard to tell whether the compiler
          // has emitted debug information matching the "state before"
          // a given bytecode or the state after, so we try both
          switch (cur_code) {
            case Bytecodes::_invokevirtual:
            case Bytecodes::_invokespecial:
            case Bytecodes::_invokestatic:
            case Bytecodes::_invokeinterface:
            case Bytecodes::_athrow:
              break;
            default: {
              InterpreterOopMap next_mask;
              OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
              next_mask_expression_stack_size = next_mask.expression_stack_size();
              // Need to subtract off the size of the result type of
              // the bytecode because this is not described in the
              // debug info but returned to the interpreter in the TOS
              // caching register
              BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
              if (bytecode_result_type != T_ILLEGAL) {
                top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
              }
              assert(top_frame_expression_stack_adjustment >= 0, "");
              try_next_mask = true;
              break;
            }
          }
        }
      }

      // Verify stack depth and oops in frame
      // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
      if (!(
            /* SPARC */
            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
            /* x86 */
            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
            (try_next_mask &&
             (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
                                                                    top_frame_expression_stack_adjustment))) ||
            (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
            (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute) &&
             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
            )) {
        ttyLocker ttyl;

        // Print out some information that will help us debug the problem
        tty->print_cr("Wrong number of expression stack elements during deoptimization");
        tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
        tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
                      iframe->interpreter_frame_expression_stack_size());
        tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
        tty->print_cr("  try_next_mask = %d", try_next_mask);
        tty->print_cr("  next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
        tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
        tty->print_cr("  callee_max_locals = %d", callee_max_locals);
        tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
        tty->print_cr("  exec_mode = %d", exec_mode);
        tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
        tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = " UINTX_FORMAT, thread, thread->osthread()->thread_id());
        tty->print_cr("  Interpreted frames:");
        for (int k = 0; k < cur_array->frames(); k++) {
          vframeArrayElement* el = cur_array->element(k);
          tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
        }
        cur_array->print_on_2(tty);
        guarantee(false, "wrong number of expression stack elements during deopt");
      }
      VerifyOopClosure verify;
      iframe->oops_interpreted_do(&verify, &rm, false);
      callee_size_of_parameters = mh->size_of_parameters();
      callee_max_locals = mh->max_locals();
      is_top_frame = false;
    }
  }
#endif /* !PRODUCT */

  return bt;
JRT_END
       

int Deoptimization::deoptimize_dependents() {
  Threads::deoptimized_wrt_marked_nmethods();
  return 0;
}
       

#ifdef COMPILER2
bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) {
  Handle pending_exception(thread->pending_exception());
  const char* exception_file = thread->exception_file();
  int exception_line = thread->exception_line();
  thread->clear_pending_exception();
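  // Note: the pending exception is stashed and cleared above so that the
  // allocations below (which use CHECK macros) start with a clean exception
  // state; it is reinstalled at the end of this method.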
       
  for (int i = 0; i < objects->length(); i++) {
    assert(objects->at(i)->is_object(), "invalid debug information");
    ObjectValue* sv = (ObjectValue*) objects->at(i);

    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
    oop obj = NULL;

    if (k->oop_is_instance()) {
      instanceKlass* ik = instanceKlass::cast(k());
      obj = ik->allocate_instance(CHECK_(false));
    } else if (k->oop_is_typeArray()) {
      typeArrayKlass* ak = typeArrayKlass::cast(k());
      assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
      int len = sv->field_size() / type2size[ak->element_type()];
      obj = ak->allocate(len, CHECK_(false));
    } else if (k->oop_is_objArray()) {
      objArrayKlass* ak = objArrayKlass::cast(k());
      obj = ak->allocate(sv->field_size(), CHECK_(false));
    }

    assert(obj != NULL, "allocation failed");
    assert(sv->value().is_null(), "redundant reallocation");
    sv->set_value(obj);
  }

  if (pending_exception.not_null()) {
    thread->set_pending_exception(pending_exception(), exception_file, exception_line);
  }

  return true;
}
       
// This assumes that the fields are stored in the ObjectValue in the same order
// they are yielded by do_nonstatic_fields.
class FieldReassigner: public FieldClosure {
  frame* _fr;
  RegisterMap* _reg_map;
  ObjectValue* _sv;
  instanceKlass* _ik;
  oop _obj;

  int _i;
public:
  FieldReassigner(frame* fr, RegisterMap* reg_map, ObjectValue* sv, oop obj) :
    _fr(fr), _reg_map(reg_map), _sv(sv), _obj(obj), _i(0) {}

  int i() const { return _i; }

  void do_field(fieldDescriptor* fd) {
    StackValue* value =
      StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(i()));
    int offset = fd->offset();
    switch (fd->field_type()) {
    case T_OBJECT: case T_ARRAY:
      assert(value->type() == T_OBJECT, "Agreement.");
      _obj->obj_field_put(offset, value->get_obj()());
      break;

    case T_LONG: case T_DOUBLE: {
      assert(value->type() == T_INT, "Agreement.");
      StackValue* low =
        StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(++_i));
      jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
      _obj->long_field_put(offset, res);
      break;
    }

    case T_INT: case T_FLOAT: // 4 bytes.
      assert(value->type() == T_INT, "Agreement.");
      _obj->int_field_put(offset, (jint)value->get_int());
      break;

    case T_SHORT: case T_CHAR: // 2 bytes
      assert(value->type() == T_INT, "Agreement.");
      _obj->short_field_put(offset, (jshort)value->get_int());
      break;

    case T_BYTE: // 1 byte
      assert(value->type() == T_INT, "Agreement.");
      _obj->byte_field_put(offset, (jbyte)value->get_int());
      break;

    case T_BOOLEAN: // 1 byte
      assert(value->type() == T_INT, "Agreement.");
      _obj->bool_field_put(offset, (jboolean)value->get_int());
      break;

    default:
      ShouldNotReachHere();
    }
    _i++;
  }
};
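
// A sketch of the intended use (see reassign_fields below):
//
//   FieldReassigner reassign(fr, reg_map, sv, obj());
//   ik->do_nonstatic_fields(&reassign);
//
// do_nonstatic_fields must yield the fields in the same order the compiler
// recorded them in the ObjectValue, as noted above.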
       
// restore elements of an eliminated type array
void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
  StackValue* low;
  jlong lval;
  int index = 0;

  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    switch(type) {
      case T_BOOLEAN: obj->bool_at_put (index, (jboolean) value->get_int()); break;
      case T_BYTE:    obj->byte_at_put (index, (jbyte)    value->get_int()); break;
      case T_CHAR:    obj->char_at_put (index, (jchar)    value->get_int()); break;
      case T_SHORT:   obj->short_at_put(index, (jshort)   value->get_int()); break;
      case T_INT:     obj->int_at_put  (index, (jint)     value->get_int()); break;
      case T_FLOAT:   obj->float_at_put(index, (jfloat)   value->get_int()); break;
      case T_LONG:
      case T_DOUBLE:
        low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
        lval = jlong_from((jint)value->get_int(), (jint)low->get_int());
        obj->long_at_put(index, lval);
        break;
      default:
        ShouldNotReachHere();
    }
    index++;
  }
}
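
// Note: long and double elements occupy two stack-value slots, which is why
// the T_LONG/T_DOUBLE case above consumes an extra field (sv->field_at(++i)).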
       
// restore fields of an eliminated object array
void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    assert(value->type() == T_OBJECT, "object element expected");
    obj->obj_at_put(i, value->get_obj()());
  }
}


// restore fields of all eliminated objects and arrays
void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects) {
  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
    Handle obj = sv->value();
    assert(obj.not_null(), "reallocation was missed");

    if (k->oop_is_instance()) {
      instanceKlass* ik = instanceKlass::cast(k());
      FieldReassigner reassign(fr, reg_map, sv, obj());
      ik->do_nonstatic_fields(&reassign);
    } else if (k->oop_is_typeArray()) {
      typeArrayKlass* ak = typeArrayKlass::cast(k());
      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
    } else if (k->oop_is_objArray()) {
      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
    }
  }
}
       

// relock objects for which synchronization was eliminated
void Deoptimization::relock_objects(frame* fr, RegisterMap* reg_map, GrowableArray<MonitorValue*>* monitors) {
  for (int i = 0; i < monitors->length(); i++) {
    MonitorValue* mv = monitors->at(i);
    StackValue* owner = StackValue::create_stack_value(fr, reg_map, mv->owner());
    if (mv->eliminated()) {
      Handle obj = owner->get_obj();
      assert(obj.not_null(), "reallocation was missed");
      BasicLock* lock = StackValue::resolve_monitor_lock(fr, mv->basic_lock());
      lock->set_displaced_header(obj->mark());
      obj->set_mark((markOop) lock);
    }
    assert(owner->get_obj()->is_locked(), "object must be locked now");
  }
}
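
// Note: relocking reconstitutes a lightweight stack lock: the object's
// current mark word is saved as the displaced header in the BasicLock and
// the object's mark is pointed at that lock.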
       
#ifndef PRODUCT
// print information about reallocated objects
void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
  fieldDescriptor fd;

  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
    Handle obj = sv->value();

    tty->print("     object <" INTPTR_FORMAT "> of type ", sv->value()());
    k->as_klassOop()->print_value();
    tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
    tty->cr();

    if (Verbose) {
      k->oop_print_on(obj(), tty);
    }
  }
}
#endif
#endif // COMPILER2
       
vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) {

#ifndef PRODUCT
  if (TraceDeoptimization) {
    ttyLocker ttyl;
    tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", thread);
    fr.print_on(tty);
    tty->print_cr("     Virtual frames (innermost first):");
    for (int index = 0; index < chunk->length(); index++) {
      compiledVFrame* vf = chunk->at(index);
      tty->print("       %2d - ", index);
      vf->print_value();
      int bci = chunk->at(index)->raw_bci();
      const char* code_name;
      if (bci == SynchronizationEntryBCI) {
        code_name = "sync entry";
      } else {
        Bytecodes::Code code = Bytecodes::code_at(vf->method(), bci);
        code_name = Bytecodes::name(code);
      }
      tty->print(" - %s", code_name);
      tty->print_cr(" @ bci %d ", bci);
      if (Verbose) {
        vf->print();
        tty->cr();
      }
    }
  }
#endif

  // Register map for next frame (used for stack crawl).  We capture
  // the state of the deopt'ing frame's caller.  Thus if we need to
  // stuff a C2I adapter we can properly fill in the callee-save
  // register locations.
  frame caller = fr.sender(reg_map);
  int frame_size = caller.sp() - fr.sp();

  frame sender = caller;

  // Since the Java thread being deoptimized will eventually adjust its own stack,
  // the vframeArray containing the unpacking information is allocated in the C heap.
  // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr);

  // Compare the vframeArray to the collected vframes
  assert(array->structural_compare(thread, chunk), "just checking");
  Events::log("# vframes = %d", (intptr_t)chunk->length());

#ifndef PRODUCT
  if (TraceDeoptimization) {
    ttyLocker ttyl;
    tty->print_cr("     Created vframeArray " INTPTR_FORMAT, array);
    if (Verbose) {
      int count = 0;
      // this used to leak deoptimizedVFrame like it was going out of style!!!
      for (int index = 0; index < array->frames(); index++ ) {
        vframeArrayElement* e = array->element(index);
        e->print(tty);

        /*
          No printing yet.
        array->vframe_at(index)->print_activation(count++);
        // better as...
        array->print_activation_for(index, count++);
        */
      }
    }
  }
#endif // PRODUCT

  return array;
}
       

static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
  GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (mon_info->owner() != NULL) {
      objects_to_revoke->append(Handle(mon_info->owner()));
    }
  }
}
       

void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
  if (!UseBiasedLocking) {
    return;
  }

  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();

  // Unfortunately we don't have a RegisterMap available in most of
  // the places we want to call this routine so we need to walk the
  // stack again to update the register map.
  if (map == NULL || !map->update_map()) {
    StackFrameStream sfs(thread, true);
    bool found = false;
    while (!found && !sfs.is_done()) {
      frame* cur = sfs.current();
      sfs.next();
      found = cur->id() == fr.id();
    }
    assert(found, "frame to be deoptimized not found on target thread's stack");
    map = sfs.register_map();
  }

  vframe* vf = vframe::new_vframe(&fr, map, thread);
  compiledVFrame* cvf = compiledVFrame::cast(vf);
  // Revoke monitors' biases in all scopes
  while (!cvf->is_top()) {
    collect_monitors(cvf, objects_to_revoke);
    cvf = compiledVFrame::cast(cvf->sender());
  }
  collect_monitors(cvf, objects_to_revoke);

  if (SafepointSynchronize::is_at_safepoint()) {
    BiasedLocking::revoke_at_safepoint(objects_to_revoke);
  } else {
    BiasedLocking::revoke(objects_to_revoke);
  }
}
       

void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) {
  if (!UseBiasedLocking) {
    return;
  }

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint");
  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
  for (JavaThread* jt = Threads::first(); jt != NULL; jt = jt->next()) {
    if (jt->has_last_Java_frame()) {
      StackFrameStream sfs(jt, true);
      while (!sfs.is_done()) {
        frame* cur = sfs.current();
        if (cb->contains(cur->pc())) {
          vframe* vf = vframe::new_vframe(cur, sfs.register_map(), jt);
          compiledVFrame* cvf = compiledVFrame::cast(vf);
          // Revoke monitors' biases in all scopes
          while (!cvf->is_top()) {
            collect_monitors(cvf, objects_to_revoke);
            cvf = compiledVFrame::cast(cvf->sender());
          }
          collect_monitors(cvf, objects_to_revoke);
        }
        sfs.next();
      }
    }
  }
  BiasedLocking::revoke_at_safepoint(objects_to_revoke);
}
       

void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr) {
  assert(fr.can_be_deoptimized(), "checking frame type");

  gather_statistics(Reason_constraint, Action_none, Bytecodes::_illegal);

  EventMark m("Deoptimization (pc=" INTPTR_FORMAT ", sp=" INTPTR_FORMAT ")", fr.pc(), fr.id());

  // Patch the nmethod so that when execution returns to it we will
  // deopt the execution state and return to the interpreter.
  fr.deoptimize(thread);
}
       
void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
  // Deoptimize only if the frame comes from compiled code.
  // Do not deoptimize a frame that has already been patched
  // during the execution of the loops below.
  if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
    return;
  }
  ResourceMark rm;
  DeoptimizationMarker dm;
  if (UseBiasedLocking) {
    revoke_biases_of_monitors(thread, fr, map);
  }
  deoptimize_single_frame(thread, fr);
}
       

void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
  // Compute frame and register map based on thread and sp.
  RegisterMap reg_map(thread, UseBiasedLocking);
  frame fr = thread->last_frame();
  while (fr.id() != id) {
    fr = fr.sender(&reg_map);
  }
  deoptimize(thread, fr, &reg_map);
}
       

// JVMTI PopFrame support
JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
{
  thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
}
JRT_END
       
  1000 
       
  1001 
       
  1002 #ifdef COMPILER2
       
  1003 void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
       
  1004   // in case of an unresolved klass entry, load the class.
       
  1005   if (constant_pool->tag_at(index).is_unresolved_klass()) {
       
  1006     klassOop tk = constant_pool->klass_at(index, CHECK);
       
  1007     return;
       
  1008   }
       
  1009 
       
  1010   if (!constant_pool->tag_at(index).is_symbol()) return;
       
  1011 
       
  1012   Handle class_loader (THREAD, instanceKlass::cast(constant_pool->pool_holder())->class_loader());
       
  1013   symbolHandle symbol (THREAD, constant_pool->symbol_at(index));
       
  1014 
       
  1015   // class name?
       
  1016   if (symbol->byte_at(0) != '(') {
       
  1017     Handle protection_domain (THREAD, Klass::cast(constant_pool->pool_holder())->protection_domain());
       
  1018     SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
       
  1019     return;
       
  1020   }
       
  1021 
       
  1022   // then it must be a signature!
       
  1023   for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
       
  1024     if (ss.is_object()) {
       
  1025       symbolOop s = ss.as_symbol(CHECK);
       
  1026       symbolHandle class_name (THREAD, s);
       
  1027       Handle protection_domain (THREAD, Klass::cast(constant_pool->pool_holder())->protection_domain());
       
  1028       SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
       
  1029     }
       
  1030   }
       
  1031 }
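
// For example, given a method signature such as "(Ljava/lang/String;I)V",
// the SignatureStream loop above resolves only java/lang/String; the
// primitive entries 'I' and 'V' are not object types and are skipped.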
       
  1032 
       
  1033 
       
  1034 void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index) {
       
  1035   EXCEPTION_MARK;
       
  1036   load_class_by_index(constant_pool, index, THREAD);
       
  1037   if (HAS_PENDING_EXCEPTION) {
       
   1038   // Exception happened during classloading. We ignore the exception here, since it

   1039   // is going to be rethrown anyway: the current activation is about to be

   1040   // deoptimized and the interpreter will re-execute the bytecode.
       
  1041     CLEAR_PENDING_EXCEPTION;
       
  1042   }
       
  1043 }
       
  1044 
       
  1045 JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) {
       
  1046   HandleMark hm;
       
  1047 
       
  1048   // uncommon_trap() is called at the beginning of the uncommon trap
       
  1049   // handler. Note this fact before we start generating temporary frames
       
  1050   // that can confuse an asynchronous stack walker. This counter is
       
  1051   // decremented at the end of unpack_frames().
       
  1052   thread->inc_in_deopt_handler();
       
  1053 
       
  1054   // We need to update the map if we have biased locking.
       
  1055   RegisterMap reg_map(thread, UseBiasedLocking);
       
  1056   frame stub_frame = thread->last_frame();
       
  1057   frame fr = stub_frame.sender(&reg_map);
       
  1058   // Make sure the calling nmethod is not getting deoptimized and removed
       
  1059   // before we are done with it.
       
  1060   nmethodLocker nl(fr.pc());
       
  1061 
       
  1062   {
       
  1063     ResourceMark rm;
       
  1064 
       
  1065     // Revoke biases of any monitors in the frame to ensure we can migrate them
       
  1066     revoke_biases_of_monitors(thread, fr, &reg_map);
       
  1067 
       
  1068     DeoptReason reason = trap_request_reason(trap_request);
       
  1069     DeoptAction action = trap_request_action(trap_request);
       
  1070     jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
       
  1071 
       
   1072     Events::log("Uncommon trap occurred @" INTPTR_FORMAT " unloaded_class_index = %d", fr.pc(), (int) unloaded_class_index);
       
  1073     vframe*  vf  = vframe::new_vframe(&fr, &reg_map, thread);
       
  1074     compiledVFrame* cvf = compiledVFrame::cast(vf);
       
  1075 
       
  1076     nmethod* nm = cvf->code();
       
  1077 
       
  1078     ScopeDesc*      trap_scope  = cvf->scope();
       
  1079     methodHandle    trap_method = trap_scope->method();
       
  1080     int             trap_bci    = trap_scope->bci();
       
  1081     Bytecodes::Code trap_bc     = Bytecode_at(trap_method->bcp_from(trap_bci))->java_code();
       
  1082 
       
  1083     // Record this event in the histogram.
       
  1084     gather_statistics(reason, action, trap_bc);
       
  1085 
       
  1086     // Ensure that we can record deopt. history:
       
  1087     bool create_if_missing = ProfileTraps;
       
  1088 
       
  1089     methodDataHandle trap_mdo
       
  1090       (THREAD, get_method_data(thread, trap_method, create_if_missing));
       
  1091 
       
  1092     // Print a bunch of diagnostics, if requested.
       
  1093     if (TraceDeoptimization || LogCompilation) {
       
  1094       ResourceMark rm;
       
  1095       ttyLocker ttyl;
       
  1096       char buf[100];
       
  1097       if (xtty != NULL) {
       
   1098         xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s",
       
  1099                          os::current_thread_id(),
       
  1100                          format_trap_request(buf, sizeof(buf), trap_request));
       
  1101         nm->log_identity(xtty);
       
  1102       }
       
  1103       symbolHandle class_name;
       
  1104       bool unresolved = false;
       
  1105       if (unloaded_class_index >= 0) {
       
  1106         constantPoolHandle constants (THREAD, trap_method->constants());
       
  1107         if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
       
  1108           class_name = symbolHandle(THREAD,
       
  1109             constants->klass_name_at(unloaded_class_index));
       
  1110           unresolved = true;
       
  1111           if (xtty != NULL)
       
  1112             xtty->print(" unresolved='1'");
       
  1113         } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
       
  1114           class_name = symbolHandle(THREAD,
       
  1115             constants->symbol_at(unloaded_class_index));
       
  1116         }
       
  1117         if (xtty != NULL)
       
  1118           xtty->name(class_name);
       
  1119       }
       
  1120       if (xtty != NULL && trap_mdo.not_null()) {
       
  1121         // Dump the relevant MDO state.
       
  1122         // This is the deopt count for the current reason, any previous
       
  1123         // reasons or recompiles seen at this point.
       
  1124         int dcnt = trap_mdo->trap_count(reason);
       
  1125         if (dcnt != 0)
       
  1126           xtty->print(" count='%d'", dcnt);
       
  1127         ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
       
  1128         int dos = (pdata == NULL)? 0: pdata->trap_state();
       
  1129         if (dos != 0) {
       
  1130           xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
       
  1131           if (trap_state_is_recompiled(dos)) {
       
  1132             int recnt2 = trap_mdo->overflow_recompile_count();
       
  1133             if (recnt2 != 0)
       
  1134               xtty->print(" recompiles2='%d'", recnt2);
       
  1135           }
       
  1136         }
       
  1137       }
       
  1138       if (xtty != NULL) {
       
  1139         xtty->stamp();
       
  1140         xtty->end_head();
       
  1141       }
       
  1142       if (TraceDeoptimization) {  // make noise on the tty
       
  1143         tty->print("Uncommon trap occurred in");
       
  1144         nm->method()->print_short_name(tty);
       
  1145         tty->print(" (@" INTPTR_FORMAT ") thread=%d reason=%s action=%s unloaded_class_index=%d",
       
  1146                    fr.pc(),
       
  1147                    (int) os::current_thread_id(),
       
  1148                    trap_reason_name(reason),
       
  1149                    trap_action_name(action),
       
  1150                    unloaded_class_index);
       
  1151         if (class_name.not_null()) {
       
  1152           tty->print(unresolved ? " unresolved class: " : " symbol: ");
       
  1153           class_name->print_symbol_on(tty);
       
  1154         }
       
  1155         tty->cr();
       
  1156       }
       
  1157       if (xtty != NULL) {
       
  1158         // Log the precise location of the trap.
       
  1159         for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
       
  1160           xtty->begin_elem("jvms bci='%d'", sd->bci());
       
  1161           xtty->method(sd->method());
       
  1162           xtty->end_elem();
       
  1163           if (sd->is_top())  break;
       
  1164         }
       
  1165         xtty->tail("uncommon_trap");
       
  1166       }
       
  1167     }
       
  1168     // (End diagnostic printout.)
       
  1169 
       
  1170     // Load class if necessary
       
  1171     if (unloaded_class_index >= 0) {
       
  1172       constantPoolHandle constants(THREAD, trap_method->constants());
       
  1173       load_class_by_index(constants, unloaded_class_index);
       
  1174     }
       
  1175 
       
  1176     // Flush the nmethod if necessary and desirable.
       
  1177     //
       
  1178     // We need to avoid situations where we are re-flushing the nmethod
       
  1179     // because of a hot deoptimization site.  Repeated flushes at the same
       
  1180     // point need to be detected by the compiler and avoided.  If the compiler
       
  1181     // cannot avoid them (or has a bug and "refuses" to avoid them), this
       
  1182     // module must take measures to avoid an infinite cycle of recompilation
       
  1183     // and deoptimization.  There are several such measures:
       
  1184     //
       
  1185     //   1. If a recompilation is ordered a second time at some site X
       
  1186     //   and for the same reason R, the action is adjusted to 'reinterpret',
       
  1187     //   to give the interpreter time to exercise the method more thoroughly.
       
  1188     //   If this happens, the method's overflow_recompile_count is incremented.
       
  1189     //
       
  1190     //   2. If the compiler fails to reduce the deoptimization rate, then
       
  1191     //   the method's overflow_recompile_count will begin to exceed the set
       
  1192     //   limit PerBytecodeRecompilationCutoff.  If this happens, the action
       
  1193     //   is adjusted to 'make_not_compilable', and the method is abandoned
       
  1194     //   to the interpreter.  This is a performance hit for hot methods,
       
  1195     //   but is better than a disastrous infinite cycle of recompilations.
       
  1196     //   (Actually, only the method containing the site X is abandoned.)
       
  1197     //
       
  1198     //   3. In parallel with the previous measures, if the total number of
       
  1199     //   recompilations of a method exceeds the much larger set limit
       
  1200     //   PerMethodRecompilationCutoff, the method is abandoned.
       
  1201     //   This should only happen if the method is very large and has
       
  1202     //   many "lukewarm" deoptimizations.  The code which enforces this
       
  1203     //   limit is elsewhere (class nmethod, class methodOopDesc).
       
  1204     //
       
  1205     // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
       
  1206     // to recompile at each bytecode independently of the per-BCI cutoff.
       
  1207     //
       
  1208     // The decision to update code is up to the compiler, and is encoded
       
  1209     // in the Action_xxx code.  If the compiler requests Action_none
       
  1210     // no trap state is changed, no compiled code is changed, and the
       
  1211     // computation suffers along in the interpreter.
       
  1212     //
       
  1213     // The other action codes specify various tactics for decompilation
       
  1214     // and recompilation.  Action_maybe_recompile is the loosest, and
       
  1215     // allows the compiled code to stay around until enough traps are seen,
       
  1216     // and until the compiler gets around to recompiling the trapping method.
       
  1217     //
       
  1218     // The other actions cause immediate removal of the present code.
       
  1219 
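    // A concrete (illustrative) example with typical product defaults,
    // PerBytecodeTrapLimit=4 and PerBytecodeRecompilationCutoff=200: a site
    // trapping a handful of times forces one recompile (measure 1), while a
    // site still trapping after ~200 recompiles is abandoned to the
    // interpreter (measure 2).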
       
  1220     bool update_trap_state = true;
       
  1221     bool make_not_entrant = false;
       
  1222     bool make_not_compilable = false;
       
  1223     bool reset_counters = false;
       
  1224     switch (action) {
       
  1225     case Action_none:
       
  1226       // Keep the old code.
       
  1227       update_trap_state = false;
       
  1228       break;
       
  1229     case Action_maybe_recompile:
       
   1230       // No need to invalidate the present code; we can

   1231       // initiate another compile instead:

   1232       // start the compiler without (necessarily) invalidating the nmethod.
       
  1233       // The system will tolerate the old code, but new code should be
       
  1234       // generated when possible.
       
  1235       break;
       
  1236     case Action_reinterpret:
       
  1237       // Go back into the interpreter for a while, and then consider
       
   1238       // recompiling from scratch.

   1239       make_not_entrant = true;

   1240       // Reset the invocation counter for the outermost method.
       
  1241       // This will allow the interpreter to exercise the bytecodes
       
  1242       // for a while before recompiling.
       
  1243       // By contrast, Action_make_not_entrant is immediate.
       
  1244       //
       
  1245       // Note that the compiler will track null_check, null_assert,
       
  1246       // range_check, and class_check events and log them as if they
       
  1247       // had been traps taken from compiled code.  This will update
       
  1248       // the MDO trap history so that the next compilation will
       
  1249       // properly detect hot trap sites.
       
  1250       reset_counters = true;
       
  1251       break;
       
  1252     case Action_make_not_entrant:
       
  1253       // Request immediate recompilation, and get rid of the old code.
       
  1254       // Make them not entrant, so next time they are called they get
       
  1255       // recompiled.  Unloaded classes are loaded now so recompile before next
       
  1256       // time they are called.  Same for uninitialized.  The interpreter will
       
  1257       // link the missing class, if any.
       
  1258       make_not_entrant = true;
       
  1259       break;
       
  1260     case Action_make_not_compilable:
       
  1261       // Give up on compiling this method at all.
       
  1262       make_not_entrant = true;
       
  1263       make_not_compilable = true;
       
  1264       break;
       
  1265     default:
       
  1266       ShouldNotReachHere();
       
  1267     }
       
  1268 
       
  1269     // Setting +ProfileTraps fixes the following, on all platforms:
       
  1270     // 4852688: ProfileInterpreter is off by default for ia64.  The result is
       
  1271     // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
       
  1272     // recompile relies on a methodDataOop to record heroic opt failures.
       
  1273 
       
  1274     // Whether the interpreter is producing MDO data or not, we also need
       
  1275     // to use the MDO to detect hot deoptimization points and control
       
  1276     // aggressive optimization.
       
  1277     if (ProfileTraps && update_trap_state && trap_mdo.not_null()) {
       
  1278       assert(trap_mdo() == get_method_data(thread, trap_method, false), "sanity");
       
  1279       uint this_trap_count = 0;
       
  1280       bool maybe_prior_trap = false;
       
  1281       bool maybe_prior_recompile = false;
       
  1282       ProfileData* pdata
       
  1283         = query_update_method_data(trap_mdo, trap_bci, reason,
       
  1284                                    //outputs:
       
  1285                                    this_trap_count,
       
  1286                                    maybe_prior_trap,
       
  1287                                    maybe_prior_recompile);
       
  1288       // Because the interpreter also counts null, div0, range, and class
       
  1289       // checks, these traps from compiled code are double-counted.
       
  1290       // This is harmless; it just means that the PerXTrapLimit values
       
  1291       // are in effect a little smaller than they look.
       
  1292 
       
  1293       DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
       
  1294       if (per_bc_reason != Reason_none) {
       
  1295         // Now take action based on the partially known per-BCI history.
       
  1296         if (maybe_prior_trap
       
  1297             && this_trap_count >= (uint)PerBytecodeTrapLimit) {
       
  1298           // If there are too many traps at this BCI, force a recompile.
       
  1299           // This will allow the compiler to see the limit overflow, and
       
  1300           // take corrective action, if possible.  The compiler generally
       
  1301           // does not use the exact PerBytecodeTrapLimit value, but instead
       
  1302           // changes its tactics if it sees any traps at all.  This provides
       
  1303           // a little hysteresis, delaying a recompile until a trap happens
       
  1304           // several times.
       
  1305           //
       
  1306           // Actually, since there is only one bit of counter per BCI,
       
  1307           // the possible per-BCI counts are {0,1,(per-method count)}.
       
  1308           // This produces accurate results if in fact there is only
       
  1309           // one hot trap site, but begins to get fuzzy if there are
       
  1310           // many sites.  For example, if there are ten sites each
       
  1311           // trapping two or more times, they each get the blame for
       
  1312           // all of their traps.
       
  1313           make_not_entrant = true;
       
  1314         }
       
  1315 
       
  1316         // Detect repeated recompilation at the same BCI, and enforce a limit.
       
  1317         if (make_not_entrant && maybe_prior_recompile) {
       
  1318           // More than one recompile at this point.
       
  1319           trap_mdo->inc_overflow_recompile_count();
       
  1320           if (maybe_prior_trap
       
  1321               && ((uint)trap_mdo->overflow_recompile_count()
       
  1322                   > (uint)PerBytecodeRecompilationCutoff)) {
       
  1323             // Give up on the method containing the bad BCI.
       
  1324             if (trap_method() == nm->method()) {
       
  1325               make_not_compilable = true;
       
  1326             } else {
       
  1327               trap_method->set_not_compilable();
       
  1328               // But give grace to the enclosing nm->method().
       
  1329             }
       
  1330           }
       
  1331         }
       
  1332       } else {
       
  1333         // For reasons which are not recorded per-bytecode, we simply
       
  1334         // force recompiles unconditionally.
       
  1335         // (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
       
  1336         make_not_entrant = true;
       
  1337       }
       
  1338 
       
  1339       // Go back to the compiler if there are too many traps in this method.
       
  1340       if (this_trap_count >= (uint)PerMethodTrapLimit) {
       
  1341         // If there are too many traps in this method, force a recompile.
       
  1342         // This will allow the compiler to see the limit overflow, and
       
  1343         // take corrective action, if possible.
       
  1344         // (This condition is an unlikely backstop only, because the
       
  1345         // PerBytecodeTrapLimit is more likely to take effect first,
       
  1346         // if it is applicable.)
       
  1347         make_not_entrant = true;
       
  1348       }
       
  1349 
       
  1350       // Here's more hysteresis:  If there has been a recompile at
       
  1351       // this trap point already, run the method in the interpreter
       
  1352       // for a while to exercise it more thoroughly.
       
  1353       if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
       
  1354         reset_counters = true;
       
  1355       }
       
  1356 
       
  1357       if (make_not_entrant && pdata != NULL) {
       
  1358         // Record the recompilation event, if any.
       
  1359         int tstate0 = pdata->trap_state();
       
  1360         int tstate1 = trap_state_set_recompiled(tstate0, true);
       
  1361         if (tstate1 != tstate0)
       
  1362           pdata->set_trap_state(tstate1);
       
  1363       }
       
  1364     }
       
  1365 
       
  1366     // Take requested actions on the method:
       
  1367 
       
  1368     // Reset invocation counters
       
  1369     if (reset_counters) {
       
  1370       if (nm->is_osr_method())
       
  1371         reset_invocation_counter(trap_scope, CompileThreshold);
       
  1372       else
       
  1373         reset_invocation_counter(trap_scope);
       
  1374     }
       
  1375 
       
  1376     // Recompile
       
  1377     if (make_not_entrant) {
       
  1378       nm->make_not_entrant();
       
  1379     }
       
  1380 
       
  1381     // Give up compiling
       
  1382     if (make_not_compilable) {
       
  1383       assert(make_not_entrant, "consistent");
       
  1384       nm->method()->set_not_compilable();
       
  1385     }
       
  1386 
       
  1387   } // Free marked resources
       
  1388 
       
  1389 }
       
  1390 JRT_END
       
  1391 
       
  1392 methodDataOop
       
  1393 Deoptimization::get_method_data(JavaThread* thread, methodHandle m,
       
  1394                                 bool create_if_missing) {
       
  1395   Thread* THREAD = thread;
       
  1396   methodDataOop mdo = m()->method_data();
       
  1397   if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
       
  1398     // Build an MDO.  Ignore errors like OutOfMemory;
       
  1399     // that simply means we won't have an MDO to update.
       
  1400     methodOopDesc::build_interpreter_method_data(m, THREAD);
       
  1401     if (HAS_PENDING_EXCEPTION) {
       
  1402       assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
       
  1403       CLEAR_PENDING_EXCEPTION;
       
  1404     }
       
  1405     mdo = m()->method_data();
       
  1406   }
       
  1407   return mdo;
       
  1408 }
       
  1409 
       
  1410 ProfileData*
       
  1411 Deoptimization::query_update_method_data(methodDataHandle trap_mdo,
       
  1412                                          int trap_bci,
       
  1413                                          Deoptimization::DeoptReason reason,
       
  1414                                          //outputs:
       
  1415                                          uint& ret_this_trap_count,
       
  1416                                          bool& ret_maybe_prior_trap,
       
  1417                                          bool& ret_maybe_prior_recompile) {
       
  1418   uint prior_trap_count = trap_mdo->trap_count(reason);
       
  1419   uint this_trap_count  = trap_mdo->inc_trap_count(reason);
       
  1420 
       
  1421   // If the runtime cannot find a place to store trap history,
       
  1422   // it is estimated based on the general condition of the method.
       
  1423   // If the method has ever been recompiled, or has ever incurred
       
   1424   // a trap with the present reason, then this BCI is assumed
       
  1425   // (pessimistically) to be the culprit.
       
  1426   bool maybe_prior_trap      = (prior_trap_count != 0);
       
  1427   bool maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
       
  1428   ProfileData* pdata = NULL;
       
  1429 
       
  1430 
       
  1431   // For reasons which are recorded per bytecode, we check per-BCI data.
       
  1432   DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
       
  1433   if (per_bc_reason != Reason_none) {
       
  1434     // Find the profile data for this BCI.  If there isn't one,
       
  1435     // try to allocate one from the MDO's set of spares.
       
  1436     // This will let us detect a repeated trap at this point.
       
  1437     pdata = trap_mdo->allocate_bci_to_data(trap_bci);
       
  1438 
       
  1439     if (pdata != NULL) {
       
  1440       // Query the trap state of this profile datum.
       
  1441       int tstate0 = pdata->trap_state();
       
  1442       if (!trap_state_has_reason(tstate0, per_bc_reason))
       
  1443         maybe_prior_trap = false;
       
  1444       if (!trap_state_is_recompiled(tstate0))
       
  1445         maybe_prior_recompile = false;
       
  1446 
       
  1447       // Update the trap state of this profile datum.
       
  1448       int tstate1 = tstate0;
       
  1449       // Record the reason.
       
  1450       tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
       
  1451       // Store the updated state on the MDO, for next time.
       
  1452       if (tstate1 != tstate0)
       
  1453         pdata->set_trap_state(tstate1);
       
  1454     } else {
       
  1455       if (LogCompilation && xtty != NULL)
       
  1456         // Missing MDP?  Leave a small complaint in the log.
       
  1457         xtty->elem("missing_mdp bci='%d'", trap_bci);
       
  1458     }
       
  1459   }
       
  1460 
       
  1461   // Return results:
       
  1462   ret_this_trap_count = this_trap_count;
       
  1463   ret_maybe_prior_trap = maybe_prior_trap;
       
  1464   ret_maybe_prior_recompile = maybe_prior_recompile;
       
  1465   return pdata;
       
  1466 }
       
  1467 
       
  1468 void
       
  1469 Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason) {
       
  1470   ResourceMark rm;
       
  1471   // Ignored outputs:
       
  1472   uint ignore_this_trap_count;
       
  1473   bool ignore_maybe_prior_trap;
       
  1474   bool ignore_maybe_prior_recompile;
       
  1475   query_update_method_data(trap_mdo, trap_bci,
       
  1476                            (DeoptReason)reason,
       
  1477                            ignore_this_trap_count,
       
  1478                            ignore_maybe_prior_trap,
       
  1479                            ignore_maybe_prior_recompile);
       
  1480 }
       
  1481 
       
  1482 void Deoptimization::reset_invocation_counter(ScopeDesc* trap_scope, jint top_count) {
       
  1483   ScopeDesc* sd = trap_scope;
       
  1484   for (; !sd->is_top(); sd = sd->sender()) {
       
   1485     // Reset invocation counters of inlined methods, since they can also trigger compilations.
       
  1486     sd->method()->invocation_counter()->reset();
       
  1487   }
       
  1488   InvocationCounter* c = sd->method()->invocation_counter();
       
  1489   if (top_count != _no_count) {
       
  1490     // It was an OSR method, so bump the count higher.
       
  1491     c->set(c->state(), top_count);
       
  1492   } else {
       
  1493     c->reset();
       
  1494   }
       
  1495   sd->method()->backedge_counter()->reset();
       
  1496 }
       
  1497 
       
  1498 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request) {
       
  1499 
       
   1500   // Still in Java; no safepoint has occurred yet.

   1501   {

   1502     // This call enters the VM and may safepoint.
       
  1503     uncommon_trap_inner(thread, trap_request);
       
  1504   }
       
  1505   return fetch_unroll_info_helper(thread);
       
  1506 }
       
  1507 
       
  1508 // Local derived constants.
       
  1509 // Further breakdown of DataLayout::trap_state, as promised by DataLayout.
       
  1510 const int DS_REASON_MASK   = DataLayout::trap_mask >> 1;
       
  1511 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
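
// Illustrative layout, assuming DataLayout::trap_mask == 31: then
// DS_REASON_MASK == 15 and DS_RECOMPILE_BIT == 16, i.e. the low bits of a
// trap state hold one recorded DeoptReason (all-ones meaning Reason_many)
// and the single top bit records that a recompile was requested.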
       
  1512 
       
  1513 //---------------------------trap_state_reason---------------------------------
       
  1514 Deoptimization::DeoptReason
       
  1515 Deoptimization::trap_state_reason(int trap_state) {
       
  1516   // This assert provides the link between the width of DataLayout::trap_bits
       
  1517   // and the encoding of "recorded" reasons.  It ensures there are enough
       
  1518   // bits to store all needed reasons in the per-BCI MDO profile.
       
  1519   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
       
  1520   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
       
  1521   trap_state -= recompile_bit;
       
  1522   if (trap_state == DS_REASON_MASK) {
       
  1523     return Reason_many;
       
  1524   } else {
       
  1525     assert((int)Reason_none == 0, "state=0 => Reason_none");
       
  1526     return (DeoptReason)trap_state;
       
  1527   }
       
  1528 }
       
  1529 //-------------------------trap_state_has_reason-------------------------------
       
  1530 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
       
  1531   assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
       
  1532   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
       
  1533   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
       
  1534   trap_state -= recompile_bit;
       
  1535   if (trap_state == DS_REASON_MASK) {
       
  1536     return -1;  // true, unspecifically (bottom of state lattice)
       
  1537   } else if (trap_state == reason) {
       
  1538     return 1;   // true, definitely
       
  1539   } else if (trap_state == 0) {
       
  1540     return 0;   // false, definitely (top of state lattice)
       
  1541   } else {
       
  1542     return 0;   // false, definitely
       
  1543   }
       
  1544 }
       
  1545 //-------------------------trap_state_add_reason-------------------------------
       
  1546 int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
       
  1547   assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
       
  1548   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
       
  1549   trap_state -= recompile_bit;
       
  1550   if (trap_state == DS_REASON_MASK) {
       
  1551     return trap_state + recompile_bit;     // already at state lattice bottom
       
  1552   } else if (trap_state == reason) {
       
  1553     return trap_state + recompile_bit;     // the condition is already true
       
  1554   } else if (trap_state == 0) {
       
  1555     return reason + recompile_bit;          // no condition has yet been true
       
  1556   } else {
       
  1557     return DS_REASON_MASK + recompile_bit;  // fall to state lattice bottom
       
  1558   }
       
  1559 }
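
// Worked example (illustrative, recompile bit clear): starting from state 0,
// adding Reason_null_check yields Reason_null_check; adding the same reason
// again is a no-op; adding a different reason, say Reason_range_check,
// collapses the state to DS_REASON_MASK, which reads back as Reason_many.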
       
  1560 //-----------------------trap_state_is_recompiled------------------------------
       
  1561 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
       
  1562   return (trap_state & DS_RECOMPILE_BIT) != 0;
       
  1563 }
       
  1564 //-----------------------trap_state_set_recompiled-----------------------------
       
  1565 int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
       
  1566   if (z)  return trap_state |  DS_RECOMPILE_BIT;
       
  1567   else    return trap_state & ~DS_RECOMPILE_BIT;
       
  1568 }
       
  1569 //---------------------------format_trap_state---------------------------------
       
  1570 // This is used for debugging and diagnostics, including hotspot.log output.
       
  1571 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
       
  1572                                               int trap_state) {
       
  1573   DeoptReason reason      = trap_state_reason(trap_state);
       
  1574   bool        recomp_flag = trap_state_is_recompiled(trap_state);
       
  1575   // Re-encode the state from its decoded components.
       
  1576   int decoded_state = 0;
       
  1577   if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
       
  1578     decoded_state = trap_state_add_reason(decoded_state, reason);
       
  1579   if (recomp_flag)
       
  1580     decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
       
  1581   // If the state re-encodes properly, format it symbolically.
       
  1582   // Because this routine is used for debugging and diagnostics,
       
  1583   // be robust even if the state is a strange value.
       
  1584   size_t len;
       
  1585   if (decoded_state != trap_state) {
       
  1586     // Random buggy state that doesn't decode??
       
  1587     len = jio_snprintf(buf, buflen, "#%d", trap_state);
       
  1588   } else {
       
  1589     len = jio_snprintf(buf, buflen, "%s%s",
       
  1590                        trap_reason_name(reason),
       
  1591                        recomp_flag ? " recompiled" : "");
       
  1592   }
       
  1593   if (len >= buflen)
       
  1594     buf[buflen-1] = '\0';
       
  1595   return buf;
       
  1596 }
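
// For example (illustrative): a state encoding Reason_class_check with the
// recompile bit set formats as "class_check recompiled"; a state that fails
// to re-encode is printed raw, e.g. "#42".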
       
  1597 
       
  1598 
       
  1599 //--------------------------------statics--------------------------------------
       
  1600 Deoptimization::DeoptAction Deoptimization::_unloaded_action
       
  1601   = Deoptimization::Action_reinterpret;
       
  1602 const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = {
       
  1603   // Note:  Keep this in sync. with enum DeoptReason.
       
  1604   "none",
       
  1605   "null_check",
       
  1606   "null_assert",
       
  1607   "range_check",
       
  1608   "class_check",
       
  1609   "array_check",
       
  1610   "intrinsic",
       
  1611   "unloaded",
       
  1612   "uninitialized",
       
  1613   "unreached",
       
  1614   "unhandled",
       
  1615   "constraint",
       
  1616   "div0_check",
       
  1617   "age"
       
  1618 };
       
  1619 const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
       
  1620   // Note:  Keep this in sync. with enum DeoptAction.
       
  1621   "none",
       
  1622   "maybe_recompile",
       
  1623   "reinterpret",
       
  1624   "make_not_entrant",
       
  1625   "make_not_compilable"
       
  1626 };
       
  1627 
       
  1628 const char* Deoptimization::trap_reason_name(int reason) {
       
  1629   if (reason == Reason_many)  return "many";
       
  1630   if ((uint)reason < Reason_LIMIT)
       
  1631     return _trap_reason_name[reason];
       
  1632   static char buf[20];
       
  1633   sprintf(buf, "reason%d", reason);
       
  1634   return buf;
       
  1635 }
       
  1636 const char* Deoptimization::trap_action_name(int action) {
       
  1637   if ((uint)action < Action_LIMIT)
       
  1638     return _trap_action_name[action];
       
  1639   static char buf[20];
       
  1640   sprintf(buf, "action%d", action);
       
  1641   return buf;
       
  1642 }
       
  1643 
       
  1644 // This is used for debugging and diagnostics, including hotspot.log output.
       
  1645 const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
       
  1646                                                 int trap_request) {
       
  1647   jint unloaded_class_index = trap_request_index(trap_request);
       
  1648   const char* reason = trap_reason_name(trap_request_reason(trap_request));
       
  1649   const char* action = trap_action_name(trap_request_action(trap_request));
       
  1650   size_t len;
       
  1651   if (unloaded_class_index < 0) {
       
  1652     len = jio_snprintf(buf, buflen, "reason='%s' action='%s'",
       
  1653                        reason, action);
       
  1654   } else {
       
  1655     len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'",
       
  1656                        reason, action, unloaded_class_index);
       
  1657   }
       
  1658   if (len >= buflen)
       
  1659     buf[buflen-1] = '\0';
       
  1660   return buf;
       
  1661 }
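
// Round-trip sketch (illustrative; the encoding itself lives in
// make_trap_request() in deoptimization.hpp):
//
//   int req = Deoptimization::make_trap_request(Reason_unloaded,
//                                               Action_reinterpret, 42);
//   format_trap_request(buf, sizeof(buf), req);
//   // => "reason='unloaded' action='reinterpret' index='42'"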
       
  1662 
       
  1663 juint Deoptimization::_deoptimization_hist
       
  1664         [Deoptimization::Reason_LIMIT]
       
  1665     [1 + Deoptimization::Action_LIMIT]
       
  1666         [Deoptimization::BC_CASE_LIMIT]
       
  1667   = {0};
       
  1668 
       
  1669 enum {
       
  1670   LSB_BITS = 8,
       
  1671   LSB_MASK = right_n_bits(LSB_BITS)
       
  1672 };
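
// Each histogram cell packs a bytecode into its low LSB_BITS bits and a
// count into the rest: cell == (count << LSB_BITS) | bytecode.  For example
// (illustrative), three traps at an aaload (0x32) are stored as
// (3 << 8) | 0x32; gather_statistics() below bumps the count by adding
// (1 << LSB_BITS), and print_statistics() recovers it via counter >> LSB_BITS.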
       
  1673 
       
  1674 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
       
  1675                                        Bytecodes::Code bc) {
       
  1676   assert(reason >= 0 && reason < Reason_LIMIT, "oob");
       
  1677   assert(action >= 0 && action < Action_LIMIT, "oob");
       
  1678   _deoptimization_hist[Reason_none][0][0] += 1;  // total
       
  1679   _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
       
  1680   juint* cases = _deoptimization_hist[reason][1+action];
       
  1681   juint* bc_counter_addr = NULL;
       
  1682   juint  bc_counter      = 0;
       
  1683   // Look for an unused counter, or an exact match to this BC.
       
  1684   if (bc != Bytecodes::_illegal) {
       
  1685     for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
       
  1686       juint* counter_addr = &cases[bc_case];
       
  1687       juint  counter = *counter_addr;
       
  1688       if ((counter == 0 && bc_counter_addr == NULL)
       
  1689           || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
       
  1690         // this counter is either free or is already devoted to this BC
       
  1691         bc_counter_addr = counter_addr;
       
  1692         bc_counter = counter | bc;
       
  1693       }
       
  1694     }
       
  1695   }
       
  1696   if (bc_counter_addr == NULL) {
       
  1697     // Overflow, or no given bytecode.
       
  1698     bc_counter_addr = &cases[BC_CASE_LIMIT-1];
       
  1699     bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear LSB
       
  1700   }
       
  1701   *bc_counter_addr = bc_counter + (1 << LSB_BITS);
       
  1702 }
       
  1703 
       
  1704 jint Deoptimization::total_deoptimization_count() {
       
  1705   return _deoptimization_hist[Reason_none][0][0];
       
  1706 }
       
  1707 
       
  1708 jint Deoptimization::deoptimization_count(DeoptReason reason) {
       
  1709   assert(reason >= 0 && reason < Reason_LIMIT, "oob");
       
  1710   return _deoptimization_hist[reason][0][0];
       
  1711 }
       
  1712 
       
  1713 void Deoptimization::print_statistics() {
       
  1714   juint total = total_deoptimization_count();
       
  1715   juint account = total;
       
  1716   if (total != 0) {
       
  1717     ttyLocker ttyl;
       
  1718     if (xtty != NULL)  xtty->head("statistics type='deoptimization'");
       
  1719     tty->print_cr("Deoptimization traps recorded:");
       
  1720     #define PRINT_STAT_LINE(name, r) \
       
  1721       tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
       
  1722     PRINT_STAT_LINE("total", total);
       
  1723     // For each non-zero entry in the histogram, print the reason,
       
  1724     // the action, and (if specifically known) the type of bytecode.
       
  1725     for (int reason = 0; reason < Reason_LIMIT; reason++) {
       
  1726       for (int action = 0; action < Action_LIMIT; action++) {
       
  1727         juint* cases = _deoptimization_hist[reason][1+action];
       
  1728         for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
       
  1729           juint counter = cases[bc_case];
       
  1730           if (counter != 0) {
       
  1731             char name[1*K];
       
  1732             Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
       
   1733             if (bc_case == BC_CASE_LIMIT-1 && (int)bc == 0)
       
  1734               bc = Bytecodes::_illegal;
       
  1735             sprintf(name, "%s/%s/%s",
       
  1736                     trap_reason_name(reason),
       
  1737                     trap_action_name(action),
       
  1738                     Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
       
  1739             juint r = counter >> LSB_BITS;
       
  1740             tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
       
  1741             account -= r;
       
  1742           }
       
  1743         }
       
  1744       }
       
  1745     }
       
  1746     if (account != 0) {
       
  1747       PRINT_STAT_LINE("unaccounted", account);
       
  1748     }
       
  1749     #undef PRINT_STAT_LINE
       
  1750     if (xtty != NULL)  xtty->tail("statistics");
       
  1751   }
       
  1752 }
       
  1753 #else // COMPILER2
       
  1754 
       
  1755 
       
   1756 // Stubs for a C1-only system.
       
  1757 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
       
  1758   return false;
       
  1759 }
       
  1760 
       
  1761 const char* Deoptimization::trap_reason_name(int reason) {
       
  1762   return "unknown";
       
  1763 }
       
  1764 
       
  1765 void Deoptimization::print_statistics() {
       
  1766   // no output
       
  1767 }
       
  1768 
       
  1769 void
       
  1770 Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason) {
       
   1771   // no update
       
  1772 }
       
  1773 
       
  1774 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
       
  1775   return 0;
       
  1776 }
       
  1777 
       
  1778 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
       
  1779                                        Bytecodes::Code bc) {
       
  1780   // no update
       
  1781 }
       
  1782 
       
  1783 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
       
  1784                                               int trap_state) {
       
  1785   jio_snprintf(buf, buflen, "#%d", trap_state);
       
  1786   return buf;
       
  1787 }
       
  1788 
       
  1789 #endif // COMPILER2