hotspot/src/share/vm/code/compiledIC.cpp
changeset 42544 58de8aaf9365
parent 38133 78b95467b9f1
child 42650 1f304d0c888b
comparing 42543:9e7ac7aff2d1 with 42544:58de8aaf9365

--- a/hotspot/src/share/vm/code/compiledIC.cpp
+++ b/hotspot/src/share/vm/code/compiledIC.cpp
@@ -458,13 +458,15 @@
   // cleaning it immediately is harmless.
   // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
 }
 
 
-// is_optimized: Compiler has generated an optimized call (i.e., no inline
-// cache) static_bound: The call can be static bound (i.e., no need to use
-// inline cache)
+// is_optimized: Compiler has generated an optimized call (i.e., fixed, no inline cache)
+// static_bound: The call can be static bound. If it isn't also optimized, the property
+// wasn't provable at time of compilation. An optimized call will have any necessary
+// null check, while a static_bound call won't. A static_bound (but not optimized) call
+// must therefore use the unverified entry point.
 void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
                                            KlassHandle receiver_klass,
                                            bool is_optimized,
                                            bool static_bound,
                                            CompiledICInfo& info,
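The rewritten comment above hinges on a compiled method having two entry points: a verified entry, which assumes the receiver has already been checked, and an unverified entry, which performs the check itself. Below is a minimal, self-contained sketch of that idea; CompiledStub and its members are illustrative stand-ins, not HotSpot's types.

    #include <cstdio>

    struct CompiledStub {
      // Unverified entry: used when the caller could not prove the receiver,
      // e.g. a static_bound-but-not-optimized call (no null check emitted
      // at the call site), so the check happens here.
      void entry_point(void* receiver) {
        if (receiver == NULL) { std::puts("trap: null receiver"); return; }
        verified_entry_point(receiver);
      }
      // Verified entry: used for optimized calls, where the compiler already
      // proved (or emitted) the receiver check, so none is repeated here.
      void verified_entry_point(void* receiver) {
        std::printf("method body runs for receiver %p\n", receiver);
      }
    };

    int main() {
      CompiledStub m;
      int obj = 0;
      m.entry_point(&obj);          // unverified path: checks, then runs
      m.verified_entry_point(&obj); // verified path: runs directly
      m.entry_point(NULL);          // unverified path catches the null
      return 0;
    }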
@@ -473,15 +475,31 @@
 
   address entry = NULL;
   if (method_code != NULL && method_code->is_in_use()) {
     assert(method_code->is_compiled(), "must be compiled");
     // Call to compiled code
-    if (static_bound || is_optimized) {
+    //
+    // Note: the following problem exists with Compiler1:
+    //   - at compile time we may or may not know if the destination is final
+    //   - if we know that the destination is final (is_optimized), we will emit
+    //     an optimized virtual call (no inline cache), and need a Method* to make
+    //     a call to the interpreter
+    //   - if we don't know if the destination is final, we emit a standard
+    //     virtual call, and use CompiledICHolder to call interpreted code
+    //     (no static call stub has been generated)
+    //   - in the case where we notice here that the call is static bound, we
+    //     convert the call into what looks to be an optimized virtual call,
+    //     but we must use the unverified entry point (since there will be no
+    //     null check on a call when the target isn't loaded).
+    //     This causes problems when verifying the IC because
+    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
+    //     is aware of this and weakens its asserts.
+    if (is_optimized) {
       entry      = method_code->verified_entry_point();
     } else {
       entry      = method_code->entry_point();
     }
   }
   if (entry != NULL) {
     // Call to compiled code
     info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
   } else {
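The Compiler1 note above turns on what a monomorphic inline cache does at a call site: compare the receiver's klass against the one the site is currently bound to, and fall back to resolution on a mismatch (an optimized, fixed call skips the check entirely). The sketch below illustrates that dispatch under those assumptions; InlineCache, resolve_and_patch, and A_foo are hypothetical names, not HotSpot's.

    #include <cstdio>

    struct Klass  { const char* name; };
    struct Object { Klass* klass; };

    typedef void (*Entry)(Object* recv);

    // One call site's monomorphic inline cache: an expected receiver klass
    // plus the compiled target for that klass. expected == NULL models an
    // optimized (fixed) call that skips the receiver check entirely.
    struct InlineCache {
      Klass* expected;
      Entry  target;
    };

    static void resolve_and_patch(Object*) { std::puts("IC miss: re-resolve the call site"); }
    static void A_foo(Object*)             { std::puts("A::foo (compiled)"); }

    static void call_through_ic(InlineCache* ic, Object* recv) {
      if (ic->expected != NULL && recv->klass != ic->expected) {
        resolve_and_patch(recv);   // receiver klass differs: slow path
        return;
      }
      ic->target(recv);            // fast path: monomorphic hit
    }

    int main() {
      Klass A = {"A"}, B = {"B"};
      Object a = {&A}, b = {&B};
      InlineCache ic = {&A, A_foo};   // call site currently bound to klass A
      call_through_ic(&ic, &a);       // hit: prints "A::foo (compiled)"
      call_through_ic(&ic, &b);       // miss: prints the re-resolve message
      return 0;
    }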
@@ -488,37 +506,5 @@
-    // Note: the following problem exists with Compiler1:
-    //   - at compile time we may or may not know if the destination is final
-    //   - if we know that the destination is final, we will emit an optimized
-    //     virtual call (no inline cache), and need a Method* to make a call
-    //     to the interpreter
-    //   - if we do not know if the destination is final, we emit a standard
-    //     virtual call, and use CompiledICHolder to call interpreted code
-    //     (no static call stub has been generated)
-    //     However, in that case we will now notice it is static_bound
-    //     and convert the call into what looks to be an optimized
-    //     virtual call. This causes problems in verifying the IC because
-    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
-    //     is aware of this and weakens its asserts.
-
-    // static_bound should imply is_optimized -- otherwise we have a
-    // performance bug (statically-bindable method is called via a
-    // dynamically-dispatched call); note: the reverse implication isn't
-    // necessarily true -- the call may have been optimized based on compiler
-    // analysis (static_bound is only based on "final" etc.)
-#ifdef COMPILER2
-#ifdef TIERED
-#if defined(ASSERT)
-    // can't check the assert because we don't have the CompiledIC with which to
-    // find the address of the call instruction.
-    //
-    // CodeBlob* cb = find_blob_unsafe(instruction_address());
-    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
-#endif // ASSERT
-#else
-    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
-#endif // TIERED
-#endif // COMPILER2
     if (is_optimized) {
       // Use stub entry
       info.set_interpreter_entry(method()->get_c2i_entry(), method());
     } else {
       // Use icholder entry
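The block deleted in the last hunk guarded the invariant that static_bound should imply is_optimized (for COMPILER2 builds without TIERED). A tiny illustrative restatement of that check follows; check_monomorphic_flags is a hypothetical helper written for this note, not part of the source.

    #include <cassert>

    static void check_monomorphic_flags(bool static_bound, bool is_optimized) {
      // static_bound without is_optimized would mean a statically-bindable
      // method still goes through dynamic dispatch -- the performance bug
      // the deleted comment describes. The reverse is fine: a call can be
      // optimized from compiler analysis without being static_bound.
      assert(!static_bound || is_optimized);
      (void)static_bound; (void)is_optimized;  // keep NDEBUG builds warning-free
    }

    int main() {
      check_monomorphic_flags(false, false);  // ordinary virtual call
      check_monomorphic_flags(false, true);   // optimized by compiler analysis
      check_monomorphic_flags(true,  true);   // final target, optimized
      // check_monomorphic_flags(true, false) would fire the assert.
      return 0;
    }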