hotspot/src/share/vm/opto/doCall.cpp
changeset 13391 30245956af37
parent 12741 9315df9ea655
child 13392 1ef07ae0723d
--- a/hotspot/src/share/vm/opto/doCall.cpp	Mon Jul 23 13:04:59 2012 -0700
+++ b/hotspot/src/share/vm/opto/doCall.cpp	Tue Jul 24 10:51:00 2012 -0700
@@ -59,13 +59,13 @@
 }
 #endif
 
-CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual,
+CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_is_virtual,
                                        JVMState* jvms, bool allow_inline,
                                        float prof_factor, bool allow_intrinsics) {
   ciMethod*       caller   = jvms->method();
   int             bci      = jvms->bci();
   Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
-  guarantee(call_method != NULL, "failed method resolution");
+  guarantee(callee != NULL, "failed method resolution");
 
   // Dtrace currently doesn't work unless all calls are vanilla
   if (env()->dtrace_method_probes()) {
@@ -91,7 +91,7 @@
     int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
     int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
     log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
-                    log->identify(call_method), site_count, prof_factor);
+                    log->identify(callee), site_count, prof_factor);
     if (call_is_virtual)  log->print(" virtual='1'");
     if (allow_inline)     log->print(" inline='1'");
     if (receiver_count >= 0) {
@@ -109,7 +109,7 @@
   // We do this before the strict f.p. check below because the
   // intrinsics handle strict f.p. correctly.
   if (allow_inline && allow_intrinsics) {
-    CallGenerator* cg = find_intrinsic(call_method, call_is_virtual);
+    CallGenerator* cg = find_intrinsic(callee, call_is_virtual);
     if (cg != NULL)  return cg;
   }
 
@@ -117,19 +117,12 @@
   // NOTE: This must happen before normal inlining logic below since
   // MethodHandle.invoke* are native methods which obviously don't
   // have bytecodes and so normal inlining fails.
-  if (call_method->is_method_handle_invoke()) {
-    if (bytecode != Bytecodes::_invokedynamic) {
-      GraphKit kit(jvms);
-      Node* method_handle = kit.argument(0);
-      return CallGenerator::for_method_handle_call(method_handle, jvms, caller, call_method, profile);
-    }
-    else {
-      return CallGenerator::for_invokedynamic_call(jvms, caller, call_method, profile);
-    }
+  if (callee->is_method_handle_intrinsic()) {
+    return CallGenerator::for_method_handle_call(jvms, caller, callee);
   }
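
With the new flag, both invokedynamic and MethodHandle.invoke* sites funnel through one test on the callee instead of a branch on the bytecode. A minimal standalone sketch of that shape, using a hypothetical Method struct rather than the real ci* API:

#include <iostream>

// Hypothetical stand-in for ciMethod; only the new flag matters here.
struct Method { bool method_handle_intrinsic; };

// One callee-side predicate replaces the old per-bytecode branch, so
// invokedynamic and MethodHandle.invoke* sites share a single path.
const char* choose_path(const Method& callee) {
  return callee.method_handle_intrinsic ? "CallGenerator::for_method_handle_call"
                                        : "normal inlining / call path";
}

int main() {
  std::cout << choose_path(Method{true}) << '\n';
}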
 
   // Do not inline strict fp into non-strict code, or the reverse
-  if (caller->is_strict() ^ call_method->is_strict()) {
+  if (caller->is_strict() ^ callee->is_strict()) {
     allow_inline = false;
   }
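
The ^ above is a boolean XOR, so inlining is blocked exactly when one of caller and callee is strictfp and the other is not. A trivial standalone check of that assumed semantics:

#include <cassert>

// True iff exactly one side is strictfp, the only case in which inlining
// across the boundary could change floating-point rounding behavior.
bool strictness_mismatch(bool caller_strict, bool callee_strict) {
  return caller_strict ^ callee_strict;
}

int main() {
  assert( strictness_mismatch(true,  false));
  assert( strictness_mismatch(false, true));
  assert(!strictness_mismatch(true,  true));
  assert(!strictness_mismatch(false, false));
}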
 
@@ -155,26 +148,26 @@
       }
       WarmCallInfo scratch_ci;
       if (!UseOldInlining)
-        scratch_ci.init(jvms, call_method, profile, prof_factor);
-      WarmCallInfo* ci = ilt->ok_to_inline(call_method, jvms, profile, &scratch_ci);
+        scratch_ci.init(jvms, callee, profile, prof_factor);
+      WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci);
       assert(ci != &scratch_ci, "do not let this pointer escape");
       bool allow_inline   = (ci != NULL && !ci->is_cold());
       bool require_inline = (allow_inline && ci->is_hot());
 
       if (allow_inline) {
-        CallGenerator* cg = CallGenerator::for_inline(call_method, expected_uses);
-        if (require_inline && cg != NULL && should_delay_inlining(call_method, jvms)) {
+        CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
+        if (require_inline && cg != NULL && should_delay_inlining(callee, jvms)) {
           // Delay the inlining of this method to give us the
           // opportunity to perform some high level optimizations
           // first.
-          return CallGenerator::for_late_inline(call_method, cg);
+          return CallGenerator::for_late_inline(callee, cg);
         }
         if (cg == NULL) {
           // Fall through.
         } else if (require_inline || !InlineWarmCalls) {
           return cg;
         } else {
-          CallGenerator* cold_cg = call_generator(call_method, vtable_index, call_is_virtual, jvms, false, prof_factor);
+          CallGenerator* cold_cg = call_generator(callee, vtable_index, call_is_virtual, jvms, false, prof_factor);
           return CallGenerator::for_warm_call(ci, cold_cg, cg);
         }
       }
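
For orientation, the fall-through order of this hunk as a schematic sketch (names are stand-ins, not the real CallGenerator API): a hot inline may be deferred as a late inline, warm calls wrap a hot generator around a cold fallback, and everything else becomes a plain call.

#include <iostream>
#include <memory>
#include <string>

struct Gen { std::string kind; };
using GenPtr = std::unique_ptr<Gen>;

GenPtr make(std::string k) { return std::make_unique<Gen>(Gen{std::move(k)}); }

// Mirrors call_generator()'s decision order above: hot inline, optionally
// deferred for late inlining; warm wrapping when InlineWarmCalls applies;
// otherwise an out-of-line virtual call.
GenPtr choose(bool can_inline, bool require_inline, bool should_delay, bool warm_calls) {
  if (can_inline) {
    GenPtr hot = make("inline");
    if (require_inline && should_delay) return make("late_inline(" + hot->kind + ")");
    if (require_inline || !warm_calls)  return hot;
    return make("warm(cold_call, " + hot->kind + ")");
  }
  return make("virtual_call");
}

int main() {
  std::cout << choose(true, true, true, false)->kind << '\n';  // late_inline(inline)
}
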
@@ -189,7 +182,7 @@
           (profile.morphism() == 2 && UseBimorphicInlining)) {
         // receiver_method = profile.method();
         // Profiles do not suggest methods now.  Look it up in the major receiver.
-        receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
+        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                       profile.receiver(0));
       }
       if (receiver_method != NULL) {
@@ -201,7 +194,7 @@
           CallGenerator* next_hit_cg = NULL;
           ciMethod* next_receiver_method = NULL;
           if (profile.morphism() == 2 && UseBimorphicInlining) {
-            next_receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
+            next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                                profile.receiver(1));
             if (next_receiver_method != NULL) {
               next_hit_cg = this->call_generator(next_receiver_method,
@@ -224,12 +217,12 @@
              ) {
             // Generate uncommon trap for class check failure path
             // in case of monomorphic or bimorphic virtual call site.
-            miss_cg = CallGenerator::for_uncommon_trap(call_method, reason,
+            miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
                         Deoptimization::Action_maybe_recompile);
           } else {
             // Generate virtual call for class check failure path
             // in case of polymorphic virtual call site.
-            miss_cg = CallGenerator::for_virtual_call(call_method, vtable_index);
+            miss_cg = CallGenerator::for_virtual_call(callee, vtable_index);
           }
           if (miss_cg != NULL) {
             if (next_hit_cg != NULL) {
@@ -252,11 +245,11 @@
   // There was no special inlining tactic, or it bailed out.
   // Use a more generic tactic, like a simple call.
   if (call_is_virtual) {
-    return CallGenerator::for_virtual_call(call_method, vtable_index);
+    return CallGenerator::for_virtual_call(callee, vtable_index);
   } else {
     // Class Hierarchy Analysis or Type Profile reveals a unique target,
     // or it is a static or special call.
-    return CallGenerator::for_direct_call(call_method, should_delay_inlining(call_method, jvms));
+    return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
   }
 }
 
@@ -355,33 +348,40 @@
 
   // Find target being called
   bool             will_link;
-  ciMethod*        dest_method   = iter().get_method(will_link);
-  ciInstanceKlass* holder_klass  = dest_method->holder();
+  ciMethod*        bc_callee    = iter().get_method(will_link);  // actual callee from bytecode
+  ciInstanceKlass* holder_klass = bc_callee->holder();
   ciKlass* holder = iter().get_declared_method_holder();
   ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
 
-  int nargs = dest_method->arg_size();
-  if (is_invokedynamic)  nargs -= 1;
-
   // uncommon-trap when callee is unloaded, uninitialized or will not link
   // bailout when too many arguments for register representation
-  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
+  if (!will_link || can_not_compile_call_site(bc_callee, klass)) {
 #ifndef PRODUCT
     if (PrintOpto && (Verbose || WizardMode)) {
       method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
-      dest_method->print_name(); tty->cr();
+      bc_callee->print_name(); tty->cr();
     }
 #endif
     return;
   }
   assert(holder_klass->is_loaded(), "");
-  assert((dest_method->is_static() || is_invokedynamic) == !has_receiver , "must match bc");
+  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc");  // XXX invokehandle (cur_bc_raw)
   // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
   // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
   assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
   // Note:  In the absence of miranda methods, an abstract class K can perform
   // an invokevirtual directly on an interface method I.m if K implements I.
 
+  const int nargs = bc_callee->arg_size();
+
+  // Push the appendix argument (MethodType, CallSite, etc.), if there is one.
+  if (iter().has_appendix()) {
+    ciObject* appendix_arg = iter().get_appendix();
+    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg);
+    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
+    push(appendix_arg_node);
+  }
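
What the new block above does, in isolation: linking an invokehandle/invokedynamic site can produce an extra constant appendix object, which is passed as a hidden trailing argument. A standalone sketch with hypothetical types (the real code builds a constant oop node via _gvn.makecon and pushes it on the parser stack):

#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Linking a method-handle call site yields the real target plus an optional
// constant "appendix" (e.g. a MethodType or bound CallSite object).
struct Resolved {
  std::string target;
  std::optional<std::string> appendix;
};

// The appendix rides as a hidden trailing argument, exactly where the hunk
// above does push(appendix_arg_node).
void emit_call(const Resolved& r, std::vector<std::string> args) {
  if (r.appendix) args.push_back(*r.appendix);
  std::cout << r.target << " called with " << args.size() << " args\n";
}

int main() {
  emit_call({"invokeBasic", std::string("MethodType")}, {"recv", "x"});
}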
+
   // ---------------------
   // Does Class Hierarchy Analysis reveal only a single target of a v-call?
   // Then we may inline or make a static call, but become dependent on there being only 1 target.
@@ -392,21 +392,21 @@
   // Choose call strategy.
   bool call_is_virtual = is_virtual_or_interface;
   int vtable_index = methodOopDesc::invalid_vtable_index;
-  ciMethod* call_method = dest_method;
+  ciMethod* callee = bc_callee;
 
   // Try to get the most accurate receiver type
   if (is_virtual_or_interface) {
     Node*             receiver_node = stack(sp() - nargs);
     const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
-    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);
+    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, bc_callee, receiver_type);
 
     // Has the call been sufficiently improved such that it is no longer a virtual call?
     if (optimized_virtual_method != NULL) {
-      call_method     = optimized_virtual_method;
+      callee          = optimized_virtual_method;
       call_is_virtual = false;
-    } else if (!UseInlineCaches && is_virtual && call_method->is_loaded()) {
+    } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
       // We can make a vtable call at this site
-      vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
+      vtable_index = callee->resolve_vtable_index(method()->holder(), klass);
     }
   }
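
Schematically, the receiver-type refinement above has three outcomes. A sketch with assumed names, not the real ciMethod interface:

#include <iostream>
#include <optional>
#include <string>

enum class Dispatch { Direct, Vtable, Virtual };

// Mirrors the decision above: a unique target from optimize_inlining()
// makes the call direct; without inline caches a loaded virtual callee can
// use a vtable index; otherwise the generic virtual dispatch remains.
Dispatch choose(const std::optional<std::string>& unique_target,
                bool use_inline_caches, bool callee_loaded) {
  if (unique_target)                       return Dispatch::Direct;
  if (!use_inline_caches && callee_loaded) return Dispatch::Vtable;
  return Dispatch::Virtual;
}

int main() {
  std::cout << (choose(std::nullopt, true, true) == Dispatch::Virtual) << '\n';  // 1
}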
 
@@ -416,22 +416,24 @@
   bool try_inline = (C->do_inlining() || InlineAccessors);
 
   // ---------------------
-  inc_sp(- nargs);              // Temporarily pop args for JVM state of call
+  dec_sp(nargs);              // Temporarily pop args for JVM state of call
   JVMState* jvms = sync_jvms();
 
   // ---------------------
   // Decide call tactic.
   // This call checks with CHA, the interpreter profile, intrinsics table, etc.
   // It decides whether inlining is desirable or not.
-  CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
+  CallGenerator* cg = C->call_generator(callee, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
+
+  bc_callee = callee = NULL;  // don't use bc_callee and callee after this point
 
   // ---------------------
   // Round double arguments before call
-  round_double_arguments(dest_method);
+  round_double_arguments(cg->method());
 
 #ifndef PRODUCT
   // bump global counters for calls
-  count_compiled_calls(false/*at_method_entry*/, cg->is_inline());
+  count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());
 
   // Record first part of parsing work for this call
   parse_histogram()->record_change();
@@ -447,8 +449,8 @@
   // because exceptions don't return to the call site.)
   profile_call(receiver);
 
-  JVMState* new_jvms;
-  if ((new_jvms = cg->generate(jvms)) == NULL) {
+  JVMState* new_jvms = cg->generate(jvms);
+  if (new_jvms == NULL) {
     // When an inlining attempt fails (e.g., too many arguments),
     // it may contaminate the current compile state, making it
     // impossible to pull back and try again.  Once we call
@@ -460,7 +462,7 @@
     // the call site, perhaps because it did not match a pattern the
     // intrinsic was expecting to optimize. It should always be possible to
     // get a normal Java call that may inline in that case.
-    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
+    cg = C->call_generator(cg->method(), vtable_index, call_is_virtual, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
     if ((new_jvms = cg->generate(jvms)) == NULL) {
       guarantee(failing(), "call failed to generate:  calls should work");
       return;
@@ -469,8 +471,8 @@
 
   if (cg->is_inline()) {
     // Accumulate has_loops estimate
-    C->set_has_loops(C->has_loops() || call_method->has_loops());
-    C->env()->notice_inlined_method(call_method);
+    C->set_has_loops(C->has_loops() || cg->method()->has_loops());
+    C->env()->notice_inlined_method(cg->method());
   }
 
   // Reset parser state from [new_]jvms, which now carries results of the call.
@@ -492,20 +494,74 @@
     }
 
     // Round double result after a call from strict to non-strict code
-    round_double_result(dest_method);
+    round_double_result(cg->method());
+
+    ciType* rtype = cg->method()->return_type();
+    if (iter().cur_bc_raw() == Bytecodes::_invokehandle || is_invokedynamic) {
+      // Be careful here with return types.
+      ciType* ctype = iter().get_declared_method_signature()->return_type();
+      if (ctype != rtype) {
+        BasicType rt = rtype->basic_type();
+        BasicType ct = ctype->basic_type();
+        Node* retnode = peek();
+        if (ct == T_VOID) {
+          // It's OK for a method to return a value that is discarded.
+          // The discarding does not require any special action from the caller.
+          // The Java code knows this, at VerifyType.isNullConversion.
+          pop_node(rt);  // whatever it was, pop it
+          retnode = top();
+        } else if (rt == T_INT || is_subword_type(rt)) {
+          // FIXME: This logic should be factored out.
+          if (ct == T_BOOLEAN) {
+            retnode = _gvn.transform( new (C, 3) AndINode(retnode, intcon(0x1)) );
+          } else if (ct == T_CHAR) {
+            retnode = _gvn.transform( new (C, 3) AndINode(retnode, intcon(0xFFFF)) );
+          } else if (ct == T_BYTE) {
+            retnode = _gvn.transform( new (C, 3) LShiftINode(retnode, intcon(24)) );
+            retnode = _gvn.transform( new (C, 3) RShiftINode(retnode, intcon(24)) );
+          } else if (ct == T_SHORT) {
+            retnode = _gvn.transform( new (C, 3) LShiftINode(retnode, intcon(16)) );
+            retnode = _gvn.transform( new (C, 3) RShiftINode(retnode, intcon(16)) );
+          } else {
+            assert(ct == T_INT, err_msg("rt=%d, ct=%d", rt, ct));
+          }
+        } else if (rt == T_OBJECT) {
+          assert(ct == T_OBJECT, err_msg("rt=T_OBJECT, ct=%d", ct));
+          if (ctype->is_loaded()) {
+            Node* if_fail = top();
+            retnode = gen_checkcast(retnode, makecon(TypeKlassPtr::make(ctype->as_klass())), &if_fail);
+            if (if_fail != top()) {
+              PreserveJVMState pjvms(this);
+              set_control(if_fail);
+              builtin_throw(Deoptimization::Reason_class_check);
+            }
+            pop();
+            push(retnode);
+          }
+        } else {
+          assert(ct == rt, err_msg("unexpected mismatch rt=%d, ct=%d", rt, ct));
+          // push a zero; it's better than getting an oop/int mismatch
+          retnode = pop_node(rt);
+          retnode = zerocon(ct);
+          push_node(ct, retnode);
+        }
+        // Now that the value is well-behaved, continue with the call-site type.
+        rtype = ctype;
+      }
+    }
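
The mask and shift-pair sequences above are the standard int subword normalizations; on two's-complement hardware the LShiftI/RShiftI pair sign-extends the low byte or short. A standalone check of the same identities in plain C++ (not Ideal nodes):

#include <cassert>
#include <cstdint>

int32_t as_boolean(int32_t v) { return v & 0x1;    }               // AndI(v, 0x1)
int32_t as_char   (int32_t v) { return v & 0xFFFF; }               // AndI(v, 0xFFFF): zero-extend
int32_t as_byte   (int32_t v) { return static_cast<int8_t>(v);  }  // == (v << 24) >> 24
int32_t as_short  (int32_t v) { return static_cast<int16_t>(v); }  // == (v << 16) >> 16

int main() {
  assert(as_byte(0x1FF)    == -1);      // low byte 0xFF sign-extends
  assert(as_char(-1)       == 0xFFFF);  // char is an unsigned 16-bit value
  assert(as_short(0x18000) == -32768);  // low 16 bits 0x8000 sign-extend
  assert(as_boolean(2)     == 0);       // only bit 0 survives
}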
 
     // If the return type of the method is not loaded, assert that the
     // value we got is a null.  Otherwise, we need to recompile.
-    if (!dest_method->return_type()->is_loaded()) {
+    if (!rtype->is_loaded()) {
 #ifndef PRODUCT
       if (PrintOpto && (Verbose || WizardMode)) {
         method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
-        dest_method->print_name(); tty->cr();
+        cg->method()->print_name(); tty->cr();
       }
 #endif
       if (C->log() != NULL) {
         C->log()->elem("assert_null reason='return' klass='%d'",
-                       C->log()->identify(dest_method->return_type()));
+                       C->log()->identify(rtype));
       }
       // If there is going to be a trap, put it at the next bytecode:
       set_bci(iter().next_bci());