177 int index = str.get_method_index(); |
177 int index = str.get_method_index(); |
178 size_t call_site_offset = cpcache->get_f1_offset(index); |
178 size_t call_site_offset = cpcache->get_f1_offset(index); |
179 |
179 |
180 // Load the CallSite object from the constant pool cache. |
180 // Load the CallSite object from the constant pool cache. |
181 const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache); |
181 const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache); |
182 Node* cpc = kit.makecon(cpcache_ptr); |
182 Node* cpcache_adr = kit.makecon(cpcache_ptr); |
183 Node* adr = kit.basic_plus_adr(cpc, cpc, call_site_offset); |
183 Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset); |
184 Node* call_site = kit.make_load(kit.control(), adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw); |
184 Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw); |
185 |
185 |
186 // Load the MethodHandle (target) from the CallSite object. |
186 // Load the target MethodHandle from the CallSite object. |
187 Node* mh_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes()); |
187 Node* target_mh_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes()); |
188 Node* mh = kit.make_load(kit.control(), mh_adr, TypeInstPtr::BOTTOM, T_OBJECT); |
188 Node* target_mh = kit.make_load(kit.control(), target_mh_adr, TypeInstPtr::BOTTOM, T_OBJECT); |
189 |
189 |
190 address stub = SharedRuntime::get_resolve_opt_virtual_call_stub(); |
190 address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub(); |
191 |
191 |
192 CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), stub, method(), kit.bci()); |
192 CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), resolve_stub, method(), kit.bci()); |
193 // invokedynamic is treated as an optimized invokevirtual. |
193 // invokedynamic is treated as an optimized invokevirtual. |
194 call->set_optimized_virtual(true); |
194 call->set_optimized_virtual(true); |
195 // Take extra care (in the presence of argument motion) not to trash the SP: |
195 // Take extra care (in the presence of argument motion) not to trash the SP: |
196 call->set_method_handle_invoke(true); |
196 call->set_method_handle_invoke(true); |
197 |
197 |
198 // Pass the MethodHandle as first argument and shift the other |
198 // Pass the target MethodHandle as first argument and shift the |
199 // arguments. |
199 // other arguments. |
200 call->init_req(0 + TypeFunc::Parms, mh); |
200 call->init_req(0 + TypeFunc::Parms, target_mh); |
201 uint nargs = call->method()->arg_size(); |
201 uint nargs = call->method()->arg_size(); |
202 for (uint i = 1; i < nargs; i++) { |
202 for (uint i = 1; i < nargs; i++) { |
203 Node* arg = kit.argument(i - 1); |
203 Node* arg = kit.argument(i - 1); |
204 call->init_req(i + TypeFunc::Parms, arg); |
204 call->init_req(i + TypeFunc::Parms, arg); |
205 } |
205 } |
645 } |
645 } |
646 return kit.transfer_exceptions_into_jvms(); |
646 return kit.transfer_exceptions_into_jvms(); |
647 } |
647 } |
648 |
648 |
649 |
649 |
|
650 //------------------------PredictedDynamicCallGenerator----------------------- |
|
651 // Internal class which handles all out-of-line calls checking receiver type. |
|
652 class PredictedDynamicCallGenerator : public CallGenerator { |
|
653 ciMethodHandle* _predicted_method_handle; |
|
654 CallGenerator* _if_missed; |
|
655 CallGenerator* _if_hit; |
|
656 float _hit_prob; |
|
657 |
|
658 public: |
|
659 PredictedDynamicCallGenerator(ciMethodHandle* predicted_method_handle, |
|
660 CallGenerator* if_missed, |
|
661 CallGenerator* if_hit, |
|
662 float hit_prob) |
|
663 : CallGenerator(if_missed->method()), |
|
664 _predicted_method_handle(predicted_method_handle), |
|
665 _if_missed(if_missed), |
|
666 _if_hit(if_hit), |
|
667 _hit_prob(hit_prob) |
|
668 {} |
|
669 |
|
670 virtual bool is_inline() const { return _if_hit->is_inline(); } |
|
671 virtual bool is_deferred() const { return _if_hit->is_deferred(); } |
|
672 |
|
673 virtual JVMState* generate(JVMState* jvms); |
|
674 }; |
|
675 |
|
676 |
|
677 CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle, |
|
678 CallGenerator* if_missed, |
|
679 CallGenerator* if_hit, |
|
680 float hit_prob) { |
|
681 return new PredictedDynamicCallGenerator(predicted_method_handle, if_missed, if_hit, hit_prob); |
|
682 } |
|
683 |
|
684 |
|
// Emit IR for a predicted invokedynamic call:
//   1. Load the CallSite from the caller's constant pool cache, then load
//      its current target MethodHandle.
//   2. Guard: compare the loaded target against the predicted (constant)
//      MethodHandle.
//   3. On a miss, run _if_missed; on a hit, run _if_hit (falling back to a
//      direct call if inlining fails); then merge the two paths.
// Returns the resulting JVMState with exceptions transferred.
JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_dynamic_call bci='%d'", jvms->bci());
  }

  // Get the constant pool cache from the caller class.
  ciMethod* caller_method = jvms->method();
  ciBytecodeStream str(caller_method);
  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
  ciCPCache* cpcache = str.get_cpcache();

  // Get the offset of the CallSite from the constant pool cache
  // pointer.
  int index = str.get_method_index();
  size_t call_site_offset = cpcache->get_f1_offset(index);

  // Load the CallSite object from the constant pool cache (f1 slot).
  // The cpcache itself is a compile-time constant; the load must stay
  // raw-aliased so it is not optimized across cache updates.
  const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
  Node* cpcache_adr = kit.makecon(cpcache_ptr);
  Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
  Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);

  // Load the target MethodHandle from the CallSite object.
  Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes());
  Node* target_mh = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);

  // Check if the MethodHandle is still the same as the one we predicted
  // at compile time (pointer identity against the constant oop).
  const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true);
  Node* predicted_mh = kit.makecon(predicted_mh_ptr);

  // Branch on target_mh == predicted_mh, biased by the profiled hit
  // probability; true projection is the hit (fast) path.
  Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(target_mh, predicted_mh));
  Node* bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
  IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
  kit.set_control( gvn.transform(new(kit.C, 1) IfTrueNode (iff)));
  Node* slow_ctl = gvn.transform(new(kit.C, 1) IfFalseNode(iff));

  // Generate the miss (slow) path under a saved/restored JVMState, and
  // capture its exit map for the merge below.
  // NOTE(review): slow_jvms is left uninitialized when the slow control is
  // statically dead (kit.stopped() inside the block); it appears to be read
  // later only when the fast path is dead instead — confirm both paths can
  // never be dead simultaneously.
  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      assert(slow_jvms != NULL, "miss path must not fail to generate");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The fast path is statically dead (the guard can never succeed),
    // so the miss path is the only path remaining.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond: merge control, i/o, memory, and every live
  // JVMState slot of the fast map with the captured slow map.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C, 3) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    // Phi any slot whose value differs between the two paths.
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}
|
797 |
|
798 |
650 //-------------------------UncommonTrapCallGenerator----------------------------- |
799 //-------------------------UncommonTrapCallGenerator----------------------------- |
651 // Internal class which handles all out-of-line calls checking receiver type. |
800 // Internal class which handles all out-of-line calls checking receiver type. |
652 class UncommonTrapCallGenerator : public CallGenerator { |
801 class UncommonTrapCallGenerator : public CallGenerator { |
653 Deoptimization::DeoptReason _reason; |
802 Deoptimization::DeoptReason _reason; |
654 Deoptimization::DeoptAction _action; |
803 Deoptimization::DeoptAction _action; |