18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/bcEscapeAnalyzer.hpp"
27 #include "ci/ciCallSite.hpp"
28 #include "ci/ciObjArray.hpp"
29 #include "ci/ciMemberName.hpp"
30 #include "ci/ciMethodHandle.hpp"
31 #include "classfile/javaClasses.hpp"
32 #include "compiler/compileLog.hpp"
33 #include "opto/addnode.hpp"
34 #include "opto/callGenerator.hpp"
35 #include "opto/callnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/cfgnode.hpp"
38 #include "opto/parse.hpp"
39 #include "opto/rootnode.hpp"
40 #include "opto/runtime.hpp"
41 #include "opto/subnode.hpp"
42 #include "runtime/os.inline.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "utilities/debug.hpp"
45
46 // Utility function.
47 const TypeFunc* CallGenerator::tf() const {
48 return TypeFunc::make(method());
49 }
50
51 bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
52 return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
53 }
54
55 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
56 ciMethod* symbolic_info = caller->get_method_at_bci(bci);
57 return is_inlined_method_handle_intrinsic(symbolic_info, m);
103 GraphKit& exits = parser.exits();
104
105 if (C->failing()) {
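// Compilation is bailing out: drain and discard any exception states the parser accumulated.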
106 while (exits.pop_exception_state() != nullptr) ;
107 return nullptr;
108 }
109
110 assert(exits.jvms()->same_calls_as(jvms), "sanity");
111
112 // Simply return the exit state of the parser,
113 // augmented by any exceptional states.
114 return exits.transfer_exceptions_into_jvms();
115 }
116
117 //---------------------------DirectCallGenerator------------------------------
118 // Internal class which handles all out-of-line calls w/o receiver type checks.
119 class DirectCallGenerator : public CallGenerator {
120 private:
121 CallStaticJavaNode* _call_node;
122 // Force separate memory and I/O projections for the exceptional
123   // paths to facilitate late inlining.
124 bool _separate_io_proj;
125
126 protected:
127 void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
128
129 public:
130 DirectCallGenerator(ciMethod* method, bool separate_io_proj)
131 : CallGenerator(method),
132 _separate_io_proj(separate_io_proj)
133 {
134 }
135 virtual JVMState* generate(JVMState* jvms);
136
137 virtual CallNode* call_node() const { return _call_node; }
138 virtual CallGenerator* with_call_node(CallNode* call) {
139 DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
140 dcg->set_call_node(call->as_CallStaticJava());
141 return dcg;
142 }
143 };
144
145 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
146 GraphKit kit(jvms);
147 kit.C->print_inlining_update(this);
148 bool is_static = method()->is_static();
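// A statically bound call to a static method resolves through the static-call stub; a statically
// bound call to an instance method (an "optimized virtual" call) uses the opt-virtual resolution stub.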
149 address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
150 : SharedRuntime::get_resolve_opt_virtual_call_stub();
151
152 if (kit.C->log() != nullptr) {
153 kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
154 }
155
156 CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
157 if (is_inlined_method_handle_intrinsic(jvms, method())) {
158 // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
159 // additional information about the method being invoked should be attached
160 // to the call site to make resolution logic work
161 // (see SharedRuntime::resolve_static_call_C).
162 call->set_override_symbolic_info(true);
163 }
164 _call_node = call; // Save the call node in case we need it later
165 if (!is_static) {
166 // Make an explicit receiver null_check as part of this call.
167 // Since we share a map with the caller, his JVMS gets adjusted.
168 kit.null_check_receiver_before_call(method());
169 if (kit.stopped()) {
170 // And dump it back to the caller, decorated with any exceptions:
171 return kit.transfer_exceptions_into_jvms();
172 }
173 // Mark the call node as virtual, sort of:
174 call->set_optimized_virtual(true);
175 if (method()->is_method_handle_intrinsic() ||
176 method()->is_compiled_lambda_form()) {
177 call->set_method_handle_invoke(true);
178 }
179 }
180 kit.set_arguments_for_java_call(call);
181 kit.set_edges_for_java_call(call, false, _separate_io_proj);
182 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
183 kit.push_node(method()->return_type()->basic_type(), ret);
184 return kit.transfer_exceptions_into_jvms();
185 }
186
187 //--------------------------VirtualCallGenerator------------------------------
188 // Internal class which handles all out-of-line calls checking receiver type.
189 class VirtualCallGenerator : public CallGenerator {
190 private:
191 int _vtable_index;
192 bool _separate_io_proj;
193 CallDynamicJavaNode* _call_node;
194
195 protected:
196 void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
197
198 public:
199 VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
200 : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
201 {
202 assert(vtable_index == Method::invalid_vtable_index ||
203 vtable_index >= 0, "either invalid or usable");
204 }
205 virtual bool is_virtual() const { return true; }
206 virtual JVMState* generate(JVMState* jvms);
207
208 virtual CallNode* call_node() const { return _call_node; }
209 int vtable_index() const { return _vtable_index; }
210
211 virtual CallGenerator* with_call_node(CallNode* call) {
212 VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
213 cg->set_call_node(call->as_CallDynamicJava());
214 return cg;
215 }
216 };
217
218 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
219 GraphKit kit(jvms);
220 Node* receiver = kit.argument(0);
221
222 kit.C->print_inlining_update(this);
223
224 if (kit.C->log() != nullptr) {
225 kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
226 }
227
228 // If the receiver is a constant null, do not torture the system
229 // by attempting to call through it. The compile will proceed
230 // correctly, but may bail out in final_graph_reshaping, because
231 // the call instruction will have a seemingly deficient out-count.
232 // (The bailout says something misleading about an "infinite loop".)
233 if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
234 assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
235 ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
236 int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
237 kit.inc_sp(arg_size); // restore arguments
238 kit.uncommon_trap(Deoptimization::Reason_null_check,
239 Deoptimization::Action_none,
240 nullptr, "null receiver");
241 return kit.transfer_exceptions_into_jvms();
261 }
262
263 assert(!method()->is_static(), "virtual call must not be to static");
264 assert(!method()->is_final(), "virtual call should not be to final");
265 assert(!method()->is_private(), "virtual call should not be to private");
266 assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
267 "no vtable calls if +UseInlineCaches ");
268 address target = SharedRuntime::get_resolve_virtual_call_stub();
269 // Normal inline cache used for call
270 CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
271 if (is_inlined_method_handle_intrinsic(jvms, method())) {
272 // To be able to issue a direct call (optimized virtual or virtual)
273 // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
274 // about the method being invoked should be attached to the call site to
275 // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
276 call->set_override_symbolic_info(true);
277 }
278 _call_node = call; // Save the call node in case we need it later
279
280 kit.set_arguments_for_java_call(call);
281 kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
282 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
283 kit.push_node(method()->return_type()->basic_type(), ret);
284
285 // Represent the effect of an implicit receiver null_check
286 // as part of this call. Since we share a map with the caller,
287 // his JVMS gets adjusted.
288 kit.cast_not_null(receiver);
289 return kit.transfer_exceptions_into_jvms();
290 }
291
292 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
293 if (InlineTree::check_can_parse(m) != nullptr) return nullptr;
294 return new ParseGenerator(m, expected_uses);
295 }
296
297 // As a special case, the JVMS passed to this CallGenerator is
298 // for the method execution already in progress, not just the JVMS
299 // of the caller. Thus, this CallGenerator cannot be mixed with others!
300 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
354 return DirectCallGenerator::generate(jvms);
355 }
356
357 virtual void print_inlining_late(InliningResult result, const char* msg) {
358 CallNode* call = call_node();
359 Compile* C = Compile::current();
360 C->print_inlining_assert_ready();
361 C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), result, msg);
362 C->print_inlining_move_to(this);
363 C->print_inlining_update_delayed(this);
364 }
365
366 virtual void set_unique_id(jlong id) {
367 _unique_id = id;
368 }
369
370 virtual jlong unique_id() const {
371 return _unique_id;
372 }
373
374 virtual CallGenerator* with_call_node(CallNode* call) {
375 LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
376 cg->set_call_node(call->as_CallStaticJava());
377 return cg;
378 }
379 };
380
381 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
382 return new LateInlineCallGenerator(method, inline_cg);
383 }
384
385 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
386 ciMethod* _caller;
387 bool _input_not_const;
388
389 virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
390
391 public:
392 LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
393 LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}
415 cg->set_call_node(call->as_CallStaticJava());
416 return cg;
417 }
418 };
419
420 bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
421   // When inlining a virtual call, the null check at the call and the call itself can throw. These two paths have different
422   // expression stacks, which causes late inlining to break. The MH invoker is not expected to be called from a method with
423   // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack, which solves the issue
424   // of late inlining with exceptions.
425 assert(!jvms->method()->has_exception_handlers() ||
426 (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
427 method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
428 // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
429 bool allow_inline = C->inlining_incrementally();
430 bool input_not_const = true;
431 CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
432 assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
433
434 if (cg != nullptr) {
435 if (!allow_inline && (C->print_inlining() || C->print_intrinsics())) {
436 C->print_inlining(cg->method(), jvms->depth()-1, call_node()->jvms()->bci(), InliningResult::FAILURE,
437 "late method handle call resolution");
438 }
439 assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
440 _inline_cg = cg;
441 C->dec_number_of_mh_late_inlines();
442 return true;
443 } else {
444     // A method handle call with a constant appendix argument should be either inlined or replaced with a direct call,
445     // unless there's a signature mismatch between caller and callee. If that failure occurs, there's not much to improve later,
446     // so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
447 return false;
448 }
449 }
450
451 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
452 assert(IncrementalInlineMH, "required");
453 Compile::current()->inc_number_of_mh_late_inlines();
454 CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
585
586 void LateInlineMHCallGenerator::do_late_inline() {
587 CallGenerator::do_late_inline_helper();
588 }
589
590 void LateInlineVirtualCallGenerator::do_late_inline() {
591 assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
592 CallGenerator::do_late_inline_helper();
593 }
594
595 void CallGenerator::do_late_inline_helper() {
596 assert(is_late_inline(), "only late inline allowed");
597
598 // Can't inline it
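// The call node may already have been removed, may have lost all of its uses, or may sit on a
// dead control path; in any of those cases there is nothing left to inline into.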
599 CallNode* call = call_node();
600 if (call == nullptr || call->outcnt() == 0 ||
601 call->in(0) == nullptr || call->in(0)->is_top()) {
602 return;
603 }
604
605 const TypeTuple *r = call->tf()->domain();
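// A top argument (other than the unused second half of a long/double slot) means this call sits on
// a path that has died during incremental inlining; there is nothing useful to inline here.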
606 for (int i1 = 0; i1 < method()->arg_size(); i1++) {
607 if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
608 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
609 return;
610 }
611 }
612
613 if (call->in(TypeFunc::Memory)->is_top()) {
614 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
615 return;
616 }
617 if (call->in(TypeFunc::Memory)->is_MergeMem()) {
618 MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
619 if (merge_mem->base_memory() == merge_mem->empty_memory()) {
620 return; // dead path
621 }
622 }
623
624 // check for unreachable loop
625 CallProjections callprojs;
626 // Similar to incremental inlining, don't assert that all call
627 // projections are still there for post-parse call devirtualization.
628 bool do_asserts = !is_mh_late_inline() && !is_virtual_late_inline();
629 call->extract_projections(&callprojs, true, do_asserts);
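// If any of the projections is wired back into the call's own control, memory, or I/O inputs,
// the call is part of an unreachable cycle; leave it alone.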
630 if ((callprojs.fallthrough_catchproj == call->in(0)) ||
631 (callprojs.catchall_catchproj == call->in(0)) ||
632 (callprojs.fallthrough_memproj == call->in(TypeFunc::Memory)) ||
633 (callprojs.catchall_memproj == call->in(TypeFunc::Memory)) ||
634 (callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
635 (callprojs.catchall_ioproj == call->in(TypeFunc::I_O)) ||
636 (callprojs.resproj != nullptr && call->find_edge(callprojs.resproj) != -1) ||
637 (callprojs.exobj != nullptr && call->find_edge(callprojs.exobj) != -1)) {
638 return;
639 }
640
641 Compile* C = Compile::current();
642 // Remove inlined methods from Compiler's lists.
643 if (call->is_macro()) {
644 C->remove_macro_node(call);
645 }
646
647   // The call is marked as pure (no important side effects), but its result isn't used.
648 // It's safe to remove the call.
649 bool result_not_used = (callprojs.resproj == nullptr || callprojs.resproj->outcnt() == 0);
650
651 if (is_pure_call() && result_not_used) {
652 GraphKit kit(call->jvms());
653 kit.replace_call(call, C->top(), true, do_asserts);
654 } else {
655     // Make a clone of the JVMState that is appropriate for driving a parse
656 JVMState* old_jvms = call->jvms();
657 JVMState* jvms = old_jvms->clone_shallow(C);
658 uint size = call->req();
659 SafePointNode* map = new SafePointNode(size, jvms);
660 for (uint i1 = 0; i1 < size; i1++) {
661 map->init_req(i1, call->in(i1));
662 }
663
664 // Make sure the state is a MergeMem for parsing.
665 if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
666 Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
667 C->initial_gvn()->set_type_bottom(mem);
668 map->set_req(TypeFunc::Memory, mem);
669 }
670
671 uint nargs = method()->arg_size();
672 // blow away old call arguments
673 Node* top = C->top();
674 for (uint i1 = 0; i1 < nargs; i1++) {
675 map->set_req(TypeFunc::Parms + i1, top);
676 }
677 jvms->set_map(map);
678
679 // Make enough space in the expression stack to transfer
680 // the incoming arguments and return value.
681 map->ensure_stack(jvms, jvms->method()->max_stack());
682 for (uint i1 = 0; i1 < nargs; i1++) {
683 map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
684 }
685
686 C->print_inlining_assert_ready();
687
688 C->print_inlining_move_to(this);
689
690 C->log_late_inline(this);
691
692 // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
693 if (!do_late_inline_check(C, jvms)) {
694 map->disconnect_inputs(C);
695 C->print_inlining_update_delayed(this);
696 return;
697 }
698 if (C->print_inlining() && (is_mh_late_inline() || is_virtual_late_inline())) {
699 C->print_inlining_update_delayed(this);
700 }
701
702 // Setup default node notes to be picked up by the inlining
703 Node_Notes* old_nn = C->node_notes_at(call->_idx);
704 if (old_nn != nullptr) {
705 Node_Notes* entry_nn = old_nn->clone(C);
706 entry_nn->set_jvms(jvms);
707 C->set_default_node_notes(entry_nn);
708 }
709
710 // Now perform the inlining using the synthesized JVMState
711 JVMState* new_jvms = inline_cg()->generate(jvms);
712 if (new_jvms == nullptr) return; // no change
713 if (C->failing()) return;
714
715 // Capture any exceptional control flow
716 GraphKit kit(new_jvms);
717
718 // Find the result object
719 Node* result = C->top();
720 int result_size = method()->return_type()->size();
721 if (result_size != 0 && !kit.stopped()) {
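// Long and double results occupy two stack slots and are popped as a pair.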
722 result = (result_size == 1) ? kit.pop() : kit.pop_pair();
723 }
724
725 if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
726 result = kit.must_be_not_null(result, false);
727 }
728
729 if (inline_cg()->is_inline()) {
730 C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
731 C->env()->notice_inlined_method(inline_cg()->method());
732 }
733 C->set_inlining_progress(true);
734 C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
735 kit.replace_call(call, result, true, do_asserts);
736 }
737 }
738
739 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
740
741 public:
742 LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
743 LateInlineCallGenerator(method, inline_cg) {}
744
745 virtual JVMState* generate(JVMState* jvms) {
746 Compile *C = Compile::current();
747
748 C->log_inline_id(this);
749
750 C->add_string_late_inline(this);
751
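// Emit a regular direct call for now; the registered generator performs the actual inlining later,
// during the string late-inlining pass.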
752 JVMState* new_jvms = DirectCallGenerator::generate(jvms);
753 return new_jvms;
754 }
937 // Inline failed, so make a direct call.
938 assert(_if_hit->is_inline(), "must have been a failed inline");
939 CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
940 new_jvms = cg->generate(kit.sync_jvms());
941 }
942 kit.add_exception_states_from(new_jvms);
943 kit.set_jvms(new_jvms);
944
945 // Need to merge slow and fast?
946 if (slow_map == nullptr) {
947 // The fast path is the only path remaining.
948 return kit.transfer_exceptions_into_jvms();
949 }
950
951 if (kit.stopped()) {
952 // Inlined method threw an exception, so it's just the slow path after all.
953 kit.set_jvms(slow_jvms);
954 return kit.transfer_exceptions_into_jvms();
955 }
956
957 // There are 2 branches and the replaced nodes are only valid on
958 // one: restore the replaced nodes to what they were before the
959 // branch.
960 kit.map()->set_replaced_nodes(replaced_nodes);
961
962 // Finish the diamond.
963 kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
964 RegionNode* region = new RegionNode(3);
965 region->init_req(1, kit.control());
966 region->init_req(2, slow_map->control());
967 kit.set_control(gvn.transform(region));
968 Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
969 iophi->set_req(2, slow_map->i_o());
970 kit.set_i_o(gvn.transform(iophi));
971 // Merge memory
972 kit.merge_memory(slow_map->merged_memory(), region, 2);
973 // Transform new memory Phis.
974 for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
975 Node* phi = mms.memory();
976 if (phi->is_Phi() && phi->in(0) == region) {
977 mms.set_memory(gvn.transform(phi));
978 }
979 }
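// tos marks the first stack slot past the live expression stack; slots from tos up to monoff() are unused.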
980 uint tos = kit.jvms()->stkoff() + kit.sp();
981 uint limit = slow_map->req();
982 for (uint i = TypeFunc::Parms; i < limit; i++) {
983 // Skip unused stack slots; fast forward to monoff();
984 if (i == tos) {
985 i = kit.jvms()->monoff();
986       if (i >= limit)  break;
987 }
988 Node* m = kit.map()->in(i);
989 Node* n = slow_map->in(i);
990 if (m != n) {
991 const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
992 Node* phi = PhiNode::make(region, m, t);
993 phi->set_req(2, n);
994 kit.map()->set_req(i, gvn.transform(phi));
995 }
996 }
997 return kit.transfer_exceptions_into_jvms();
998 }
999
1000
1001 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
1002 assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
1003 bool input_not_const;
1004 CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
1005 Compile* C = Compile::current();
1006 bool should_delay = C->should_delay_inlining();
1007 if (cg != nullptr) {
1008 if (should_delay) {
1009 return CallGenerator::for_late_inline(callee, cg);
1010 } else {
1011 return cg;
1012 }
1013 }
1014 int bci = jvms->bci();
1015 ciCallProfile profile = caller->call_profile_at_bci(bci);
1016 int call_site_count = caller->scale_count(profile.count());
1017
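// Retry as a late MH inline only if the call site was actually reached (profiled count > 0) and the
// decision cannot be made now (delayed inlining, non-constant MH argument, not yet in the
// incremental-inlining pass, or the inlining budget is exhausted); otherwise emit a plain out-of-line call.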
1018 if (IncrementalInlineMH && call_site_count > 0 &&
1019 (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
1020 return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1021 } else {
1022 // Out-of-line call.
1023 return CallGenerator::for_direct_call(callee);
1024 }
1025 }
1026
1027 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
1028 GraphKit kit(jvms);
1029 PhaseGVN& gvn = kit.gvn();
1030 Compile* C = kit.C;
1031 vmIntrinsics::ID iid = callee->intrinsic_id();
1032 input_not_const = true;
1033 if (StressMethodHandleLinkerInlining) {
1034 allow_inline = false;
1035 }
1036 switch (iid) {
1037 case vmIntrinsics::_invokeBasic:
1038 {
1039 // Get MethodHandle receiver:
1040 Node* receiver = kit.argument(0);
1041 if (receiver->Opcode() == Op_ConP) {
1042 input_not_const = false;
1043 const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
1044 if (recv_toop != nullptr) {
1045 ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
1046 const int vtable_index = Method::invalid_vtable_index;
1058 PROB_ALWAYS);
1059 return cg;
1060 } else {
1061 assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
1062 Type::str(receiver->bottom_type()));
1063 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1064 "receiver is always null");
1065 }
1066 } else {
1067 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1068 "receiver not constant");
1069 }
1070 }
1071 break;
1072
1073 case vmIntrinsics::_linkToVirtual:
1074 case vmIntrinsics::_linkToStatic:
1075 case vmIntrinsics::_linkToSpecial:
1076 case vmIntrinsics::_linkToInterface:
1077 {
1078 // Get MemberName argument:
1079 Node* member_name = kit.argument(callee->arg_size() - 1);
1080 if (member_name->Opcode() == Op_ConP) {
1081 input_not_const = false;
1082 const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1083 ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1084
1085 if (!ciMethod::is_consistent_info(callee, target)) {
1086 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1087 "signatures mismatch");
1088 return nullptr;
1089 }
1090
1091 // In lambda forms we erase signature types to avoid resolving issues
1092 // involving class loaders. When we optimize a method handle invoke
1093       // to a direct call, we must cast the receiver and arguments to their
1094 // actual types.
1095 ciSignature* signature = target->signature();
1096 const int receiver_skip = target->is_static() ? 0 : 1;
1097 // Cast receiver to its type.
1098 if (!target->is_static()) {
1099 Node* recv = kit.argument(0);
1132 ciKlass* speculative_receiver_type = nullptr;
1133 if (is_virtual_or_interface) {
1134 ciInstanceKlass* klass = target->holder();
1135 Node* receiver_node = kit.argument(0);
1136 const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1137 // call_does_dispatch and vtable_index are out-parameters. They might be changed.
1138 // optimize_virtual_call() takes 2 different holder
1139 // arguments for a corner case that doesn't apply here (see
1140 // Parse::do_call())
1141 target = C->optimize_virtual_call(caller, klass, klass,
1142 target, receiver_type, is_virtual,
1143 call_does_dispatch, vtable_index, // out-parameters
1144 false /* check_access */);
1145 // We lack profiling at this call but type speculation may
1146 // provide us with a type
1147 speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
1148 }
1149 CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1150 allow_inline,
1151 PROB_ALWAYS,
1152 speculative_receiver_type);
1153 return cg;
1154 } else {
1155 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1156 "member_name not constant");
1157 }
1158 }
1159 break;
1160
1161 case vmIntrinsics::_linkToNative:
1162 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1163 "native call");
1164 break;
1165
1166 default:
1167 fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1168 break;
1169 }
1170 return nullptr;
1171 }
1172
1207 // do_intrinsic(0)
1208 // else
1209 // if (predicate(1))
1210 // do_intrinsic(1)
1211 // ...
1212 // else
1213 // do_java_comp
1214
1215 GraphKit kit(jvms);
1216 PhaseGVN& gvn = kit.gvn();
1217
1218 CompileLog* log = kit.C->log();
1219 if (log != nullptr) {
1220 log->elem("predicated_intrinsic bci='%d' method='%d'",
1221 jvms->bci(), log->identify(method()));
1222 }
1223
1224 if (!method()->is_static()) {
1225 // We need an explicit receiver null_check before checking its type in predicate.
1226 // We share a map with the caller, so his JVMS gets adjusted.
1227 Node* receiver = kit.null_check_receiver_before_call(method());
1228 if (kit.stopped()) {
1229 return kit.transfer_exceptions_into_jvms();
1230 }
1231 }
1232
1233 int n_predicates = _intrinsic->predicates_count();
1234 assert(n_predicates > 0, "sanity");
1235
1236 JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1237
1238 // Region for normal compilation code if intrinsic failed.
1239 Node* slow_region = new RegionNode(1);
1240
1241 int results = 0;
1242 for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1243 #ifdef ASSERT
1244 JVMState* old_jvms = kit.jvms();
1245 SafePointNode* old_map = kit.map();
1246 Node* old_io = old_map->i_o();
1247 Node* old_mem = old_map->memory();
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/bcEscapeAnalyzer.hpp"
27 #include "ci/ciCallSite.hpp"
28 #include "ci/ciObjArray.hpp"
29 #include "ci/ciMemberName.hpp"
30 #include "ci/ciMethodHandle.hpp"
31 #include "classfile/javaClasses.hpp"
32 #include "compiler/compileLog.hpp"
33 #include "opto/addnode.hpp"
34 #include "opto/callGenerator.hpp"
35 #include "opto/callnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/cfgnode.hpp"
38 #include "opto/inlinetypenode.hpp"
39 #include "opto/parse.hpp"
40 #include "opto/rootnode.hpp"
41 #include "opto/runtime.hpp"
42 #include "opto/subnode.hpp"
43 #include "runtime/os.inline.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "utilities/debug.hpp"
46
47 // Utility function.
48 const TypeFunc* CallGenerator::tf() const {
49 return TypeFunc::make(method());
50 }
51
52 bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
53 return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
54 }
55
56 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
57 ciMethod* symbolic_info = caller->get_method_at_bci(bci);
58 return is_inlined_method_handle_intrinsic(symbolic_info, m);
104 GraphKit& exits = parser.exits();
105
106 if (C->failing()) {
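// Compilation is bailing out: drain and discard any exception states the parser accumulated.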
107 while (exits.pop_exception_state() != nullptr) ;
108 return nullptr;
109 }
110
111 assert(exits.jvms()->same_calls_as(jvms), "sanity");
112
113 // Simply return the exit state of the parser,
114 // augmented by any exceptional states.
115 return exits.transfer_exceptions_into_jvms();
116 }
117
118 //---------------------------DirectCallGenerator------------------------------
119 // Internal class which handles all out-of-line calls w/o receiver type checks.
120 class DirectCallGenerator : public CallGenerator {
121 private:
122 CallStaticJavaNode* _call_node;
123 // Force separate memory and I/O projections for the exceptional
124 // paths to facilitate late inlining.
125 bool _separate_io_proj;
126
127 protected:
128 void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
129
130 public:
131 DirectCallGenerator(ciMethod* method, bool separate_io_proj)
132 : CallGenerator(method),
133 _call_node(nullptr),
134 _separate_io_proj(separate_io_proj)
135 {
136 if (InlineTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
137 // If that call has not been optimized by the time optimizations are over,
138 // we'll need to add a call to create an inline type instance from the klass
139 // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
140 // Separating memory and I/O projections for exceptions is required to
141 // perform that graph transformation.
142 _separate_io_proj = true;
143 }
144 }
145 virtual JVMState* generate(JVMState* jvms);
146
147 virtual CallNode* call_node() const { return _call_node; }
148 virtual CallGenerator* with_call_node(CallNode* call) {
149 DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
150 dcg->set_call_node(call->as_CallStaticJava());
151 return dcg;
152 }
153 };
154
155 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
156 GraphKit kit(jvms);
157 kit.C->print_inlining_update(this);
158 PhaseGVN& gvn = kit.gvn();
159 bool is_static = method()->is_static();
160 address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
161 : SharedRuntime::get_resolve_opt_virtual_call_stub();
162
163 if (kit.C->log() != nullptr) {
164 kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
165 }
166
167 CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
168 if (is_inlined_method_handle_intrinsic(jvms, method())) {
169 // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
170 // additional information about the method being invoked should be attached
171 // to the call site to make resolution logic work
172 // (see SharedRuntime::resolve_static_call_C).
173 call->set_override_symbolic_info(true);
174 }
175 _call_node = call; // Save the call node in case we need it later
176 if (!is_static) {
177 // Make an explicit receiver null_check as part of this call.
178 // Since we share a map with the caller, his JVMS gets adjusted.
179 kit.null_check_receiver_before_call(method());
180 if (kit.stopped()) {
181 // And dump it back to the caller, decorated with any exceptions:
182 return kit.transfer_exceptions_into_jvms();
183 }
184 // Mark the call node as virtual, sort of:
185 call->set_optimized_virtual(true);
186 if (method()->is_method_handle_intrinsic() ||
187 method()->is_compiled_lambda_form()) {
188 call->set_method_handle_invoke(true);
189 }
190 }
191 kit.set_arguments_for_java_call(call, is_late_inline());
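// Argument set-up may have ended this path (it can add checks for inline type arguments);
// if so, hand any pending exception states back to the caller.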
192 if (kit.stopped()) {
193 return kit.transfer_exceptions_into_jvms();
194 }
195 kit.set_edges_for_java_call(call, false, _separate_io_proj);
196 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
197 kit.push_node(method()->return_type()->basic_type(), ret);
198 return kit.transfer_exceptions_into_jvms();
199 }
200
201 //--------------------------VirtualCallGenerator------------------------------
202 // Internal class which handles all out-of-line calls checking receiver type.
203 class VirtualCallGenerator : public CallGenerator {
204 private:
205 int _vtable_index;
206 bool _separate_io_proj;
207 CallDynamicJavaNode* _call_node;
208
209 protected:
210 void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
211
212 public:
213 VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
214 : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
215 {
216 assert(vtable_index == Method::invalid_vtable_index ||
217 vtable_index >= 0, "either invalid or usable");
218 }
219 virtual bool is_virtual() const { return true; }
220 virtual JVMState* generate(JVMState* jvms);
221
222 virtual CallNode* call_node() const { return _call_node; }
223 int vtable_index() const { return _vtable_index; }
224
225 virtual CallGenerator* with_call_node(CallNode* call) {
226 VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
227 cg->set_call_node(call->as_CallDynamicJava());
228 return cg;
229 }
230 };
231
232 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
233 GraphKit kit(jvms);
234 Node* receiver = kit.argument(0);
235 kit.C->print_inlining_update(this);
236
237 if (kit.C->log() != nullptr) {
238 kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
239 }
240
241 // If the receiver is a constant null, do not torture the system
242 // by attempting to call through it. The compile will proceed
243 // correctly, but may bail out in final_graph_reshaping, because
244 // the call instruction will have a seemingly deficient out-count.
245 // (The bailout says something misleading about an "infinite loop".)
246 if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
247 assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
248 ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
249 int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
250 kit.inc_sp(arg_size); // restore arguments
251 kit.uncommon_trap(Deoptimization::Reason_null_check,
252 Deoptimization::Action_none,
253 nullptr, "null receiver");
254 return kit.transfer_exceptions_into_jvms();
274 }
275
276 assert(!method()->is_static(), "virtual call must not be to static");
277 assert(!method()->is_final(), "virtual call should not be to final");
278 assert(!method()->is_private(), "virtual call should not be to private");
279 assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
280 "no vtable calls if +UseInlineCaches ");
281 address target = SharedRuntime::get_resolve_virtual_call_stub();
282 // Normal inline cache used for call
283 CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
284 if (is_inlined_method_handle_intrinsic(jvms, method())) {
285 // To be able to issue a direct call (optimized virtual or virtual)
286 // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
287 // about the method being invoked should be attached to the call site to
288 // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
289 call->set_override_symbolic_info(true);
290 }
291 _call_node = call; // Save the call node in case we need it later
292
293 kit.set_arguments_for_java_call(call);
294 if (kit.stopped()) {
295 return kit.transfer_exceptions_into_jvms();
296 }
297 kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
298 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
299 kit.push_node(method()->return_type()->basic_type(), ret);
300
301 // Represent the effect of an implicit receiver null_check
302 // as part of this call. Since we share a map with the caller,
303 // his JVMS gets adjusted.
304 kit.cast_not_null(receiver);
305 return kit.transfer_exceptions_into_jvms();
306 }
307
308 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
309 if (InlineTree::check_can_parse(m) != nullptr) return nullptr;
310 return new ParseGenerator(m, expected_uses);
311 }
312
313 // As a special case, the JVMS passed to this CallGenerator is
314 // for the method execution already in progress, not just the JVMS
315 // of the caller. Thus, this CallGenerator cannot be mixed with others!
316 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
370 return DirectCallGenerator::generate(jvms);
371 }
372
373 virtual void print_inlining_late(InliningResult result, const char* msg) {
374 CallNode* call = call_node();
375 Compile* C = Compile::current();
376 C->print_inlining_assert_ready();
377 C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), result, msg);
378 C->print_inlining_move_to(this);
379 C->print_inlining_update_delayed(this);
380 }
381
382 virtual void set_unique_id(jlong id) {
383 _unique_id = id;
384 }
385
386 virtual jlong unique_id() const {
387 return _unique_id;
388 }
389
390 virtual CallGenerator* inline_cg() {
391 return _inline_cg;
392 }
393
394 virtual CallGenerator* with_call_node(CallNode* call) {
395 LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
396 cg->set_call_node(call->as_CallStaticJava());
397 return cg;
398 }
399 };
400
401 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
402 return new LateInlineCallGenerator(method, inline_cg);
403 }
404
405 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
406 ciMethod* _caller;
407 bool _input_not_const;
408
409 virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
410
411 public:
412 LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
413 LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}
435 cg->set_call_node(call->as_CallStaticJava());
436 return cg;
437 }
438 };
439
440 bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
441   // When inlining a virtual call, the null check at the call and the call itself can throw. These two paths have different
442   // expression stacks, which causes late inlining to break. The MH invoker is not expected to be called from a method with
443   // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack, which solves the issue
444   // of late inlining with exceptions.
445 assert(!jvms->method()->has_exception_handlers() ||
446 (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
447 method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
448 // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
449 bool allow_inline = C->inlining_incrementally();
450 bool input_not_const = true;
451 CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
452 assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
453
454 if (cg != nullptr) {
455 // AlwaysIncrementalInline causes for_method_handle_inline() to
456     // return a LateInlineCallGenerator. Extract the underlying
457     // inline CallGenerator from it.
458 if (AlwaysIncrementalInline && cg->is_late_inline() && !cg->is_virtual_late_inline()) {
459 cg = cg->inline_cg();
460 assert(cg != nullptr, "inline call generator expected");
461 }
462
463 if (!allow_inline && (C->print_inlining() || C->print_intrinsics())) {
464 C->print_inlining(cg->method(), jvms->depth()-1, call_node()->jvms()->bci(), InliningResult::FAILURE,
465 "late method handle call resolution");
466 }
467 assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
468 _inline_cg = cg;
469 C->dec_number_of_mh_late_inlines();
470 return true;
471 } else {
472     // A method handle call with a constant appendix argument should be either inlined or replaced with a direct call,
473     // unless there's a signature mismatch between caller and callee. If that failure occurs, there's not much to improve later,
474     // so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
475 return false;
476 }
477 }
478
479 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
480 assert(IncrementalInlineMH, "required");
481 Compile::current()->inc_number_of_mh_late_inlines();
482 CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
613
614 void LateInlineMHCallGenerator::do_late_inline() {
615 CallGenerator::do_late_inline_helper();
616 }
617
618 void LateInlineVirtualCallGenerator::do_late_inline() {
619 assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
620 CallGenerator::do_late_inline_helper();
621 }
622
623 void CallGenerator::do_late_inline_helper() {
624 assert(is_late_inline(), "only late inline allowed");
625
626 // Can't inline it
627 CallNode* call = call_node();
628 if (call == nullptr || call->outcnt() == 0 ||
629 call->in(0) == nullptr || call->in(0)->is_top()) {
630 return;
631 }
632
633 const TypeTuple* r = call->tf()->domain_cc();
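// With scalarized inline type arguments, the call's inputs follow the flattened calling convention
// (domain_cc); a top input in any non-HALF slot means this path died during incremental inlining.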
634 for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
635 if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
636 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
637 return;
638 }
639 }
640
641 if (call->in(TypeFunc::Memory)->is_top()) {
642 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
643 return;
644 }
645 if (call->in(TypeFunc::Memory)->is_MergeMem()) {
646 MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
647 if (merge_mem->base_memory() == merge_mem->empty_memory()) {
648 return; // dead path
649 }
650 }
651
652 // check for unreachable loop
653 // Similar to incremental inlining, don't assert that all call
654 // projections are still there for post-parse call devirtualization.
655 bool do_asserts = !is_mh_late_inline() && !is_virtual_late_inline();
656 CallProjections* callprojs = call->extract_projections(true, do_asserts);
657 if ((callprojs->fallthrough_catchproj == call->in(0)) ||
658 (callprojs->catchall_catchproj == call->in(0)) ||
659 (callprojs->fallthrough_memproj == call->in(TypeFunc::Memory)) ||
660 (callprojs->catchall_memproj == call->in(TypeFunc::Memory)) ||
661 (callprojs->fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
662 (callprojs->catchall_ioproj == call->in(TypeFunc::I_O)) ||
663 (callprojs->exobj != nullptr && call->find_edge(callprojs->exobj) != -1)) {
664 return;
665 }
666
667 Compile* C = Compile::current();
668 // Remove inlined methods from Compiler's lists.
669 if (call->is_macro()) {
670 C->remove_macro_node(call);
671 }
672
673
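// A scalarized inline type return can produce several result projections; the result counts as unused
// only if none of them has uses. A projection wired back into the call itself indicates an unreachable
// cycle, so bail out.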
674 bool result_not_used = true;
675 for (uint i = 0; i < callprojs->nb_resproj; i++) {
676 if (callprojs->resproj[i] != nullptr) {
677 if (callprojs->resproj[i]->outcnt() != 0) {
678 result_not_used = false;
679 }
680 if (call->find_edge(callprojs->resproj[i]) != -1) {
681 return;
682 }
683 }
684 }
685
686 if (is_pure_call() && result_not_used) {
687     // The call is marked as pure (no important side effects), but its result isn't used.
688 // It's safe to remove the call.
689 GraphKit kit(call->jvms());
690 kit.replace_call(call, C->top(), true, do_asserts);
691 } else {
693     // Make a clone of the JVMState that is appropriate for driving a parse
693 JVMState* old_jvms = call->jvms();
694 JVMState* jvms = old_jvms->clone_shallow(C);
695 uint size = call->req();
696 SafePointNode* map = new SafePointNode(size, jvms);
697 for (uint i1 = 0; i1 < size; i1++) {
698 map->init_req(i1, call->in(i1));
699 }
700
701 PhaseGVN& gvn = *C->initial_gvn();
702 // Make sure the state is a MergeMem for parsing.
703 if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
704 Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
705 gvn.set_type_bottom(mem);
706 map->set_req(TypeFunc::Memory, mem);
707 }
708
709 // blow away old call arguments
710 for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
711 map->set_req(i1, C->top());
712 }
713 jvms->set_map(map);
714
715 // Make enough space in the expression stack to transfer
716 // the incoming arguments and return value.
717 map->ensure_stack(jvms, jvms->method()->max_stack());
718 const TypeTuple* domain_sig = call->_tf->domain_sig();
719 uint nargs = method()->arg_size();
720 assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
721
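// i1 walks the declared signature, j walks the call's (possibly scalarized) inputs, and arg_num counts
// declared arguments while skipping the second slot of longs/doubles.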
722 uint j = TypeFunc::Parms;
723 int arg_num = 0;
724 for (uint i1 = 0; i1 < nargs; i1++) {
725 const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
726 if (t->is_inlinetypeptr() && !method()->get_Method()->mismatch() && method()->is_scalarized_arg(arg_num)) {
727 // Inline type arguments are not passed by reference: we get an argument per
728 // field of the inline type. Build InlineTypeNodes from the inline type arguments.
729 GraphKit arg_kit(jvms, &gvn);
730 Node* vt = InlineTypeNode::make_from_multi(&arg_kit, call, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
731 map->set_control(arg_kit.control());
732 map->set_argument(jvms, i1, vt);
733 } else {
734 map->set_argument(jvms, i1, call->in(j++));
735 }
736 if (t != Type::HALF) {
737 arg_num++;
738 }
739 }
740
741 C->print_inlining_assert_ready();
742
743 C->print_inlining_move_to(this);
744
745 C->log_late_inline(this);
746
747 // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
748 if (!do_late_inline_check(C, jvms)) {
749 map->disconnect_inputs(C);
750 C->print_inlining_update_delayed(this);
751 return;
752 }
753 if (C->print_inlining() && (is_mh_late_inline() || is_virtual_late_inline())) {
754 C->print_inlining_update_delayed(this);
755 }
756
757 // Check if we are late inlining a method handle call that returns an inline type as fields.
758 Node* buffer_oop = nullptr;
759 ciMethod* inline_method = inline_cg()->method();
760 ciType* return_type = inline_method->return_type();
761 if (!call->tf()->returns_inline_type_as_fields() && is_mh_late_inline() &&
762 return_type->is_inlinetype() && return_type->as_inline_klass()->can_be_returned_as_fields()) {
763 // Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
764 // Do this before the method handle call in case the buffer allocation triggers deoptimization and
765 // we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
766 GraphKit arg_kit(jvms, &gvn);
767 {
768 PreserveReexecuteState preexecs(&arg_kit);
769 arg_kit.jvms()->set_should_reexecute(true);
770 arg_kit.inc_sp(nargs);
771 Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(return_type->as_inline_klass()));
772 buffer_oop = arg_kit.new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true);
773 }
774 jvms = arg_kit.transfer_exceptions_into_jvms();
775 }
776
777 // Setup default node notes to be picked up by the inlining
778 Node_Notes* old_nn = C->node_notes_at(call->_idx);
779 if (old_nn != nullptr) {
780 Node_Notes* entry_nn = old_nn->clone(C);
781 entry_nn->set_jvms(jvms);
782 C->set_default_node_notes(entry_nn);
783 }
784
785 // Now perform the inlining using the synthesized JVMState
786 JVMState* new_jvms = inline_cg()->generate(jvms);
787 if (new_jvms == nullptr) return; // no change
788 if (C->failing()) return;
789
790 // Capture any exceptional control flow
791 GraphKit kit(new_jvms);
792
793 // Find the result object
794 Node* result = C->top();
795 int result_size = method()->return_type()->size();
796 if (result_size != 0 && !kit.stopped()) {
797 result = (result_size == 1) ? kit.pop() : kit.pop_pair();
798 }
799
800 if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
801 result = kit.must_be_not_null(result, false);
802 }
803
804 if (inline_cg()->is_inline()) {
805 C->set_has_loops(C->has_loops() || inline_method->has_loops());
806 C->env()->notice_inlined_method(inline_method);
807 }
808 C->set_inlining_progress(true);
809 C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
810
811 // Handle inline type returns
812 InlineTypeNode* vt = result->isa_InlineType();
813 if (vt != nullptr) {
814 if (call->tf()->returns_inline_type_as_fields()) {
815 vt->replace_call_results(&kit, call, C);
816 } else if (vt->is_InlineType()) {
817 // Result might still be allocated (for example, if it has been stored to a non-flat field)
818 if (!vt->is_allocated(&kit.gvn())) {
819 assert(buffer_oop != nullptr, "should have allocated a buffer");
820 RegionNode* region = new RegionNode(3);
821
822 // Check if result is null
823 Node* null_ctl = kit.top();
824 kit.null_check_common(vt->get_is_init(), T_INT, false, &null_ctl);
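// Null path: keep a null oop and the memory state from before the buffer initialization.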
825 region->init_req(1, null_ctl);
826 PhiNode* oop = PhiNode::make(region, kit.gvn().zerocon(T_OBJECT), TypeInstPtr::make(TypePtr::BotPTR, vt->type()->inline_klass()));
827 Node* init_mem = kit.reset_memory();
828 PhiNode* mem = PhiNode::make(region, init_mem, Type::MEMORY, TypePtr::BOTTOM);
829
830 // Not null, initialize the buffer
831 kit.set_all_memory(init_mem);
832 vt->store(&kit, buffer_oop, buffer_oop, vt->type()->inline_klass());
833 // Do not let stores that initialize this buffer be reordered with a subsequent
834 // store that would make this buffer accessible by other threads.
835 AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop);
836 assert(alloc != nullptr, "must have an allocation node");
837 kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
838 region->init_req(2, kit.control());
839 oop->init_req(2, buffer_oop);
840 mem->init_req(2, kit.merged_memory());
841
842 // Update oop input to buffer
843 kit.gvn().hash_delete(vt);
844 vt->set_oop(kit.gvn(), kit.gvn().transform(oop));
845 vt->set_is_buffered(kit.gvn());
846 vt = kit.gvn().transform(vt)->as_InlineType();
847
848 kit.set_control(kit.gvn().transform(region));
849 kit.set_all_memory(kit.gvn().transform(mem));
850 kit.record_for_igvn(region);
851 kit.record_for_igvn(oop);
852 kit.record_for_igvn(mem);
853 }
854 result = vt;
855 }
856 DEBUG_ONLY(buffer_oop = nullptr);
857 } else {
858 assert(result->is_top() || !call->tf()->returns_inline_type_as_fields(), "Unexpected return value");
859 }
860 assert(buffer_oop == nullptr, "unused buffer allocation");
861
862 kit.replace_call(call, result, true, do_asserts);
863 }
864 }
865
866 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
867
868 public:
869 LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
870 LateInlineCallGenerator(method, inline_cg) {}
871
872 virtual JVMState* generate(JVMState* jvms) {
873 Compile *C = Compile::current();
874
875 C->log_inline_id(this);
876
877 C->add_string_late_inline(this);
878
879 JVMState* new_jvms = DirectCallGenerator::generate(jvms);
880 return new_jvms;
881 }
1064 // Inline failed, so make a direct call.
1065 assert(_if_hit->is_inline(), "must have been a failed inline");
1066 CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
1067 new_jvms = cg->generate(kit.sync_jvms());
1068 }
1069 kit.add_exception_states_from(new_jvms);
1070 kit.set_jvms(new_jvms);
1071
1072 // Need to merge slow and fast?
1073 if (slow_map == nullptr) {
1074 // The fast path is the only path remaining.
1075 return kit.transfer_exceptions_into_jvms();
1076 }
1077
1078 if (kit.stopped()) {
1079 // Inlined method threw an exception, so it's just the slow path after all.
1080 kit.set_jvms(slow_jvms);
1081 return kit.transfer_exceptions_into_jvms();
1082 }
1083
1084 // Allocate inline types if they are merged with objects (similar to Parse::merge_common())
1085 uint tos = kit.jvms()->stkoff() + kit.sp();
1086 uint limit = slow_map->req();
1087 for (uint i = TypeFunc::Parms; i < limit; i++) {
1088 Node* m = kit.map()->in(i);
1089 Node* n = slow_map->in(i);
1090 const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1091 // TODO 8284443 still needed?
1092 if (m->is_InlineType() && !t->is_inlinetypeptr()) {
1093 // Allocate inline type in fast path
1094 m = m->as_InlineType()->buffer(&kit);
1095 kit.map()->set_req(i, m);
1096 }
1097 if (n->is_InlineType() && !t->is_inlinetypeptr()) {
1098 // Allocate inline type in slow path
1099 PreserveJVMState pjvms(&kit);
1100 kit.set_map(slow_map);
1101 n = n->as_InlineType()->buffer(&kit);
1102 kit.map()->set_req(i, n);
1103 slow_map = kit.stop();
1104 }
1105 }
1106
1107 // There are 2 branches and the replaced nodes are only valid on
1108 // one: restore the replaced nodes to what they were before the
1109 // branch.
1110 kit.map()->set_replaced_nodes(replaced_nodes);
1111
1112 // Finish the diamond.
1113 kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1114 RegionNode* region = new RegionNode(3);
1115 region->init_req(1, kit.control());
1116 region->init_req(2, slow_map->control());
1117 kit.set_control(gvn.transform(region));
1118 Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
1119 iophi->set_req(2, slow_map->i_o());
1120 kit.set_i_o(gvn.transform(iophi));
1121 // Merge memory
1122 kit.merge_memory(slow_map->merged_memory(), region, 2);
1123 // Transform new memory Phis.
1124 for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1125 Node* phi = mms.memory();
1126 if (phi->is_Phi() && phi->in(0) == region) {
1127 mms.set_memory(gvn.transform(phi));
1128 }
1129 }
1130 for (uint i = TypeFunc::Parms; i < limit; i++) {
1131 // Skip unused stack slots; fast forward to monoff();
1132 if (i == tos) {
1133 i = kit.jvms()->monoff();
1134       if (i >= limit)  break;
1135 }
1136 Node* m = kit.map()->in(i);
1137 Node* n = slow_map->in(i);
1138 if (m != n) {
1139 const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1140 Node* phi = PhiNode::make(region, m, t);
1141 phi->set_req(2, n);
1142 kit.map()->set_req(i, gvn.transform(phi));
1143 }
1144 }
1145 return kit.transfer_exceptions_into_jvms();
1146 }
1147
1148
1149 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
1150 assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
1151 bool input_not_const;
1152 CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
1153 Compile* C = Compile::current();
1154 bool should_delay = C->should_delay_inlining();
1155 if (cg != nullptr) {
1156 if (should_delay) {
1157 return CallGenerator::for_late_inline(callee, cg);
1158 } else {
1159 return cg;
1160 }
1161 }
1162 int bci = jvms->bci();
1163 ciCallProfile profile = caller->call_profile_at_bci(bci);
1164 int call_site_count = caller->scale_count(profile.count());
1165
1166 if (IncrementalInlineMH && (AlwaysIncrementalInline ||
1167 (call_site_count > 0 && (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
1168 return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1169 } else {
1170 // Out-of-line call.
1171 return CallGenerator::for_direct_call(callee);
1172 }
1173 }
1174
1175
1176 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
1177 GraphKit kit(jvms);
1178 PhaseGVN& gvn = kit.gvn();
1179 Compile* C = kit.C;
1180 vmIntrinsics::ID iid = callee->intrinsic_id();
1181 input_not_const = true;
1182 if (StressMethodHandleLinkerInlining) {
1183 allow_inline = false;
1184 }
1185 switch (iid) {
1186 case vmIntrinsics::_invokeBasic:
1187 {
1188 // Get MethodHandle receiver:
1189 Node* receiver = kit.argument(0);
1190 if (receiver->Opcode() == Op_ConP) {
1191 input_not_const = false;
1192 const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
1193 if (recv_toop != nullptr) {
1194 ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
1195 const int vtable_index = Method::invalid_vtable_index;
1207 PROB_ALWAYS);
1208 return cg;
1209 } else {
1210 assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
1211 Type::str(receiver->bottom_type()));
1212 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1213 "receiver is always null");
1214 }
1215 } else {
1216 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1217 "receiver not constant");
1218 }
1219 }
1220 break;
1221
1222 case vmIntrinsics::_linkToVirtual:
1223 case vmIntrinsics::_linkToStatic:
1224 case vmIntrinsics::_linkToSpecial:
1225 case vmIntrinsics::_linkToInterface:
1226 {
1227 int nargs = callee->arg_size();
1228 // Get MemberName argument:
1229 Node* member_name = kit.argument(nargs - 1);
1230 if (member_name->Opcode() == Op_ConP) {
1231 input_not_const = false;
1232 const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1233 ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1234
1235 if (!ciMethod::is_consistent_info(callee, target)) {
1236 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1237 "signatures mismatch");
1238 return nullptr;
1239 }
1240
1241 // In lambda forms we erase signature types to avoid resolving issues
1242 // involving class loaders. When we optimize a method handle invoke
1243       // to a direct call, we must cast the receiver and arguments to their
1244 // actual types.
1245 ciSignature* signature = target->signature();
1246 const int receiver_skip = target->is_static() ? 0 : 1;
1247 // Cast receiver to its type.
1248 if (!target->is_static()) {
1249 Node* recv = kit.argument(0);
1282 ciKlass* speculative_receiver_type = nullptr;
1283 if (is_virtual_or_interface) {
1284 ciInstanceKlass* klass = target->holder();
1285 Node* receiver_node = kit.argument(0);
1286 const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1287 // call_does_dispatch and vtable_index are out-parameters. They might be changed.
1288 // optimize_virtual_call() takes 2 different holder
1289 // arguments for a corner case that doesn't apply here (see
1290 // Parse::do_call())
1291 target = C->optimize_virtual_call(caller, klass, klass,
1292 target, receiver_type, is_virtual,
1293 call_does_dispatch, vtable_index, // out-parameters
1294 false /* check_access */);
1295 // We lack profiling at this call but type speculation may
1296 // provide us with a type
1297 speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
1298 }
1299 CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1300 allow_inline,
1301 PROB_ALWAYS,
1302 speculative_receiver_type,
1303 true);
1304 return cg;
1305 } else {
1306 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1307 "member_name not constant");
1308 }
1309 }
1310 break;
1311
1312 case vmIntrinsics::_linkToNative:
1313 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1314 "native call");
1315 break;
1316
1317 default:
1318 fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1319 break;
1320 }
1321 return nullptr;
1322 }
1323
1358 // do_intrinsic(0)
1359 // else
1360 // if (predicate(1))
1361 // do_intrinsic(1)
1362 // ...
1363 // else
1364 // do_java_comp
1365
1366 GraphKit kit(jvms);
1367 PhaseGVN& gvn = kit.gvn();
1368
1369 CompileLog* log = kit.C->log();
1370 if (log != nullptr) {
1371 log->elem("predicated_intrinsic bci='%d' method='%d'",
1372 jvms->bci(), log->identify(method()));
1373 }
1374
1375 if (!method()->is_static()) {
1376 // We need an explicit receiver null_check before checking its type in predicate.
1377 // We share a map with the caller, so his JVMS gets adjusted.
1378 kit.null_check_receiver_before_call(method());
1379 if (kit.stopped()) {
1380 return kit.transfer_exceptions_into_jvms();
1381 }
1382 }
1383
1384 int n_predicates = _intrinsic->predicates_count();
1385 assert(n_predicates > 0, "sanity");
1386
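// One exit state per intrinsic version plus one for the fall-through (non-intrinsic) path.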
1387 JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1388
1389 // Region for normal compilation code if intrinsic failed.
1390 Node* slow_region = new RegionNode(1);
1391
1392 int results = 0;
1393 for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1394 #ifdef ASSERT
1395 JVMState* old_jvms = kit.jvms();
1396 SafePointNode* old_map = kit.map();
1397 Node* old_io = old_map->i_o();
1398 Node* old_mem = old_map->memory();