18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/bcEscapeAnalyzer.hpp"
27 #include "ci/ciCallSite.hpp"
28 #include "ci/ciObjArray.hpp"
29 #include "ci/ciMemberName.hpp"
30 #include "ci/ciMethodHandle.hpp"
31 #include "classfile/javaClasses.hpp"
32 #include "compiler/compileLog.hpp"
33 #include "opto/addnode.hpp"
34 #include "opto/callGenerator.hpp"
35 #include "opto/callnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/cfgnode.hpp"
38 #include "opto/parse.hpp"
39 #include "opto/rootnode.hpp"
40 #include "opto/runtime.hpp"
41 #include "opto/subnode.hpp"
42 #include "runtime/os.inline.hpp"
43 #include "runtime/sharedRuntime.hpp"
44 #include "utilities/debug.hpp"
45
46 // Utility function.
47 const TypeFunc* CallGenerator::tf() const {
48 return TypeFunc::make(method());
49 }
50
51 bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
52 return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
53 }
54
55 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
56 ciMethod* symbolic_info = caller->get_method_at_bci(bci);
57 return is_inlined_method_handle_intrinsic(symbolic_info, m);
103 GraphKit& exits = parser.exits();
104
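// If the compile is failing, discard any pending exception states and bail out.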
105 if (C->failing()) {
106 while (exits.pop_exception_state() != nullptr) ;
107 return nullptr;
108 }
109
110 assert(exits.jvms()->same_calls_as(jvms), "sanity");
111
112 // Simply return the exit state of the parser,
113 // augmented by any exceptional states.
114 return exits.transfer_exceptions_into_jvms();
115 }
116
117 //---------------------------DirectCallGenerator------------------------------
118 // Internal class which handles all out-of-line calls w/o receiver type checks.
119 class DirectCallGenerator : public CallGenerator {
120 private:
121 CallStaticJavaNode* _call_node;
122 // Force separate memory and I/O projections for the exceptional
123 // paths to facilitate late inlining.
124 bool _separate_io_proj;
125
126 protected:
127 void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
128
129 public:
130 DirectCallGenerator(ciMethod* method, bool separate_io_proj)
131 : CallGenerator(method), _call_node(nullptr),
132 _separate_io_proj(separate_io_proj)
133 {
134 }
135 virtual JVMState* generate(JVMState* jvms);
136
137 virtual CallNode* call_node() const { return _call_node; }
138 virtual CallGenerator* with_call_node(CallNode* call) {
139 DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
140 dcg->set_call_node(call->as_CallStaticJava());
141 return dcg;
142 }
143 };
144
145 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
146 GraphKit kit(jvms);
147 kit.C->print_inlining_update(this);
148 bool is_static = method()->is_static();
149 address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
150 : SharedRuntime::get_resolve_opt_virtual_call_stub();
151
152 if (kit.C->log() != nullptr) {
153 kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
154 }
155
156 CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
157 if (is_inlined_method_handle_intrinsic(jvms, method())) {
158 // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
159 // additional information about the method being invoked should be attached
160 // to the call site to make the resolution logic work
161 // (see SharedRuntime::resolve_static_call_C).
162 call->set_override_symbolic_info(true);
163 }
164 _call_node = call; // Save the call node in case we need it later
165 if (!is_static) {
166 // Make an explicit receiver null_check as part of this call.
167 // Since we share a map with the caller, his JVMS gets adjusted.
168 kit.null_check_receiver_before_call(method());
169 if (kit.stopped()) {
170 // And dump it back to the caller, decorated with any exceptions:
171 return kit.transfer_exceptions_into_jvms();
172 }
173 // Mark the call node as virtual, sort of:
174 call->set_optimized_virtual(true);
175 if (method()->is_method_handle_intrinsic() ||
176 method()->is_compiled_lambda_form()) {
177 call->set_method_handle_invoke(true);
178 }
179 }
180 kit.set_arguments_for_java_call(call);
181 kit.set_edges_for_java_call(call, false, _separate_io_proj);
182 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
183 kit.push_node(method()->return_type()->basic_type(), ret);
184 return kit.transfer_exceptions_into_jvms();
185 }
186
187 //--------------------------VirtualCallGenerator------------------------------
188 // Internal class which handles all out-of-line calls checking receiver type.
189 class VirtualCallGenerator : public CallGenerator {
190 private:
191 int _vtable_index;
192 bool _separate_io_proj;
193 CallDynamicJavaNode* _call_node;
194
195 protected:
196 void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
197
198 public:
199 VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
200 : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
201 {
202 assert(vtable_index == Method::invalid_vtable_index ||
203 vtable_index >= 0, "either invalid or usable");
204 }
205 virtual bool is_virtual() const { return true; }
206 virtual JVMState* generate(JVMState* jvms);
207
208 virtual CallNode* call_node() const { return _call_node; }
209 int vtable_index() const { return _vtable_index; }
210
211 virtual CallGenerator* with_call_node(CallNode* call) {
212 VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
213 cg->set_call_node(call->as_CallDynamicJava());
214 return cg;
215 }
216 };
217
218 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
219 GraphKit kit(jvms);
220 Node* receiver = kit.argument(0);
221
222 kit.C->print_inlining_update(this);
223
224 if (kit.C->log() != nullptr) {
225 kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
226 }
227
228 // If the receiver is a constant null, do not torture the system
229 // by attempting to call through it. The compile will proceed
230 // correctly, but may bail out in final_graph_reshaping, because
231 // the call instruction will have a seemingly deficient out-count.
232 // (The bailout says something misleading about an "infinite loop".)
233 if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
234 assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
235 ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
236 int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
237 kit.inc_sp(arg_size); // restore arguments
238 kit.uncommon_trap(Deoptimization::Reason_null_check,
239 Deoptimization::Action_none,
240 nullptr, "null receiver");
241 return kit.transfer_exceptions_into_jvms();
261 }
262
263 assert(!method()->is_static(), "virtual call must not be to static");
264 assert(!method()->is_final(), "virtual call should not be to final");
265 assert(!method()->is_private(), "virtual call should not be to private");
266 assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
267 "no vtable calls if +UseInlineCaches ");
268 address target = SharedRuntime::get_resolve_virtual_call_stub();
269 // Normal inline cache used for call
270 CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
271 if (is_inlined_method_handle_intrinsic(jvms, method())) {
272 // To be able to issue a direct call (optimized virtual or virtual)
273 // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
274 // about the method being invoked should be attached to the call site to
275 // make the resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
276 call->set_override_symbolic_info(true);
277 }
278 _call_node = call; // Save the call node in case we need it later
279
280 kit.set_arguments_for_java_call(call);
281 kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
282 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
283 kit.push_node(method()->return_type()->basic_type(), ret);
284
285 // Represent the effect of an implicit receiver null_check
286 // as part of this call. Since we share a map with the caller,
287 // his JVMS gets adjusted.
288 kit.cast_not_null(receiver);
289 return kit.transfer_exceptions_into_jvms();
290 }
291
292 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
293 if (InlineTree::check_can_parse(m) != nullptr) return nullptr;
294 return new ParseGenerator(m, expected_uses);
295 }
296
297 // As a special case, the JVMS passed to this CallGenerator is
298 // for the method execution already in progress, not just the JVMS
299 // of the caller. Thus, this CallGenerator cannot be mixed with others!
300 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
354 return DirectCallGenerator::generate(jvms);
355 }
356
357 virtual void print_inlining_late(InliningResult result, const char* msg) {
358 CallNode* call = call_node();
359 Compile* C = Compile::current();
360 C->print_inlining_assert_ready();
361 C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), result, msg);
362 C->print_inlining_move_to(this);
363 C->print_inlining_update_delayed(this);
364 }
365
366 virtual void set_unique_id(jlong id) {
367 _unique_id = id;
368 }
369
370 virtual jlong unique_id() const {
371 return _unique_id;
372 }
373
374 virtual CallGenerator* with_call_node(CallNode* call) {
375 LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
376 cg->set_call_node(call->as_CallStaticJava());
377 return cg;
378 }
379 };
380
381 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
382 return new LateInlineCallGenerator(method, inline_cg);
383 }
384
385 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
386 ciMethod* _caller;
387 bool _input_not_const;
388
389 virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
390
391 public:
392 LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
393 LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}
415 cg->set_call_node(call->as_CallStaticJava());
416 return cg;
417 }
418 };
419
420 bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
421 // When inlining a virtual call, the null check at the call and the call itself can throw. These two paths have different
422 // expression stacks, which causes late inlining to break. The MH invoker is not expected to be called from a method with
423 // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack, which solves the issue
424 // of late inlining with exceptions.
425 assert(!jvms->method()->has_exception_handlers() ||
426 (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
427 method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
428 // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
429 bool allow_inline = C->inlining_incrementally();
430 bool input_not_const = true;
431 CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
432 assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
433
434 if (cg != nullptr) {
435 assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
436 _inline_cg = cg;
437 C->dec_number_of_mh_late_inlines();
438 return true;
439 } else {
440 // A method handle call with a constant appendix argument should either be inlined or replaced with a direct call,
441 // unless there's a signature mismatch between caller and callee. If that fails, there's not much to improve later,
442 // so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
443 return false;
444 }
445 }
446
447 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
448 assert(IncrementalInlineMH, "required");
449 Compile::current()->inc_number_of_mh_late_inlines();
450 CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
451 return cg;
452 }
453
454 // Allow inlining decisions to be delayed
577
578 void LateInlineMHCallGenerator::do_late_inline() {
579 CallGenerator::do_late_inline_helper();
580 }
581
582 void LateInlineVirtualCallGenerator::do_late_inline() {
583 assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
584 CallGenerator::do_late_inline_helper();
585 }
586
587 void CallGenerator::do_late_inline_helper() {
588 assert(is_late_inline(), "only late inline allowed");
589
590 // Can't inline it
591 CallNode* call = call_node();
592 if (call == nullptr || call->outcnt() == 0 ||
593 call->in(0) == nullptr || call->in(0)->is_top()) {
594 return;
595 }
596
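// Bail out if any argument or the memory input of the call is already dead (top);
// this can only happen during incremental inlining, after parts of the graph around the call were removed.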
597 const TypeTuple *r = call->tf()->domain();
598 for (int i1 = 0; i1 < method()->arg_size(); i1++) {
599 if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
600 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
601 return;
602 }
603 }
604
605 if (call->in(TypeFunc::Memory)->is_top()) {
606 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
607 return;
608 }
609 if (call->in(TypeFunc::Memory)->is_MergeMem()) {
610 MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
611 if (merge_mem->base_memory() == merge_mem->empty_memory()) {
612 return; // dead path
613 }
614 }
615
616 // check for unreachable loop
617 CallProjections callprojs;
618 call->extract_projections(&callprojs, true);
619 if ((callprojs.fallthrough_catchproj == call->in(0)) ||
620 (callprojs.catchall_catchproj == call->in(0)) ||
621 (callprojs.fallthrough_memproj == call->in(TypeFunc::Memory)) ||
622 (callprojs.catchall_memproj == call->in(TypeFunc::Memory)) ||
623 (callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
624 (callprojs.catchall_ioproj == call->in(TypeFunc::I_O)) ||
625 (callprojs.resproj != nullptr && call->find_edge(callprojs.resproj) != -1) ||
626 (callprojs.exobj != nullptr && call->find_edge(callprojs.exobj) != -1)) {
627 return;
628 }
629
630 Compile* C = Compile::current();
631 // Remove inlined methods from Compiler's lists.
632 if (call->is_macro()) {
633 C->remove_macro_node(call);
634 }
635
636 // The call is marked as pure (no important side effects), but the result isn't used.
637 // It's safe to remove the call.
638 bool result_not_used = (callprojs.resproj == nullptr || callprojs.resproj->outcnt() == 0);
639
640 if (is_pure_call() && result_not_used) {
641 GraphKit kit(call->jvms());
642 kit.replace_call(call, C->top(), true);
643 } else {
644 // Make a clone of the JVMState that is appropriate to use for driving a parse
645 JVMState* old_jvms = call->jvms();
646 JVMState* jvms = old_jvms->clone_shallow(C);
647 uint size = call->req();
648 SafePointNode* map = new SafePointNode(size, jvms);
649 for (uint i1 = 0; i1 < size; i1++) {
650 map->init_req(i1, call->in(i1));
651 }
652
653 // Make sure the state is a MergeMem for parsing.
654 if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
655 Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
656 C->initial_gvn()->set_type_bottom(mem);
657 map->set_req(TypeFunc::Memory, mem);
658 }
659
660 uint nargs = method()->arg_size();
661 // blow away old call arguments
662 Node* top = C->top();
663 for (uint i1 = 0; i1 < nargs; i1++) {
664 map->set_req(TypeFunc::Parms + i1, top);
665 }
666 jvms->set_map(map);
667
668 // Make enough space in the expression stack to transfer
669 // the incoming arguments and return value.
670 map->ensure_stack(jvms, jvms->method()->max_stack());
671 for (uint i1 = 0; i1 < nargs; i1++) {
672 map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
673 }
674
675 C->print_inlining_assert_ready();
676
677 C->print_inlining_move_to(this);
678
679 C->log_late_inline(this);
680
681 // The JVMState is ready, so it's time to perform some checks and prepare for the inlining attempt.
682 if (!do_late_inline_check(C, jvms)) {
683 map->disconnect_inputs(C);
684 C->print_inlining_update_delayed(this);
685 return;
686 }
687 if (C->print_inlining() && (is_mh_late_inline() || is_virtual_late_inline())) {
688 C->print_inlining_update_delayed(this);
689 }
690
691 // Set up default node notes to be picked up by the inlining
692 Node_Notes* old_nn = C->node_notes_at(call->_idx);
693 if (old_nn != nullptr) {
694 Node_Notes* entry_nn = old_nn->clone(C);
695 entry_nn->set_jvms(jvms);
696 C->set_default_node_notes(entry_nn);
697 }
698
699 // Now perform the inlining using the synthesized JVMState
700 JVMState* new_jvms = inline_cg()->generate(jvms);
701 if (new_jvms == nullptr) return; // no change
702 if (C->failing()) return;
703
704 // Capture any exceptional control flow
705 GraphKit kit(new_jvms);
706
707 // Find the result object
708 Node* result = C->top();
709 int result_size = method()->return_type()->size();
710 if (result_size != 0 && !kit.stopped()) {
711 result = (result_size == 1) ? kit.pop() : kit.pop_pair();
712 }
713
714 if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
715 result = kit.must_be_not_null(result, false);
716 }
717
718 if (inline_cg()->is_inline()) {
719 C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
720 C->env()->notice_inlined_method(inline_cg()->method());
721 }
722 C->set_inlining_progress(true);
723 C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
724 kit.replace_call(call, result, true);
725 }
726 }
727
728 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
729
730 public:
731 LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
732 LateInlineCallGenerator(method, inline_cg) {}
733
734 virtual JVMState* generate(JVMState* jvms) {
735 Compile *C = Compile::current();
736
737 C->log_inline_id(this);
738
739 C->add_string_late_inline(this);
740
741 JVMState* new_jvms = DirectCallGenerator::generate(jvms);
742 return new_jvms;
743 }
926 // Inline failed, so make a direct call.
927 assert(_if_hit->is_inline(), "must have been a failed inline");
928 CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
929 new_jvms = cg->generate(kit.sync_jvms());
930 }
931 kit.add_exception_states_from(new_jvms);
932 kit.set_jvms(new_jvms);
933
934 // Need to merge slow and fast?
935 if (slow_map == nullptr) {
936 // The fast path is the only path remaining.
937 return kit.transfer_exceptions_into_jvms();
938 }
939
940 if (kit.stopped()) {
941 // Inlined method threw an exception, so it's just the slow path after all.
942 kit.set_jvms(slow_jvms);
943 return kit.transfer_exceptions_into_jvms();
944 }
945
946 // There are 2 branches and the replaced nodes are only valid on
947 // one: restore the replaced nodes to what they were before the
948 // branch.
949 kit.map()->set_replaced_nodes(replaced_nodes);
950
951 // Finish the diamond.
952 kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
953 RegionNode* region = new RegionNode(3);
954 region->init_req(1, kit.control());
955 region->init_req(2, slow_map->control());
956 kit.set_control(gvn.transform(region));
957 Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
958 iophi->set_req(2, slow_map->i_o());
959 kit.set_i_o(gvn.transform(iophi));
960 // Merge memory
961 kit.merge_memory(slow_map->merged_memory(), region, 2);
962 // Transform new memory Phis.
963 for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
964 Node* phi = mms.memory();
965 if (phi->is_Phi() && phi->in(0) == region) {
966 mms.set_memory(gvn.transform(phi));
967 }
968 }
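// Merge the remaining map entries (locals, expression stack, monitors) of the fast and slow
// paths, creating phis wherever the two maps disagree.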
969 uint tos = kit.jvms()->stkoff() + kit.sp();
970 uint limit = slow_map->req();
971 for (uint i = TypeFunc::Parms; i < limit; i++) {
972 // Skip unused stack slots; fast forward to monoff();
973 if (i == tos) {
974 i = kit.jvms()->monoff();
975 if (i >= limit) break;
976 }
977 Node* m = kit.map()->in(i);
978 Node* n = slow_map->in(i);
979 if (m != n) {
980 const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
981 Node* phi = PhiNode::make(region, m, t);
982 phi->set_req(2, n);
983 kit.map()->set_req(i, gvn.transform(phi));
984 }
985 }
986 return kit.transfer_exceptions_into_jvms();
987 }
988
989
990 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
991 assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
992 bool input_not_const;
993 CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
994 Compile* C = Compile::current();
995 bool should_delay = C->should_delay_inlining();
996 if (cg != nullptr) {
997 if (should_delay) {
998 return CallGenerator::for_late_inline(callee, cg);
999 } else {
1000 return cg;
1001 }
1002 }
1003 int bci = jvms->bci();
1004 ciCallProfile profile = caller->call_profile_at_bci(bci);
1005 int call_site_count = caller->scale_count(profile.count());
1006
1007 if (IncrementalInlineMH && call_site_count > 0 &&
1008 (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
1009 return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1010 } else {
1011 // Out-of-line call.
1012 return CallGenerator::for_direct_call(callee);
1013 }
1014 }
1015
1016 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
1017 GraphKit kit(jvms);
1018 PhaseGVN& gvn = kit.gvn();
1019 Compile* C = kit.C;
1020 vmIntrinsics::ID iid = callee->intrinsic_id();
1021 input_not_const = true;
1022 if (StressMethodHandleLinkerInlining) {
1023 allow_inline = false;
1024 }
1025 switch (iid) {
1026 case vmIntrinsics::_invokeBasic:
1027 {
1028 // Get MethodHandle receiver:
1029 Node* receiver = kit.argument(0);
1030 if (receiver->Opcode() == Op_ConP) {
1031 input_not_const = false;
1032 const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
1033 if (recv_toop != nullptr) {
1034 ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
1035 const int vtable_index = Method::invalid_vtable_index;
1047 PROB_ALWAYS);
1048 return cg;
1049 } else {
1050 assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
1051 Type::str(receiver->bottom_type()));
1052 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1053 "receiver is always null");
1054 }
1055 } else {
1056 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1057 "receiver not constant");
1058 }
1059 }
1060 break;
1061
1062 case vmIntrinsics::_linkToVirtual:
1063 case vmIntrinsics::_linkToStatic:
1064 case vmIntrinsics::_linkToSpecial:
1065 case vmIntrinsics::_linkToInterface:
1066 {
1067 // Get MemberName argument:
1068 Node* member_name = kit.argument(callee->arg_size() - 1);
1069 if (member_name->Opcode() == Op_ConP) {
1070 input_not_const = false;
1071 const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1072 ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1073
1074 if (!ciMethod::is_consistent_info(callee, target)) {
1075 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1076 "signatures mismatch");
1077 return nullptr;
1078 }
1079
1080 // In lambda forms we erase signature types to avoid resolving issues
1081 // involving class loaders. When we optimize a method handle invoke
1082 // to a direct call we must cast the receiver and arguments to their
1083 // actual types.
1084 ciSignature* signature = target->signature();
1085 const int receiver_skip = target->is_static() ? 0 : 1;
1086 // Cast receiver to its type.
1087 if (!target->is_static()) {
1088 Node* arg = kit.argument(0);
1089 const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
1090 const Type* sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
1091 if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) {
1092 const Type* recv_type = arg_type->filter_speculative(sig_type); // keep speculative part
1093 Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
1094 kit.set_argument(0, cast_obj);
1095 }
1096 }
1097 // Cast reference arguments to their types.
1098 for (int i = 0, j = 0; i < signature->count(); i++) {
1099 ciType* t = signature->type_at(i);
1100 if (t->is_klass()) {
1101 Node* arg = kit.argument(receiver_skip + j);
1102 const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
1103 const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
1104 if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) {
1105 const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part
1106 Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
1107 kit.set_argument(receiver_skip + j, cast_obj);
1108 }
1109 }
1110 j += t->size(); // long and double take two slots
1111 }
1112
1113 // Try to get the most accurate receiver type
1114 const bool is_virtual = (iid == vmIntrinsics::_linkToVirtual);
1115 const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1116 int vtable_index = Method::invalid_vtable_index;
1117 bool call_does_dispatch = false;
1118
1119 ciKlass* speculative_receiver_type = nullptr;
1120 if (is_virtual_or_interface) {
1121 ciInstanceKlass* klass = target->holder();
1122 Node* receiver_node = kit.argument(0);
1123 const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1124 // call_does_dispatch and vtable_index are out-parameters. They might be changed.
1125 // optimize_virtual_call() takes 2 different holder
1126 // arguments for a corner case that doesn't apply here (see
1127 // Parse::do_call())
1128 target = C->optimize_virtual_call(caller, klass, klass,
1129 target, receiver_type, is_virtual,
1130 call_does_dispatch, vtable_index, // out-parameters
1131 false /* check_access */);
1132 // We lack profiling at this call but type speculation may
1133 // provide us with a type
1134 speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
1135 }
1136 CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1137 allow_inline,
1138 PROB_ALWAYS,
1139 speculative_receiver_type);
1140 return cg;
1141 } else {
1142 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1143 "member_name not constant");
1144 }
1145 }
1146 break;
1147
1148 case vmIntrinsics::_linkToNative:
1149 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1150 "native call");
1151 break;
1152
1153 default:
1154 fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1155 break;
1156 }
1157 return nullptr;
1158 }
1159
1194 // do_intrinsic(0)
1195 // else
1196 // if (predicate(1))
1197 // do_intrinsic(1)
1198 // ...
1199 // else
1200 // do_java_comp
1201
1202 GraphKit kit(jvms);
1203 PhaseGVN& gvn = kit.gvn();
1204
1205 CompileLog* log = kit.C->log();
1206 if (log != nullptr) {
1207 log->elem("predicated_intrinsic bci='%d' method='%d'",
1208 jvms->bci(), log->identify(method()));
1209 }
1210
1211 if (!method()->is_static()) {
1212 // We need an explicit receiver null_check before checking its type in the predicate.
1213 // We share a map with the caller, so his JVMS gets adjusted.
1214 Node* receiver = kit.null_check_receiver_before_call(method());
1215 if (kit.stopped()) {
1216 return kit.transfer_exceptions_into_jvms();
1217 }
1218 }
1219
1220 int n_predicates = _intrinsic->predicates_count();
1221 assert(n_predicates > 0, "sanity");
1222
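// Collects the resulting JVM state of each predicate path (plus one for the fallback to
// normal compilation) so they can be merged afterwards.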
1223 JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1224
1225 // Region for normal compilation code if intrinsic failed.
1226 Node* slow_region = new RegionNode(1);
1227
1228 int results = 0;
1229 for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1230 #ifdef ASSERT
1231 JVMState* old_jvms = kit.jvms();
1232 SafePointNode* old_map = kit.map();
1233 Node* old_io = old_map->i_o();
1234 Node* old_mem = old_map->memory();
|
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/bcEscapeAnalyzer.hpp"
27 #include "ci/ciCallSite.hpp"
28 #include "ci/ciObjArray.hpp"
29 #include "ci/ciMemberName.hpp"
30 #include "ci/ciMethodHandle.hpp"
31 #include "classfile/javaClasses.hpp"
32 #include "compiler/compileLog.hpp"
33 #include "opto/addnode.hpp"
34 #include "opto/callGenerator.hpp"
35 #include "opto/callnode.hpp"
36 #include "opto/castnode.hpp"
37 #include "opto/cfgnode.hpp"
38 #include "opto/inlinetypenode.hpp"
39 #include "opto/parse.hpp"
40 #include "opto/rootnode.hpp"
41 #include "opto/runtime.hpp"
42 #include "opto/subnode.hpp"
43 #include "runtime/os.inline.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "utilities/debug.hpp"
46
47 // Utility function.
48 const TypeFunc* CallGenerator::tf() const {
49 return TypeFunc::make(method());
50 }
51
52 bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
53 return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
54 }
55
56 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
57 ciMethod* symbolic_info = caller->get_method_at_bci(bci);
58 return is_inlined_method_handle_intrinsic(symbolic_info, m);
104 GraphKit& exits = parser.exits();
105
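// If the compile is failing, discard any pending exception states and bail out.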
106 if (C->failing()) {
107 while (exits.pop_exception_state() != nullptr) ;
108 return nullptr;
109 }
110
111 assert(exits.jvms()->same_calls_as(jvms), "sanity");
112
113 // Simply return the exit state of the parser,
114 // augmented by any exceptional states.
115 return exits.transfer_exceptions_into_jvms();
116 }
117
118 //---------------------------DirectCallGenerator------------------------------
119 // Internal class which handles all out-of-line calls w/o receiver type checks.
120 class DirectCallGenerator : public CallGenerator {
121 private:
122 CallStaticJavaNode* _call_node;
123 // Force separate memory and I/O projections for the exceptional
124 // paths to facilitate late inlining.
125 bool _separate_io_proj;
126
127 protected:
128 void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
129
130 public:
131 DirectCallGenerator(ciMethod* method, bool separate_io_proj)
132 : CallGenerator(method),
133 _call_node(nullptr),
134 _separate_io_proj(separate_io_proj)
135 {
136 if (InlineTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
137 // If this call has not been optimized by the time optimizations are over,
138 // we'll need to add a call to create an inline type instance from the klass
139 // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
140 // Separating memory and I/O projections for exceptions is required to
141 // perform that graph transformation.
142 _separate_io_proj = true;
143 }
144 }
145 virtual JVMState* generate(JVMState* jvms);
146
147 virtual CallNode* call_node() const { return _call_node; }
148 virtual CallGenerator* with_call_node(CallNode* call) {
149 DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
150 dcg->set_call_node(call->as_CallStaticJava());
151 return dcg;
152 }
153 };
154
155 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
156 GraphKit kit(jvms);
157 kit.C->print_inlining_update(this);
158 PhaseGVN& gvn = kit.gvn();
159 bool is_static = method()->is_static();
160 address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
161 : SharedRuntime::get_resolve_opt_virtual_call_stub();
162
163 if (kit.C->log() != nullptr) {
164 kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
165 }
166
167 CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
168 if (is_inlined_method_handle_intrinsic(jvms, method())) {
169 // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
170 // additional information about the method being invoked should be attached
171 // to the call site to make the resolution logic work
172 // (see SharedRuntime::resolve_static_call_C).
173 call->set_override_symbolic_info(true);
174 }
175 _call_node = call; // Save the call node in case we need it later
176 if (!is_static) {
177 // Make an explicit receiver null_check as part of this call.
178 // Since we share a map with the caller, his JVMS gets adjusted.
179 kit.null_check_receiver_before_call(method());
180 if (kit.stopped()) {
181 // And dump it back to the caller, decorated with any exceptions:
182 return kit.transfer_exceptions_into_jvms();
183 }
184 // Mark the call node as virtual, sort of:
185 call->set_optimized_virtual(true);
186 if (method()->is_method_handle_intrinsic() ||
187 method()->is_compiled_lambda_form()) {
188 call->set_method_handle_invoke(true);
189 }
190 }
191 kit.set_arguments_for_java_call(call, is_late_inline());
192 if (kit.stopped()) {
193 return kit.transfer_exceptions_into_jvms();
194 }
195 kit.set_edges_for_java_call(call, false, _separate_io_proj);
196 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
197 kit.push_node(method()->return_type()->basic_type(), ret);
198 return kit.transfer_exceptions_into_jvms();
199 }
200
201 //--------------------------VirtualCallGenerator------------------------------
202 // Internal class which handles all out-of-line calls checking receiver type.
203 class VirtualCallGenerator : public CallGenerator {
204 private:
205 int _vtable_index;
206 bool _separate_io_proj;
207 CallDynamicJavaNode* _call_node;
208
209 protected:
210 void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
211
212 public:
213 VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
214 : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
215 {
216 assert(vtable_index == Method::invalid_vtable_index ||
217 vtable_index >= 0, "either invalid or usable");
218 }
219 virtual bool is_virtual() const { return true; }
220 virtual JVMState* generate(JVMState* jvms);
221
222 virtual CallNode* call_node() const { return _call_node; }
223 int vtable_index() const { return _vtable_index; }
224
225 virtual CallGenerator* with_call_node(CallNode* call) {
226 VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
227 cg->set_call_node(call->as_CallDynamicJava());
228 return cg;
229 }
230 };
231
232 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
233 GraphKit kit(jvms);
234 Node* receiver = kit.argument(0);
235 kit.C->print_inlining_update(this);
236
237 if (kit.C->log() != nullptr) {
238 kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
239 }
240
241 // If the receiver is a constant null, do not torture the system
242 // by attempting to call through it. The compile will proceed
243 // correctly, but may bail out in final_graph_reshaping, because
244 // the call instruction will have a seemingly deficient out-count.
245 // (The bailout says something misleading about an "infinite loop".)
246 if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
247 assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
248 ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
249 int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
250 kit.inc_sp(arg_size); // restore arguments
251 kit.uncommon_trap(Deoptimization::Reason_null_check,
252 Deoptimization::Action_none,
253 nullptr, "null receiver");
254 return kit.transfer_exceptions_into_jvms();
274 }
275
276 assert(!method()->is_static(), "virtual call must not be to static");
277 assert(!method()->is_final(), "virtual call should not be to final");
278 assert(!method()->is_private(), "virtual call should not be to private");
279 assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
280 "no vtable calls if +UseInlineCaches ");
281 address target = SharedRuntime::get_resolve_virtual_call_stub();
282 // Normal inline cache used for call
283 CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
284 if (is_inlined_method_handle_intrinsic(jvms, method())) {
285 // To be able to issue a direct call (optimized virtual or virtual)
286 // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
287 // about the method being invoked should be attached to the call site to
288 // make the resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
289 call->set_override_symbolic_info(true);
290 }
291 _call_node = call; // Save the call node in case we need it later
292
293 kit.set_arguments_for_java_call(call);
294 if (kit.stopped()) {
295 return kit.transfer_exceptions_into_jvms();
296 }
297 kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
298 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
299 kit.push_node(method()->return_type()->basic_type(), ret);
300
301 // Represent the effect of an implicit receiver null_check
302 // as part of this call. Since we share a map with the caller,
303 // his JVMS gets adjusted.
304 kit.cast_not_null(receiver);
305 return kit.transfer_exceptions_into_jvms();
306 }
307
308 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
309 if (InlineTree::check_can_parse(m) != nullptr) return nullptr;
310 return new ParseGenerator(m, expected_uses);
311 }
312
313 // As a special case, the JVMS passed to this CallGenerator is
314 // for the method execution already in progress, not just the JVMS
315 // of the caller. Thus, this CallGenerator cannot be mixed with others!
316 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
370 return DirectCallGenerator::generate(jvms);
371 }
372
373 virtual void print_inlining_late(InliningResult result, const char* msg) {
374 CallNode* call = call_node();
375 Compile* C = Compile::current();
376 C->print_inlining_assert_ready();
377 C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), result, msg);
378 C->print_inlining_move_to(this);
379 C->print_inlining_update_delayed(this);
380 }
381
382 virtual void set_unique_id(jlong id) {
383 _unique_id = id;
384 }
385
386 virtual jlong unique_id() const {
387 return _unique_id;
388 }
389
390 virtual CallGenerator* inline_cg() {
391 return _inline_cg;
392 }
393
394 virtual CallGenerator* with_call_node(CallNode* call) {
395 LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
396 cg->set_call_node(call->as_CallStaticJava());
397 return cg;
398 }
399 };
400
401 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
402 return new LateInlineCallGenerator(method, inline_cg);
403 }
404
405 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
406 ciMethod* _caller;
407 bool _input_not_const;
408
409 virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
410
411 public:
412 LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
413 LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}
435 cg->set_call_node(call->as_CallStaticJava());
436 return cg;
437 }
438 };
439
440 bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
441 // When inlining a virtual call, the null check at the call and the call itself can throw. These two paths have different
442 // expression stacks, which causes late inlining to break. The MH invoker is not expected to be called from a method with
443 // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack, which solves the issue
444 // of late inlining with exceptions.
445 assert(!jvms->method()->has_exception_handlers() ||
446 (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
447 method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
448 // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
449 bool allow_inline = C->inlining_incrementally();
450 bool input_not_const = true;
451 CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
452 assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
453
454 if (cg != nullptr) {
455 // AlwaysIncrementalInline causes for_method_handle_inline() to
456 // return a LateInlineCallGenerator. Extract the wrapped inline
457 // CallGenerator from it.
458 if (AlwaysIncrementalInline && cg->is_late_inline() && !cg->is_virtual_late_inline()) {
459 cg = cg->inline_cg();
460 assert(cg != nullptr, "inline call generator expected");
461 }
462
463 assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
464 _inline_cg = cg;
465 C->dec_number_of_mh_late_inlines();
466 return true;
467 } else {
468 // A method handle call with a constant appendix argument should either be inlined or replaced with a direct call,
469 // unless there's a signature mismatch between caller and callee. If that fails, there's not much to improve later,
470 // so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
471 return false;
472 }
473 }
474
475 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
476 assert(IncrementalInlineMH, "required");
477 Compile::current()->inc_number_of_mh_late_inlines();
478 CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
479 return cg;
480 }
481
482 // Allow inlining decisions to be delayed
605
606 void LateInlineMHCallGenerator::do_late_inline() {
607 CallGenerator::do_late_inline_helper();
608 }
609
610 void LateInlineVirtualCallGenerator::do_late_inline() {
611 assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
612 CallGenerator::do_late_inline_helper();
613 }
614
615 void CallGenerator::do_late_inline_helper() {
616 assert(is_late_inline(), "only late inline allowed");
617
618 // Can't inline it
619 CallNode* call = call_node();
620 if (call == nullptr || call->outcnt() == 0 ||
621 call->in(0) == nullptr || call->in(0)->is_top()) {
622 return;
623 }
624
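// Bail out if any argument or the memory input of the call is already dead (top);
// this can only happen during incremental inlining, after parts of the graph around the call were removed.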
625 const TypeTuple* r = call->tf()->domain_cc();
626 for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
627 if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
628 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
629 return;
630 }
631 }
632
633 if (call->in(TypeFunc::Memory)->is_top()) {
634 assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
635 return;
636 }
637 if (call->in(TypeFunc::Memory)->is_MergeMem()) {
638 MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
639 if (merge_mem->base_memory() == merge_mem->empty_memory()) {
640 return; // dead path
641 }
642 }
643
644 // check for unreachable loop
645 CallProjections* callprojs = call->extract_projections(true);
646 if ((callprojs->fallthrough_catchproj == call->in(0)) ||
647 (callprojs->catchall_catchproj == call->in(0)) ||
648 (callprojs->fallthrough_memproj == call->in(TypeFunc::Memory)) ||
649 (callprojs->catchall_memproj == call->in(TypeFunc::Memory)) ||
650 (callprojs->fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
651 (callprojs->catchall_ioproj == call->in(TypeFunc::I_O)) ||
652 (callprojs->exobj != nullptr && call->find_edge(callprojs->exobj) != -1)) {
653 return;
654 }
655
656 Compile* C = Compile::current();
657 // Remove inlined methods from Compiler's lists.
658 if (call->is_macro()) {
659 C->remove_macro_node(call);
660 }
661
662
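// The result is unused only if none of the result projections have uses. Also bail out if a
// result projection feeds back into the call itself (unreachable loop, as checked above).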
663 bool result_not_used = true;
664 for (uint i = 0; i < callprojs->nb_resproj; i++) {
665 if (callprojs->resproj[i] != nullptr) {
666 if (callprojs->resproj[i]->outcnt() != 0) {
667 result_not_used = false;
668 }
669 if (call->find_edge(callprojs->resproj[i]) != -1) {
670 return;
671 }
672 }
673 }
674
675 if (is_pure_call() && result_not_used) {
676 // The call is marked as pure (no important side effects), but the result isn't used.
677 // It's safe to remove the call.
678 GraphKit kit(call->jvms());
679 kit.replace_call(call, C->top(), true);
680 } else {
681 // Make a clone of the JVMState that is appropriate to use for driving a parse
682 JVMState* old_jvms = call->jvms();
683 JVMState* jvms = old_jvms->clone_shallow(C);
684 uint size = call->req();
685 SafePointNode* map = new SafePointNode(size, jvms);
686 for (uint i1 = 0; i1 < size; i1++) {
687 map->init_req(i1, call->in(i1));
688 }
689
690 PhaseGVN& gvn = *C->initial_gvn();
691 // Make sure the state is a MergeMem for parsing.
692 if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
693 Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
694 gvn.set_type_bottom(mem);
695 map->set_req(TypeFunc::Memory, mem);
696 }
697
698 // blow away old call arguments
699 for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
700 map->set_req(i1, C->top());
701 }
702 jvms->set_map(map);
703
704 // Make enough space in the expression stack to transfer
705 // the incoming arguments and return value.
706 map->ensure_stack(jvms, jvms->method()->max_stack());
707 const TypeTuple* domain_sig = call->_tf->domain_sig();
708 uint nargs = method()->arg_size();
709 assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
710
711 uint j = TypeFunc::Parms;
712 int arg_num = 0;
713 for (uint i1 = 0; i1 < nargs; i1++) {
714 const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
715 if (t->is_inlinetypeptr() && !method()->get_Method()->mismatch() && method()->is_scalarized_arg(arg_num)) {
716 // Inline type arguments are not passed by reference: we get an argument per
717 // field of the inline type. Build InlineTypeNodes from the inline type arguments.
718 GraphKit arg_kit(jvms, &gvn);
719 Node* vt = InlineTypeNode::make_from_multi(&arg_kit, call, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
720 map->set_control(arg_kit.control());
721 map->set_argument(jvms, i1, vt);
722 } else {
723 map->set_argument(jvms, i1, call->in(j++));
724 }
725 if (t != Type::HALF) {
726 arg_num++;
727 }
728 }
729
730 C->print_inlining_assert_ready();
731
732 C->print_inlining_move_to(this);
733
734 C->log_late_inline(this);
735
736 // The JVMState is ready, so it's time to perform some checks and prepare for the inlining attempt.
737 if (!do_late_inline_check(C, jvms)) {
738 map->disconnect_inputs(C);
739 C->print_inlining_update_delayed(this);
740 return;
741 }
742 if (C->print_inlining() && (is_mh_late_inline() || is_virtual_late_inline())) {
743 C->print_inlining_update_delayed(this);
744 }
745
746 // Check if we are late inlining a method handle call that returns an inline type as fields.
747 Node* buffer_oop = nullptr;
748 ciMethod* inline_method = inline_cg()->method();
749 ciType* return_type = inline_method->return_type();
750 if (!call->tf()->returns_inline_type_as_fields() && is_mh_late_inline() &&
751 return_type->is_inlinetype() && return_type->as_inline_klass()->can_be_returned_as_fields()) {
752 // Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
753 // Do this before the method handle call in case the buffer allocation triggers deoptimization and
754 // we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
755 GraphKit arg_kit(jvms, &gvn);
756 {
757 PreserveReexecuteState preexecs(&arg_kit);
758 arg_kit.jvms()->set_should_reexecute(true);
759 arg_kit.inc_sp(nargs);
760 Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(return_type->as_inline_klass()));
761 buffer_oop = arg_kit.new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true);
762 }
763 jvms = arg_kit.transfer_exceptions_into_jvms();
764 }
765
767 // Set up default node notes to be picked up by the inlining
767 Node_Notes* old_nn = C->node_notes_at(call->_idx);
768 if (old_nn != nullptr) {
769 Node_Notes* entry_nn = old_nn->clone(C);
770 entry_nn->set_jvms(jvms);
771 C->set_default_node_notes(entry_nn);
772 }
773
774 // Now perform the inlining using the synthesized JVMState
775 JVMState* new_jvms = inline_cg()->generate(jvms);
776 if (new_jvms == nullptr) return; // no change
777 if (C->failing()) return;
778
779 // Capture any exceptional control flow
780 GraphKit kit(new_jvms);
781
782 // Find the result object
783 Node* result = C->top();
784 int result_size = method()->return_type()->size();
785 if (result_size != 0 && !kit.stopped()) {
786 result = (result_size == 1) ? kit.pop() : kit.pop_pair();
787 }
788
789 if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
790 result = kit.must_be_not_null(result, false);
791 }
792
793 if (inline_cg()->is_inline()) {
794 C->set_has_loops(C->has_loops() || inline_method->has_loops());
795 C->env()->notice_inlined_method(inline_method);
796 }
797 C->set_inlining_progress(true);
798 C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
799
800 // Handle inline type returns
801 InlineTypeNode* vt = result->isa_InlineType();
802 if (vt != nullptr) {
803 if (call->tf()->returns_inline_type_as_fields()) {
804 vt->replace_call_results(&kit, call, C);
805 } else if (vt->is_InlineType()) {
806 // Result might still be allocated (for example, if it has been stored to a non-flat field)
807 if (!vt->is_allocated(&kit.gvn())) {
808 assert(buffer_oop != nullptr, "should have allocated a buffer");
809 RegionNode* region = new RegionNode(3);
810
811 // Check if result is null
812 Node* null_ctl = kit.top();
813 kit.null_check_common(vt->get_is_init(), T_INT, false, &null_ctl);
814 region->init_req(1, null_ctl);
815 PhiNode* oop = PhiNode::make(region, kit.gvn().zerocon(T_OBJECT), TypeInstPtr::make(TypePtr::BotPTR, vt->type()->inline_klass()));
816 Node* init_mem = kit.reset_memory();
817 PhiNode* mem = PhiNode::make(region, init_mem, Type::MEMORY, TypePtr::BOTTOM);
818
819 // Not null, initialize the buffer
820 kit.set_all_memory(init_mem);
821 vt->store(&kit, buffer_oop, buffer_oop, vt->type()->inline_klass());
822 // Do not let stores that initialize this buffer be reordered with a subsequent
823 // store that would make this buffer accessible by other threads.
824 AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop);
825 assert(alloc != nullptr, "must have an allocation node");
826 kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
827 region->init_req(2, kit.control());
828 oop->init_req(2, buffer_oop);
829 mem->init_req(2, kit.merged_memory());
830
831 // Update oop input to buffer
832 kit.gvn().hash_delete(vt);
833 vt->set_oop(kit.gvn(), kit.gvn().transform(oop));
834 vt->set_is_buffered(kit.gvn());
835 vt = kit.gvn().transform(vt)->as_InlineType();
836
837 kit.set_control(kit.gvn().transform(region));
838 kit.set_all_memory(kit.gvn().transform(mem));
839 kit.record_for_igvn(region);
840 kit.record_for_igvn(oop);
841 kit.record_for_igvn(mem);
842 }
843 result = vt;
844 }
845 DEBUG_ONLY(buffer_oop = nullptr);
846 } else {
847 assert(result->is_top() || !call->tf()->returns_inline_type_as_fields(), "Unexpected return value");
848 }
849 assert(buffer_oop == nullptr, "unused buffer allocation");
850
851 kit.replace_call(call, result, true);
852 }
853 }
854
855 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
856
857 public:
858 LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
859 LateInlineCallGenerator(method, inline_cg) {}
860
861 virtual JVMState* generate(JVMState* jvms) {
862 Compile *C = Compile::current();
863
864 C->log_inline_id(this);
865
866 C->add_string_late_inline(this);
867
868 JVMState* new_jvms = DirectCallGenerator::generate(jvms);
869 return new_jvms;
870 }
1053 // Inline failed, so make a direct call.
1054 assert(_if_hit->is_inline(), "must have been a failed inline");
1055 CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
1056 new_jvms = cg->generate(kit.sync_jvms());
1057 }
1058 kit.add_exception_states_from(new_jvms);
1059 kit.set_jvms(new_jvms);
1060
1061 // Need to merge slow and fast?
1062 if (slow_map == nullptr) {
1063 // The fast path is the only path remaining.
1064 return kit.transfer_exceptions_into_jvms();
1065 }
1066
1067 if (kit.stopped()) {
1068 // Inlined method threw an exception, so it's just the slow path after all.
1069 kit.set_jvms(slow_jvms);
1070 return kit.transfer_exceptions_into_jvms();
1071 }
1072
1073 // Allocate inline types if they are merged with objects (similar to Parse::merge_common())
1074 uint tos = kit.jvms()->stkoff() + kit.sp();
1075 uint limit = slow_map->req();
1076 for (uint i = TypeFunc::Parms; i < limit; i++) {
1077 Node* m = kit.map()->in(i);
1078 Node* n = slow_map->in(i);
1079 const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1080 // TODO 8284443 still needed?
1081 if (m->is_InlineType() && !t->is_inlinetypeptr()) {
1082 // Allocate inline type in fast path
1083 m = m->as_InlineType()->buffer(&kit);
1084 kit.map()->set_req(i, m);
1085 }
1086 if (n->is_InlineType() && !t->is_inlinetypeptr()) {
1087 // Allocate inline type in slow path
1088 PreserveJVMState pjvms(&kit);
1089 kit.set_map(slow_map);
1090 n = n->as_InlineType()->buffer(&kit);
1091 kit.map()->set_req(i, n);
1092 slow_map = kit.stop();
1093 }
1094 }
1095
1096 // There are 2 branches and the replaced nodes are only valid on
1097 // one: restore the replaced nodes to what they were before the
1098 // branch.
1099 kit.map()->set_replaced_nodes(replaced_nodes);
1100
1101 // Finish the diamond.
1102 kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1103 RegionNode* region = new RegionNode(3);
1104 region->init_req(1, kit.control());
1105 region->init_req(2, slow_map->control());
1106 kit.set_control(gvn.transform(region));
1107 Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
1108 iophi->set_req(2, slow_map->i_o());
1109 kit.set_i_o(gvn.transform(iophi));
1110 // Merge memory
1111 kit.merge_memory(slow_map->merged_memory(), region, 2);
1112 // Transform new memory Phis.
1113 for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1114 Node* phi = mms.memory();
1115 if (phi->is_Phi() && phi->in(0) == region) {
1116 mms.set_memory(gvn.transform(phi));
1117 }
1118 }
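// Merge the remaining map entries (locals, expression stack, monitors) of the fast and slow
// paths, creating phis wherever the two maps disagree.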
1119 for (uint i = TypeFunc::Parms; i < limit; i++) {
1120 // Skip unused stack slots; fast forward to monoff();
1121 if (i == tos) {
1122 i = kit.jvms()->monoff();
1123 if (i >= limit) break;
1124 }
1125 Node* m = kit.map()->in(i);
1126 Node* n = slow_map->in(i);
1127 if (m != n) {
1128 const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1129 Node* phi = PhiNode::make(region, m, t);
1130 phi->set_req(2, n);
1131 kit.map()->set_req(i, gvn.transform(phi));
1132 }
1133 }
1134 return kit.transfer_exceptions_into_jvms();
1135 }
1136
1137
1138 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
1139 assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
1140 bool input_not_const;
1141 CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
1142 Compile* C = Compile::current();
1143 bool should_delay = C->should_delay_inlining();
1144 if (cg != nullptr) {
1145 if (should_delay) {
1146 return CallGenerator::for_late_inline(callee, cg);
1147 } else {
1148 return cg;
1149 }
1150 }
1151 int bci = jvms->bci();
1152 ciCallProfile profile = caller->call_profile_at_bci(bci);
1153 int call_site_count = caller->scale_count(profile.count());
1154
1155 if (IncrementalInlineMH && (AlwaysIncrementalInline ||
1156 (call_site_count > 0 && (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
1157 return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1158 } else {
1159 // Out-of-line call.
1160 return CallGenerator::for_direct_call(callee);
1161 }
1162 }
1163
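// Cast the argument at index arg_nb to the type required by the target method's signature,
// keeping any speculative part, and, for inline type pointers, replace the oop argument with an InlineTypeNode.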
1164 static void cast_argument(int nargs, int arg_nb, ciType* t, GraphKit& kit) {
1165 PhaseGVN& gvn = kit.gvn();
1166 Node* arg = kit.argument(arg_nb);
1167 const Type* arg_type = arg->bottom_type();
1168 const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
1169 if (arg_type->isa_oopptr() && !arg_type->higher_equal(sig_type)) {
1170 const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part
1171 arg = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
1172 kit.set_argument(arg_nb, arg);
1173 }
1174 if (sig_type->is_inlinetypeptr()) {
1175 arg = InlineTypeNode::make_from_oop(&kit, arg, sig_type->inline_klass(), !kit.gvn().type(arg)->maybe_null());
1176 kit.set_argument(arg_nb, arg);
1177 }
1178 }
1179
1180 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
1181 GraphKit kit(jvms);
1182 PhaseGVN& gvn = kit.gvn();
1183 Compile* C = kit.C;
1184 vmIntrinsics::ID iid = callee->intrinsic_id();
1185 input_not_const = true;
1186 if (StressMethodHandleLinkerInlining) {
1187 allow_inline = false;
1188 }
1189 switch (iid) {
1190 case vmIntrinsics::_invokeBasic:
1191 {
1192 // Get MethodHandle receiver:
1193 Node* receiver = kit.argument(0);
1194 if (receiver->Opcode() == Op_ConP) {
1195 input_not_const = false;
1196 const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
1197 if (recv_toop != nullptr) {
1198 ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
1199 const int vtable_index = Method::invalid_vtable_index;
1211 PROB_ALWAYS);
1212 return cg;
1213 } else {
1214 assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
1215 Type::str(receiver->bottom_type()));
1216 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1217 "receiver is always null");
1218 }
1219 } else {
1220 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1221 "receiver not constant");
1222 }
1223 }
1224 break;
1225
1226 case vmIntrinsics::_linkToVirtual:
1227 case vmIntrinsics::_linkToStatic:
1228 case vmIntrinsics::_linkToSpecial:
1229 case vmIntrinsics::_linkToInterface:
1230 {
1231 int nargs = callee->arg_size();
1232 // Get MemberName argument:
1233 Node* member_name = kit.argument(nargs - 1);
1234 if (member_name->Opcode() == Op_ConP) {
1235 input_not_const = false;
1236 const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1237 ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1238
1239 if (!ciMethod::is_consistent_info(callee, target)) {
1240 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1241 "signatures mismatch");
1242 return nullptr;
1243 }
1244
1245 // In lambda forms we erase signature types to avoid resolving issues
1246 // involving class loaders. When we optimize a method handle invoke
1247 // to a direct call we must cast the receiver and arguments to their
1248 // actual types.
1249 ciSignature* signature = target->signature();
1250 const int receiver_skip = target->is_static() ? 0 : 1;
1251 // Cast receiver to its type.
1252 if (!target->is_static()) {
1253 cast_argument(nargs, 0, signature->accessing_klass(), kit);
1254 }
1255 // Cast reference arguments to their types.
1256 for (int i = 0, j = 0; i < signature->count(); i++) {
1257 ciType* t = signature->type_at(i);
1258 if (t->is_klass()) {
1259 cast_argument(nargs, receiver_skip + j, t, kit);
1260 }
1261 j += t->size(); // long and double take two slots
1262 }
1263
1264 // Try to get the most accurate receiver type
1265 const bool is_virtual = (iid == vmIntrinsics::_linkToVirtual);
1266 const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1267 int vtable_index = Method::invalid_vtable_index;
1268 bool call_does_dispatch = false;
1269
1270 ciKlass* speculative_receiver_type = nullptr;
1271 if (is_virtual_or_interface) {
1272 ciInstanceKlass* klass = target->holder();
1273 Node* receiver_node = kit.argument(0);
1274 const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1275 // call_does_dispatch and vtable_index are out-parameters. They might be changed.
1276 // optimize_virtual_call() takes 2 different holder
1277 // arguments for a corner case that doesn't apply here (see
1278 // Parse::do_call())
1279 target = C->optimize_virtual_call(caller, klass, klass,
1280 target, receiver_type, is_virtual,
1281 call_does_dispatch, vtable_index, // out-parameters
1282 false /* check_access */);
1283 // We lack profiling at this call but type speculation may
1284 // provide us with a type
1285 speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
1286 }
1287 CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1288 allow_inline,
1289 PROB_ALWAYS,
1290 speculative_receiver_type,
1291 true);
1292 return cg;
1293 } else {
1294 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1295 "member_name not constant");
1296 }
1297 }
1298 break;
1299
1300 case vmIntrinsics::_linkToNative:
1301 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1302 "native call");
1303 break;
1304
1305 default:
1306 fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1307 break;
1308 }
1309 return nullptr;
1310 }
1311
1346 // do_intrinsic(0)
1347 // else
1348 // if (predicate(1))
1349 // do_intrinsic(1)
1350 // ...
1351 // else
1352 // do_java_comp
1353
1354 GraphKit kit(jvms);
1355 PhaseGVN& gvn = kit.gvn();
1356
1357 CompileLog* log = kit.C->log();
1358 if (log != nullptr) {
1359 log->elem("predicated_intrinsic bci='%d' method='%d'",
1360 jvms->bci(), log->identify(method()));
1361 }
1362
1363 if (!method()->is_static()) {
1364 // We need an explicit receiver null_check before checking its type in the predicate.
1365 // We share a map with the caller, so his JVMS gets adjusted.
1366 kit.null_check_receiver_before_call(method());
1367 if (kit.stopped()) {
1368 return kit.transfer_exceptions_into_jvms();
1369 }
1370 }
1371
1372 int n_predicates = _intrinsic->predicates_count();
1373 assert(n_predicates > 0, "sanity");
1374
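// Collects the resulting JVM state of each predicate path (plus one for the fallback to
// normal compilation) so they can be merged afterwards.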
1375 JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1376
1377 // Region for normal compilation code if intrinsic failed.
1378 Node* slow_region = new RegionNode(1);
1379
1380 int results = 0;
1381 for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1382 #ifdef ASSERT
1383 JVMState* old_jvms = kit.jvms();
1384 SafePointNode* old_map = kit.map();
1385 Node* old_io = old_map->i_o();
1386 Node* old_mem = old_map->memory();
|