 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "ci/ciObjArray.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"

// Utility function: build the call signature type (TypeFunc) for this generator's method.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

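// Variant that looks up the symbolic (declared) method at the caller's bci and
// compares it against the actual target m.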
bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != nullptr) ;
    return nullptr;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool _separate_io_proj;

 protected:
  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(nullptr),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  virtual CallGenerator* with_call_node(CallNode* call) {
    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
    dcg->set_call_node(call->as_CallStaticJava());
    return dcg;
  }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
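  // Static calls resolve through the static-call stub; non-static calls emitted
  // here are statically bound, so they use the optimized virtual-call stub,
  // which needs no receiver type check.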
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
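  // Receiver's vtable slot for the call, or Method::invalid_vtable_index when
  // resolution goes through an inline cache instead of the vtable.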
  int _vtable_index;
  bool _separate_io_proj;
  CallDynamicJavaNode* _call_node;

 protected:
  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }

 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  int vtable_index() const { return _vtable_index; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it. The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      nullptr, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later

  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call. Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != nullptr) return nullptr;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

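// Late-inlined method handle linker call: emitted as a direct call first, then
// revisited during incremental inlining, by which time the MemberName/MethodHandle
// argument may have become constant so the call can be inlined or strength-reduced.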
class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  bool _input_not_const;

  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // When inlining a virtual call, the null check at the call and the call itself can throw. These two paths have
  // different expression stacks, which causes late inlining to break. The MH invoker is not expected to be called
  // from a method with exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the
  // stack, which solves the issue of late inlining with exceptions.
  assert(!jvms->method()->has_exception_handlers() ||
         (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
          method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  bool input_not_const = true;
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
  assert(!input_not_const, "sanity");  // shouldn't have been scheduled for inlining in the first place

  if (cg != nullptr) {
    if (!allow_inline) {
      C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE,
                                  "late method handle call resolution");
    }
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // A method handle call with a constant appendix argument should be either inlined or replaced with a direct call,
    // unless there's a signature mismatch between caller and callee. If it fails, there's not much to be improved later,
    // so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
    return false;
  }
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  assert(IncrementalInlineMH, "required");
  Compile::current()->mark_has_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;

void LateInlineMHCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineVirtualCallGenerator::do_late_inline() {
  assert(_callee != nullptr, "required");  // set up in CallDynamicJavaNode::Ideal
  CallGenerator::do_late_inline_helper();
}

void CallGenerator::do_late_inline_helper() {
  assert(is_late_inline(), "only late inline allowed");

  // Can't inline it if the call is dead or its control input is gone.
  CallNode* call = call_node();
  if (call == nullptr || call->outcnt() == 0 ||
      call->in(0) == nullptr || call->in(0)->is_top()) {
    return;
  }

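  // Bail out if any argument has died (is top): the call sits on a path that was
  // removed during incremental inlining.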
  const TypeTuple* r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }
  if (call->in(TypeFunc::Memory)->is_MergeMem()) {
    MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
    if (merge_mem->base_memory() == merge_mem->empty_memory()) {
      return;  // dead path
    }
  }

  // check for unreachable loop
  CallProjections callprojs;
  // Similar to incremental inlining, don't assert that all call
  // projections are still there for post-parse call devirtualization.
  bool do_asserts = !is_mh_late_inline() && !is_virtual_late_inline();
  call->extract_projections(&callprojs, true, do_asserts);
  if ((callprojs.fallthrough_catchproj == call->in(0)) ||
      (callprojs.catchall_catchproj == call->in(0)) ||
      (callprojs.fallthrough_memproj == call->in(TypeFunc::Memory)) ||
      (callprojs.catchall_memproj == call->in(TypeFunc::Memory)) ||
      (callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
      (callprojs.catchall_ioproj == call->in(TypeFunc::I_O)) ||
      (callprojs.resproj != nullptr && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj != nullptr && call->find_edge(callprojs.exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // If the call is marked as pure (no important side effects) and its result
  // isn't used, it is safe to remove the call.
  bool result_not_used = (callprojs.resproj == nullptr || callprojs.resproj->outcnt() == 0);

  if (is_pure_call() && result_not_used) {
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true, do_asserts);
  } else {
    // Make a shallow clone of the JVMState, appropriate for driving a parse.
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      C->initial_gvn()->set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    uint nargs = method()->arg_size();
    // blow away old call arguments
    Node* top = C->top();
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_req(TypeFunc::Parms + i1, top);
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
    }

    C->log_late_inline(this);

    // The JVMState is ready, so perform some final checks before attempting to inline.
    if (!do_late_inline_check(C, jvms)) {
      map->disconnect_inputs(C);
      return;
    }

    // Set up default node notes to be picked up by the inlining.
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != nullptr) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = inline_cg()->generate(jvms);
    if (new_jvms == nullptr) return;  // no change
    if (C->failing()) return;

    if (is_mh_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (method handle)");
    } else if (is_string_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (string method)");
    } else if (is_boxing_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (boxing method)");
    } else if (is_vector_reboxing_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (vector reboxing method)");
    } else {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded");
    }

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
      result = kit.must_be_not_null(result, false);
    }

    if (inline_cg()->is_inline()) {
      C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
      C->env()->notice_inlined_method(inline_cg()->method());
    }
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped());  // path is dead; needs cleanup
    kit.replace_call(call, result, true, do_asserts);
  }
}

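// String-concatenation methods are always queued for late inlining: this gives
// the string-concat optimization (PhaseStringOpts) a chance to transform the
// whole pattern first; anything left over is inlined afterwards.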
class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == nullptr) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
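  // Merge the JVM state: build a phi for every local, stack slot, and monitor
  // that differs between the fast and slow maps.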
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
  Compile* C = Compile::current();
  bool should_delay = C->should_delay_inlining();
  if (cg != nullptr) {
    if (should_delay && IncrementalInlineMH) {
      return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

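  // Immediate inlining was not possible. If the site is actually reached
  // (profile count > 0), queue it for another attempt during incremental
  // inlining; otherwise emit a plain out-of-line call.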
  if (IncrementalInlineMH && call_site_count > 0 &&
      (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  if (StressMethodHandleLinkerInlining) {
    allow_inline = false;
  }
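  // Each MH linker intrinsic carries its actual target either in the MethodHandle
  // receiver (invokeBasic) or in a trailing MemberName argument (linkTo*); the
  // call can only be optimized once that input is a compile-time constant.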
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
        if (recv_toop != nullptr) {
          ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
          const int vtable_index = Method::invalid_vtable_index;
                                                false /* call_does_dispatch */,
                                                jvms,
                                                allow_inline,
                                                PROB_ALWAYS);
          return cg;
        } else {
          assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
                 Type::str(receiver->bottom_type()));
          print_inlining_failure(C, callee, jvms, "receiver is always null");
        }
      } else {
        print_inlining_failure(C, callee, jvms, "receiver not constant");
      }
    } break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get the trailing MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms, "signatures mismatch");
          return nullptr;
        }

        // In lambda forms we erase signature types to avoid resolution issues
        // involving class loaders. When we optimize a method handle invoke
        // to a direct call, we must cast the receiver and arguments to their
        // actual types.
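        // For example, a lambda form may declare (Object,Object)->Object while
        // the resolved vmtarget is (String,int)->String; the casts below narrow
        // the erased values to the target's declared types.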
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* recv = kit.argument(0);
          Node* casted_recv = kit.maybe_narrow_object_type(recv, signature->accessing_klass());
        ciKlass* speculative_receiver_type = nullptr;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node* receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters; they might be changed.
          // optimize_virtual_call() takes two different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call()).
          target = C->optimize_virtual_call(caller, klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call, but type speculation may
          // provide us with a type.
          speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              allow_inline,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms, "member_name not constant");
      }
    } break;

  case vmIntrinsics::_linkToNative:
    print_inlining_failure(C, callee, jvms, "native call");
    break;

  default:
    fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
    break;
  }
  return nullptr;
}

//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
    //   do_intrinsic(0)
    // else
    //   if (predicate(1))
    //     do_intrinsic(1)
    //   ...
    //   else
    //     do_java_comp

    GraphKit kit(jvms);
    PhaseGVN& gvn = kit.gvn();

    CompileLog* log = kit.C->log();
    if (log != nullptr) {
      log->elem("predicated_intrinsic bci='%d' method='%d'",
                jvms->bci(), log->identify(method()));
    }

    if (!method()->is_static()) {
      // We need an explicit receiver null_check before checking its type in predicate.
      // We share a map with the caller, so his JVMS gets adjusted.
      Node* receiver = kit.null_check_receiver_before_call(method());
      if (kit.stopped()) {
        return kit.transfer_exceptions_into_jvms();
      }
    }

    int n_predicates = _intrinsic->predicates_count();
    assert(n_predicates > 0, "sanity");

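    // One result JVMState per predicated intrinsic path, plus one slot for the
    // fall-through (non-intrinsic) Java path.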
    JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

    // Region for normal compilation code if intrinsic failed.
    Node* slow_region = new RegionNode(1);

    int results = 0;
    for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
      JVMState* old_jvms = kit.jvms();
      SafePointNode* old_map = kit.map();
      Node* old_io = old_map->i_o();
      Node* old_mem = old_map->memory();