/*
 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "ci/ciObjArray.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "oops/accessDecorators.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

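// A call site counts as an inlined method handle intrinsic when the method
// named symbolically at the call site is a MethodHandle linker/invoker
// (MH.linkTo*/invokeBasic) but the actual target m it resolved to is a
// regular Java method, i.e. the adapter has been optimized away. The
// overloads below differ only in how the symbolic method is obtained.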
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
 private:
  bool  _is_osr;
  float _expected_uses;

 public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == nullptr, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  bool is_osr() const { return _is_osr; }
};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
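    // (That single argument is the OSR buffer in which the interpreter hands
    // over the live frame state; the parser unpacks it into locals and stack,
    // see Parse::load_interpreter_state.)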
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return nullptr;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  if (C->failing()) return nullptr;

  // Grab signature for matching/allocation
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != nullptr) ;
    return nullptr;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 protected:
  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(nullptr),
      _separate_io_proj(separate_io_proj)
  {
    if (InlineTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
      // If that call has not been optimized by the time optimizations are over,
      // we'll need to add a call to create an inline type instance from the klass
      // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
      // Separating memory and I/O projections for exceptions is required to
      // perform that graph transformation.
      _separate_io_proj = true;
    }
  }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  virtual CallGenerator* with_call_node(CallNode* call) {
    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
    dcg->set_call_node(call->as_CallStaticJava());
    return dcg;
  }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

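  // Note that 'target' is a resolution stub, not the callee entry point: the
  // first execution resolves the callee and patches the call site
  // (see SharedRuntime::resolve_static_call_C and resolve_opt_virtual_call_C).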
  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
  }
  kit.set_arguments_for_java_call(call, is_late_inline());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  int _vtable_index;
  bool _separate_io_proj;
  CallDynamicJavaNode* _call_node;

 protected:
  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }

 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  int vtable_index() const { return _vtable_index; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);
  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it. The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      nullptr, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod* caller = kit.method();
  ciMethodData* caller_md = (caller == nullptr) ? nullptr : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later

  kit.set_arguments_for_java_call(call);
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call. Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
}

// Allow inlining decisions to be delayed
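// A late inline call generator emits a regular out-of-line call through
// DirectCallGenerator::generate() and registers itself with the compiler.
// Once the main parse has finished, do_late_inline() revisits the call node
// and, if the late checks still pass, replaces it with the inlined body
// (see CallGenerator::do_late_inline_helper()).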
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* inline_cg() {
    return _inline_cg;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  bool _input_not_const;

  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
  // expression stacks which causes late inlining to break. The MH invoker is not expected to be called from a method with
  // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack which solves the issue
  // of late inlining with exceptions.
  assert(!jvms->method()->has_exception_handlers() ||
         (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
          method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  bool input_not_const = true;
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
  assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place

  if (cg != nullptr) {
    // AlwaysIncrementalInline causes for_method_handle_inline() to
    // return a LateInlineCallGenerator. Extract the
    // InlineCallGenerator from it.
    if (AlwaysIncrementalInline && cg->is_late_inline() && !cg->is_virtual_late_inline()) {
      cg = cg->inline_cg();
      assert(cg != nullptr, "inline call generator expected");
    }

    if (!allow_inline) {
      C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE,
                                  "late method handle call resolution");
    }
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // Method handle call which has a constant appendix argument should be either inlined or replaced with a direct call
    // unless there's a signature mismatch between caller and callee. If the failure occurs, there's not much to be improved later,
    // so don't reinstall the generator to avoid pushing the generator between IGVN and incremental inlining indefinitely.
    return false;
  }
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  assert(IncrementalInlineMH, "required");
  Compile::current()->mark_has_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

// Allow inlining decisions to be delayed
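// Virtual variant: the CallDynamicJava node is kept around and revisited
// after parsing, when improved type information may allow the call to be
// devirtualized (and then possibly inlined); see do_late_inline_check() below.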
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  CallGenerator* _inline_cg;
  ciMethod* _callee;
  bool _is_pure_call;
  float _prof_factor;

 protected:
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
    : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
      _unique_id(0), _inline_cg(nullptr), _callee(nullptr), _is_pure_call(false), _prof_factor(prof_factor) {
    assert(IncrementalInlineVirtual, "required");
  }

  virtual bool is_late_inline() const { return true; }

  virtual bool is_virtual_late_inline() const { return true; }

  // Convert the CallDynamicJava into an inline
  virtual void do_late_inline();

  virtual ciMethod* callee_method() {
    return _callee;
  }

  virtual void set_callee_method(ciMethod* m) {
    assert(_callee == nullptr || _callee == m, "repeated inline attempt with different callee");
    _callee = m;
  }

  virtual JVMState* generate(JVMState* jvms) {
    // Emit the CallDynamicJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
    if (call_node() != nullptr) {
      call_node()->set_generator(this);
    }
    return new_jvms;
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
  // Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().

  // Implicit receiver null checks introduce problems when exception states are combined.
  Node* receiver = jvms->map()->argument(jvms, 0);
  const Type* recv_type = C->initial_gvn()->type(receiver);
  if (recv_type->maybe_null()) {
    C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE,
                                "late call devirtualization failed (receiver may be null)");
    return false;
  }
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  if (!allow_inline && _callee->holder()->is_interface()) {
    // Don't convert the interface call to a direct call guarded by an interface subtype check.
    C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE,
                                "late call devirtualization failed (interface call)");
    return false;
  }
  CallGenerator* cg = C->call_generator(_callee,
                                        vtable_index(),
                                        false /*call_does_dispatch*/,
                                        jvms,
                                        allow_inline,
                                        _prof_factor,
                                        nullptr /*speculative_receiver_type*/,
                                        true /*allow_intrinsics*/);

  if (cg != nullptr) {
    if (!allow_inline) {
      C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE, "late call devirtualization");
    }
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
    assert(false, "no progress");
    return false;
  }
}

CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
  assert(IncrementalInlineVirtual, "required");
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
}

void LateInlineCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineMHCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineVirtualCallGenerator::do_late_inline() {
  assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
  CallGenerator::do_late_inline_helper();
}

void CallGenerator::do_late_inline_helper() {
  assert(is_late_inline(), "only late inline allowed");

  // Can't inline it
  CallNode* call = call_node();
  if (call == nullptr || call->outcnt() == 0 ||
      call->in(0) == nullptr || call->in(0)->is_top()) {
    return;
  }

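  // Note: domain_cc() is the calling-convention view of the signature: inline
  // type arguments passed as scalarized fields contribute one entry per field,
  // whereas domain_sig() mirrors the declared signature (one entry per argument).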
  const TypeTuple* r = call->tf()->domain_cc();
  for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
    if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }
  if (call->in(TypeFunc::Memory)->is_MergeMem()) {
    MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
    if (merge_mem->base_memory() == merge_mem->empty_memory()) {
      return; // dead path
    }
  }

  // check for unreachable loop
  // Similar to incremental inlining, don't assert that all call
  // projections are still there for post-parse call devirtualization.
  bool do_asserts = !is_mh_late_inline() && !is_virtual_late_inline();
  CallProjections* callprojs = call->extract_projections(true, do_asserts);
  if ((callprojs->fallthrough_catchproj == call->in(0)) ||
      (callprojs->catchall_catchproj == call->in(0)) ||
      (callprojs->fallthrough_memproj == call->in(TypeFunc::Memory)) ||
      (callprojs->catchall_memproj == call->in(TypeFunc::Memory)) ||
      (callprojs->fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
      (callprojs->catchall_ioproj == call->in(TypeFunc::I_O)) ||
      (callprojs->exobj != nullptr && call->find_edge(callprojs->exobj) != -1)) {
    return;
  }
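  // (A projection of the call feeding back into the call's own inputs means
  // the call sits on a dead, unreachable cycle; there is nothing sensible to
  // inline into, so bail out.)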

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  bool result_not_used = true;
  for (uint i = 0; i < callprojs->nb_resproj; i++) {
    if (callprojs->resproj[i] != nullptr) {
      if (callprojs->resproj[i]->outcnt() != 0) {
        result_not_used = false;
      }
      if (call->find_edge(callprojs->resproj[i]) != -1) {
        return;
      }
    }
  }

  if (is_pure_call() && result_not_used) {
    // The call is marked as pure (no important side effects), but result isn't used.
    // It's safe to remove the call.
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true, do_asserts);
  } else {
    // Make a clone of the JVMState that is appropriate for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    PhaseGVN& gvn = *C->initial_gvn();
    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      gvn.set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    // blow away old call arguments
    for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
      map->set_req(i1, C->top());
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    const TypeTuple* domain_sig = call->_tf->domain_sig();
    uint nargs = method()->arg_size();
    assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");

    uint j = TypeFunc::Parms;
    int arg_num = 0;
    for (uint i1 = 0; i1 < nargs; i1++) {
      const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
      if (t->is_inlinetypeptr() && !method()->get_Method()->mismatch() && method()->is_scalarized_arg(arg_num)) {
        // Inline type arguments are not passed by reference: we get an argument per
        // field of the inline type. Build InlineTypeNodes from the inline type arguments.
        GraphKit arg_kit(jvms, &gvn);
        Node* vt = InlineTypeNode::make_from_multi(&arg_kit, call, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
        map->set_control(arg_kit.control());
        map->set_argument(jvms, i1, vt);
      } else {
        map->set_argument(jvms, i1, call->in(j++));
      }
      if (t != Type::HALF) {
        arg_num++;
      }
    }

    C->log_late_inline(this);

    // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
    if (!do_late_inline_check(C, jvms)) {
      map->disconnect_inputs(C);
      return;
    }

    // Check if we are late inlining a method handle call that returns an inline type as fields.
    Node* buffer_oop = nullptr;
    ciMethod* inline_method = inline_cg()->method();
    ciType* return_type = inline_method->return_type();
    if (!call->tf()->returns_inline_type_as_fields() &&
        return_type->is_inlinetype() && return_type->as_inline_klass()->can_be_returned_as_fields()) {
      // Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
      // Do this before the method handle call in case the buffer allocation triggers deoptimization and
      // we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
      GraphKit arg_kit(jvms, &gvn);
      {
        PreserveReexecuteState preexecs(&arg_kit);
        arg_kit.jvms()->set_should_reexecute(true);
        arg_kit.inc_sp(nargs);
        Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(return_type->as_inline_klass()));
        buffer_oop = arg_kit.new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true);
      }
      jvms = arg_kit.transfer_exceptions_into_jvms();
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != nullptr) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = inline_cg()->generate(jvms);
    if (new_jvms == nullptr)  return; // no change
    if (C->failing())  return;

    if (is_mh_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (method handle)");
    } else if (is_string_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (string method)");
    } else if (is_boxing_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (boxing method)");
    } else if (is_vector_reboxing_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (vector reboxing method)");
    } else {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded");
    }

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
      result = kit.must_be_not_null(result, false);
    }

    if (inline_cg()->is_inline()) {
      C->set_has_loops(C->has_loops() || inline_method->has_loops());
      C->env()->notice_inlined_method(inline_method);
    }
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup

    // Handle inline type returns
    InlineTypeNode* vt = result->isa_InlineType();
    if (vt != nullptr) {
      if (call->tf()->returns_inline_type_as_fields()) {
        vt->replace_call_results(&kit, call, C);
      } else {
        // Result might still be allocated (for example, if it has been stored to a non-flat field)
        if (!vt->is_allocated(&kit.gvn())) {
          assert(buffer_oop != nullptr, "should have allocated a buffer");
          RegionNode* region = new RegionNode(3);

          // Check if result is null
          Node* null_ctl = kit.top();
          kit.null_check_common(vt->get_null_marker(), T_INT, false, &null_ctl);
          region->init_req(1, null_ctl);
          PhiNode* oop = PhiNode::make(region, kit.gvn().zerocon(T_OBJECT), TypeInstPtr::make(TypePtr::BotPTR, vt->type()->inline_klass()));
          Node* init_mem = kit.reset_memory();
          PhiNode* mem = PhiNode::make(region, init_mem, Type::MEMORY, TypePtr::BOTTOM);

          // Not null, initialize the buffer
          kit.set_all_memory(init_mem);

          Node* payload_ptr = kit.basic_plus_adr(buffer_oop, kit.gvn().type(vt)->inline_klass()->payload_offset());
          vt->store_flat(&kit, buffer_oop, payload_ptr, false, true, true, IN_HEAP | MO_UNORDERED);
          // Do not let stores that initialize this buffer be reordered with a subsequent
          // store that would make this buffer accessible by other threads.
          AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop);
          assert(alloc != nullptr, "must have an allocation node");
          kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
          region->init_req(2, kit.control());
          oop->init_req(2, buffer_oop);
          mem->init_req(2, kit.merged_memory());

          // Update oop input to buffer
          kit.gvn().hash_delete(vt);
          vt->set_oop(kit.gvn(), kit.gvn().transform(oop));
          vt->set_is_buffered(kit.gvn());
          vt = kit.gvn().transform(vt)->as_InlineType();

          kit.set_control(kit.gvn().transform(region));
          kit.set_all_memory(kit.gvn().transform(mem));
          kit.record_for_igvn(region);
          kit.record_for_igvn(oop);
          kit.record_for_igvn(mem);
        }
        result = vt;
      }
      DEBUG_ONLY(buffer_oop = nullptr);
    } else {
      assert(result->is_top() || !call->tf()->returns_inline_type_as_fields() || !call->as_CallJava()->method()->return_type()->is_loaded(), "Unexpected return value");
    }
    assert(buffer_oop == nullptr, "unused buffer allocation");

    kit.replace_call(call, result, true, do_asserts);
  }
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_boxing_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_vector_reboxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_vector_reboxing_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

//   static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
}

//------------------------PredictedCallGenerator------------------------------
// Internal class which handles calls with a predicted receiver type.
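// The generated code amounts to a type-check diamond: null-check the receiver,
// compare its klass against _predicted_receiver (exact check) or its subtype
// relation (guarded check), run _if_hit on the matching path and _if_missed on
// the slow path, then merge the two JVM states.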
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool is_virtual()  const { return true; }
  virtual bool is_inline()   const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != nullptr) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = nullptr;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = nullptr;
  JVMState* slow_jvms = nullptr;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return nullptr;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != nullptr, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not match the predicted type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Fall through if the instance matches the desired type.
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (kit.failing()) {
    return nullptr;
  }
  if (new_jvms == nullptr) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == nullptr) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Allocate inline types if they are merged with objects (similar to Parse::merge_common())
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
    // TODO 8284443 still needed?
    if (m->is_InlineType() && !t->is_inlinetypeptr()) {
      // Allocate inline type in fast path
      m = m->as_InlineType()->buffer(&kit);
      kit.map()->set_req(i, m);
    }
    if (n->is_InlineType() && !t->is_inlinetypeptr()) {
      // Allocate inline type in slow path
      PreserveJVMState pjvms(&kit);
      kit.set_map(slow_map);
      n = n->as_InlineType()->buffer(&kit);
      kit.map()->set_req(i, n);
      slow_map = kit.stop();
    }
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff()
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
  Compile* C = Compile::current();
  bool should_delay = C->should_delay_inlining();
  if (cg != nullptr) {
    if (should_delay && IncrementalInlineMH) {
      return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInlineMH && (AlwaysIncrementalInline ||
      (call_site_count > 0 && (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}


CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  if (StressMethodHandleLinkerInlining) {
    allow_inline = false;
  }
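  // Two intrinsic families are handled below: invokeBasic, where the
  // interesting constant is the MethodHandle receiver itself, and the linkTo*
  // linkers, where it is the trailing MemberName argument naming the target.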
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
        if (recv_toop != nullptr) {
          ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
          const int vtable_index = Method::invalid_vtable_index;

          if (!ciMethod::is_consistent_info(callee, target)) {
            print_inlining_failure(C, callee, jvms, "signatures mismatch");
            return nullptr;
          }

          CallGenerator* cg = C->call_generator(target, vtable_index,
                                                false /* call_does_dispatch */,
                                                jvms,
                                                allow_inline,
                                                PROB_ALWAYS);
          return cg;
        } else {
          assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
                 Type::str(receiver->bottom_type()));
          print_inlining_failure(C, callee, jvms, "receiver is always null");
        }
      } else {
        print_inlining_failure(C, callee, jvms, "receiver not constant");
      }
    } break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      int nargs = callee->arg_size();
      // Get MemberName argument:
      Node* member_name = kit.argument(nargs - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms, "signatures mismatch");
          return nullptr;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders. When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
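        // (For example, a lambda form may declare a parameter as Object while
        // the resolved target expects String; the casts below narrow the
        // erased types back to what the target declares.)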
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* recv = kit.argument(0);
          Node* casted_recv = kit.maybe_narrow_object_type(recv, signature->accessing_klass());
          if (casted_recv->is_top()) {
            print_inlining_failure(C, callee, jvms, "argument types mismatch");
            return nullptr; // FIXME: effectively dead; issue a halt node instead
          } else if (casted_recv != recv) {
            kit.set_argument(0, casted_recv);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            Node* casted_arg = kit.maybe_narrow_object_type(arg, t->as_klass());
            if (casted_arg->is_top()) {
              print_inlining_failure(C, callee, jvms, "argument types mismatch");
              return nullptr; // FIXME: effectively dead; issue a halt node instead
            } else if (casted_arg != arg) {
              kit.set_argument(receiver_skip + j, casted_arg);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = nullptr;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node* receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters. They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index, // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              allow_inline,
                                              PROB_ALWAYS,
                                              speculative_receiver_type,
                                              true);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms, "member_name not constant");
      }
    } break;

  case vmIntrinsics::_linkToNative:
    print_inlining_failure(C, callee, jvms, "native call");
    break;

  default:
    fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
    break;
  }
  return nullptr;
}

//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool is_virtual()   const { return true; }
  virtual bool is_inline()    const { return true; }
  virtual bool is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == nullptr)
  //      uncommon_trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //      ...
  //      else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != nullptr) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (kit.failing()) {
        return nullptr;
      }
      if (new_jvms == nullptr) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == nullptr) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return nullptr;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != nullptr, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff()
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = nullptr;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces a call with an unconditional uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
  // Use callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, nullptr, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)