/*
 * Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "ci/ciObjArray.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}
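
// Note on the helpers above (an illustrative sketch, mirroring their use in
// the generate() methods below): at a lambda-form call site the symbolic
// method can be a MethodHandle intrinsic (e.g. a linkTo* linker) while the
// method actually emitted is the resolved target. In that case
// is_inlined_method_handle_intrinsic() returns true and the call node is
// marked so runtime resolution trusts the attached callee instead of the
// constant pool entry:
//
//   if (is_inlined_method_handle_intrinsic(jvms, method())) {
//     call->set_override_symbolic_info(true);
//   }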

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == nullptr, "parse must be possible");
  }

  virtual bool is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  bool is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return nullptr;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  if (C->failing()) return nullptr;

  // Grab signature for matching/allocation
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != nullptr) ;
    return nullptr;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool _separate_io_proj;

protected:
  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }

public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  virtual CallGenerator* with_call_node(CallNode* call) {
    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
    dcg->set_call_node(call->as_CallStaticJava());
    return dcg;
  }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}
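
// Hypothetical usage sketch (call-site names are illustrative; the real
// driver is the parsing/inlining policy code, not this file):
//
//   CallGenerator* cg = CallGenerator::for_direct_call(callee);
//   JVMState* new_jvms = cg->generate(jvms);  // emits one CallStaticJavaNode
//
// For non-static methods the generator also null-checks the receiver and
// marks the call optimized-virtual, as seen above.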

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
  bool _separate_io_proj;
  CallDynamicJavaNode* _call_node;

protected:
  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }

public:
  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  int vtable_index() const { return _vtable_index; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  if (kit.C->log() != nullptr) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it. The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      nullptr, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod* caller = kit.method();
  ciMethodData* caller_md = (caller == nullptr) ? nullptr : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later

  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call. Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}
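
// Summary of the null-check policy above: an explicit receiver null check is
// emitted only when implicit checks are unavailable (no inline caches, no
// implicit null checks, no read-protected zero page) or when the caller's
// profile shows at least ImplicitNullCheckThreshold Reason_null_check traps;
// otherwise the trailing cast_not_null(receiver) merely records the implicit
// check's effect in the shared map.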

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != nullptr) return nullptr;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != nullptr) return nullptr;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
}
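
// Illustrative sketch of how these factories divide the work; the actual
// selection logic lives in Compile::call_generator (used elsewhere in this
// file), not here:
//
//   CallGenerator* cg = call_does_dispatch
//       ? CallGenerator::for_virtual_call(callee, vtable_index)
//       : CallGenerator::for_direct_call(callee);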

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about

protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}
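
// Life cycle of a late inline, sketched under the usual incremental-inlining
// flow: generate() first emits an ordinary out-of-line CallStaticJavaNode and
// registers this generator with the compile; after the main parse, Compile
// calls do_late_inline(), which replays the call through
// do_late_inline_helper() and, if do_late_inline_check() approves, replaces
// the call node with the graph produced by _inline_cg.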

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  bool _input_not_const;

  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);

public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // When inlining a virtual call, the null check at the call and the call itself can throw. These 2 paths have different
  // expression stacks which causes late inlining to break. The MH invoker is not expected to be called from a method with
  // exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack which solves the issue
  // of late inlining with exceptions.
  assert(!jvms->method()->has_exception_handlers() ||
         (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
          method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  bool input_not_const = true;
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
  assert(!input_not_const, "sanity");  // shouldn't have been scheduled for inlining in the first place

  if (cg != nullptr) {
    if (!allow_inline) {
      C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE,
                                  "late method handle call resolution");
    }
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || cg->is_virtual_late_inline() ||
           AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // A method handle call with a constant appendix argument should be either inlined or
    // replaced with a direct call, unless there is a signature mismatch between caller and
    // callee. If such a failure occurs, there is not much to improve later, so don't
    // reinstall the generator; doing so would keep bouncing it between IGVN and incremental
    // inlining indefinitely.
    return false;
  }
}
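
// Note: even when allow_inline is false above, for_method_handle_inline() may
// still return a generator that merely strength-reduces the MH linker
// invocation to a direct call; the inline printer then records it as "late
// method handle call resolution" rather than as a true inline.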

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  assert(IncrementalInlineMH, "required");
  Compile::current()->mark_has_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

// Allow inlining decisions to be delayed
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
private:
  jlong _unique_id;  // unique id for log compilation
  CallGenerator* _inline_cg;
  ciMethod* _callee;
  bool _is_pure_call;
  float _prof_factor;

protected:
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

public:
  LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
    : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
      _unique_id(0), _inline_cg(nullptr), _callee(nullptr), _is_pure_call(false), _prof_factor(prof_factor) {
    assert(IncrementalInlineVirtual, "required");
  }

  virtual bool is_late_inline() const { return true; }

  virtual bool is_virtual_late_inline() const { return true; }

  // Convert the CallDynamicJava into an inline
  virtual void do_late_inline();

  virtual ciMethod* callee_method() {
    return _callee;
  }

  virtual void set_callee_method(ciMethod* m) {
    assert(_callee == nullptr || _callee == m, "repeated inline attempt with different callee");
    _callee = m;
  }

  virtual JVMState* generate(JVMState* jvms) {
    // Emit the CallDynamicJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
    if (call_node() != nullptr) {
      call_node()->set_generator(this);
    }
    return new_jvms;
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
  // Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().

  // Implicit receiver null checks introduce problems when exception states are combined.
  Node* receiver = jvms->map()->argument(jvms, 0);
  const Type* recv_type = C->initial_gvn()->type(receiver);
  if (recv_type->maybe_null()) {
    C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE,
                                "late call devirtualization failed (receiver may be null)");
    return false;
  }
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  if (!allow_inline && _callee->holder()->is_interface()) {
    // Don't convert the interface call to a direct call guarded by an interface subtype check.
    C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE,
                                "late call devirtualization failed (interface call)");
    return false;
  }
  CallGenerator* cg = C->call_generator(_callee,
                                        vtable_index(),
                                        false /*call_does_dispatch*/,
                                        jvms,
                                        allow_inline,
                                        _prof_factor,
                                        nullptr /*speculative_receiver_type*/,
                                        true /*allow_intrinsics*/);

  if (cg != nullptr) {
    if (!allow_inline) {
      C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE, "late call devirtualization");
    }
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
    assert(false, "no progress");
    return false;
  }
}
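
// Illustrative scenario for the check above: if IGVN narrows the receiver
// type at a CallDynamicJavaNode to a unique concrete target,
// CallDynamicJavaNode::Ideal() records the resolved callee via
// set_callee_method(), and this check then asks Compile::call_generator for a
// non-dispatching (direct or inlined) replacement. A null-free receiver type
// is required because implicit null checks don't combine with the merged
// exception states.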

CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
  assert(IncrementalInlineVirtual, "required");
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
}

void LateInlineCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineMHCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineVirtualCallGenerator::do_late_inline() {
  assert(_callee != nullptr, "required");  // set up in CallDynamicJavaNode::Ideal
  CallGenerator::do_late_inline_helper();
}

void CallGenerator::do_late_inline_helper() {
  assert(is_late_inline(), "only late inline allowed");

  // Can't inline it
  CallNode* call = call_node();
  if (call == nullptr || call->outcnt() == 0 ||
      call->in(0) == nullptr || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple* r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }
  if (call->in(TypeFunc::Memory)->is_MergeMem()) {
    MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
    if (merge_mem->base_memory() == merge_mem->empty_memory()) {
      return; // dead path
    }
  }

  // check for unreachable loop
  CallProjections callprojs;
  // Similar to incremental inlining, don't assert that all call
  // projections are still there for post-parse call devirtualization.
  bool do_asserts = !is_mh_late_inline() && !is_virtual_late_inline();
  call->extract_projections(&callprojs, true, do_asserts);
  if ((callprojs.fallthrough_catchproj == call->in(0)) ||
      (callprojs.catchall_catchproj == call->in(0)) ||
      (callprojs.fallthrough_memproj == call->in(TypeFunc::Memory)) ||
      (callprojs.catchall_memproj == call->in(TypeFunc::Memory)) ||
      (callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O)) ||
      (callprojs.catchall_ioproj == call->in(TypeFunc::I_O)) ||
      (callprojs.resproj != nullptr && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj != nullptr && call->find_edge(callprojs.exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();

  uint endoff = call->jvms()->endoff();
  if (C->inlining_incrementally()) {
    // No reachability edges should be present when incremental inlining takes place.
    // Inlining logic doesn't expect any extra edges past debug info and fails with
    // an assert in SafePointNode::grow_stack.
    assert(endoff == call->req(), "reachability edges not supported");
  } else {
    if (call->req() > endoff) { // reachability edges present
      assert(OptimizeReachabilityFences, "required");
      return; // keep the original call node as the holder of reachability info
    }
  }

  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // If the call is marked as pure (no important side effects) and its
  // result isn't used, it's safe to remove the call.
  bool result_not_used = (callprojs.resproj == nullptr || callprojs.resproj->outcnt() == 0);

  if (is_pure_call() && result_not_used) {
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true, do_asserts);
  } else {
    // Make a clone of the JVMState that is appropriate for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      C->initial_gvn()->set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    uint nargs = method()->arg_size();
    // blow away old call arguments
    Node* top = C->top();
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_req(TypeFunc::Parms + i1, top);
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
    }

    C->log_late_inline(this);

    // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
    if (!do_late_inline_check(C, jvms)) {
      map->disconnect_inputs(C);
      return;
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != nullptr) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = inline_cg()->generate(jvms);
    if (new_jvms == nullptr) return; // no change
    if (C->failing()) return;

    if (is_mh_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (method handle)");
    } else if (is_string_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (string method)");
    } else if (is_boxing_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (boxing method)");
    } else if (is_vector_reboxing_late_inline()) {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (vector reboxing method)");
    } else {
      C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded");
    }

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
      result = kit.must_be_not_null(result, false);
    }

    if (inline_cg()->is_inline()) {
      C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
      C->env()->notice_inlined_method(inline_cg()->method());
    }
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
    kit.replace_call(call, result, true, do_asserts);
  }
}
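
// Shape of do_late_inline_helper(), summarized: (1) bail out if the call node
// is dead, has top inputs, or feeds an unreachable loop; (2) rebuild a
// SafePointNode/JVMState pair that looks as if the arguments were just pushed;
// (3) run the subclass's do_late_inline_check(); (4) have inline_cg() generate
// the replacement graph and splice it in with kit.replace_call(), popping the
// return value according to method()->return_type()->size().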

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_boxing_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {

public:
  LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_vector_reboxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_vector_reboxing_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

//   static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
}

//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass* _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float _hit_prob;
  bool _exact_check;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict hit_prob to be as extreme as 0 or 1.
    // Clamp such extreme values back into the usable range.
    if (hit_prob > PROB_MAX) hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN) hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed = if_missed;
    _if_hit = if_hit;
    _hit_prob = hit_prob;
    _exact_check = exact_check;
  }

  virtual bool is_virtual() const { return true; }
  virtual bool is_inline() const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != nullptr) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = nullptr;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = nullptr;
  JVMState* slow_jvms = nullptr;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return nullptr;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != nullptr, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not match the predicted type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Fall through if the instance matches the desired type.
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (kit.failing()) {
    return nullptr;
  }
  if (new_jvms == nullptr) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == nullptr) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}
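
// Schematic of the graph built above (not literal node names):
//
//   receiver null_check
//   if (klass == _predicted_receiver)    // subtype check when !_exact_check
//     hot path:  _if_hit->generate()     // usually an inline
//   else
//     slow path: _if_missed->generate()  // usually the virtual call
//   merge control, i/o, memory and debug state through a 3-input RegionNode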


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
  Compile* C = Compile::current();
  bool should_delay = C->should_delay_inlining();
  if (cg != nullptr) {
    if (should_delay && IncrementalInlineMH) {
      return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInlineMH && call_site_count > 0 &&
      (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}
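
// Decision sketch for the factory above: a call with a constant
// MethodHandle/MemberName argument yields an immediate generator (possibly
// wrapped for MH late inlining when inlining should be delayed); a
// non-constant argument or inlining pressure defers the decision to a
// LateInlineMHCallGenerator; failing both, the linker is called out of line.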

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  if (StressMethodHandleLinkerInlining) {
    allow_inline = false;
  }
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
        if (recv_toop != nullptr) {
          ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
          const int vtable_index = Method::invalid_vtable_index;

          if (!ciMethod::is_consistent_info(callee, target)) {
            print_inlining_failure(C, callee, jvms, "signatures mismatch");
            return nullptr;
          }

          CallGenerator* cg = C->call_generator(target, vtable_index,
                                                false /* call_does_dispatch */,
                                                jvms,
                                                allow_inline,
                                                PROB_ALWAYS);
          return cg;
        } else {
          assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
                 Type::str(receiver->bottom_type()));
          print_inlining_failure(C, callee, jvms, "receiver is always null");
        }
      } else {
        print_inlining_failure(C, callee, jvms, "receiver not constant");
      }
    } break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms, "signatures mismatch");
          return nullptr;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders. When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to its
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* recv = kit.argument(0);
          Node* casted_recv = kit.maybe_narrow_object_type(recv, signature->accessing_klass());
          if (casted_recv->is_top()) {
            print_inlining_failure(C, callee, jvms, "argument types mismatch");
            return nullptr;  // FIXME: effectively dead; issue a halt node instead
          } else if (casted_recv != recv) {
            kit.set_argument(0, casted_recv);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            Node* casted_arg = kit.maybe_narrow_object_type(arg, t->as_klass());
            if (casted_arg->is_top()) {
              print_inlining_failure(C, callee, jvms, "argument types mismatch");
              return nullptr;  // FIXME: effectively dead; issue a halt node instead
            } else if (casted_arg != arg) {
              kit.set_argument(receiver_skip + j, casted_arg);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int vtable_index = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = nullptr;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node* receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters. They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              allow_inline,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms, "member_name not constant");
      }
    } break;

  case vmIntrinsics::_linkToNative:
    print_inlining_failure(C, callee, jvms, "native call");
    break;

  default:
    fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
    break;
  }
  return nullptr;
}

//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg = cg;
  }

  virtual bool is_virtual() const { return true; }
  virtual bool is_inline() const { return true; }
  virtual bool is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == nullptr)
  //      uncommon_trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //      ...
  //      else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != nullptr) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (kit.failing()) {
        return nullptr;
      }
      if (new_jvms == nullptr) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == nullptr) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return nullptr;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != nullptr, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = nullptr;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}
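
// The merge above parallels PredictedCallGenerator::generate(): every
// surviving predicate path plus the slow path contributes one RegionNode
// input, and any JVMState slot whose value differs across paths gets a
// PhiNode (see needs_phi).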

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles calls that are always replaced by an uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver).
  // Callsite signature can be different from the actual method being called (i.e. _linkTo* sites).
  // Always use the callsite signature.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, nullptr, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}
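
// Hypothetical usage sketch (the reason/action shown are one plausible
// combination; the real call sites live in the inlining policy code, not
// here):
//
//   CallGenerator* cg = CallGenerator::for_uncommon_trap(
//       callee, Deoptimization::Reason_unreached,
//       Deoptimization::Action_reinterpret);
//
// generate() then replaces the call with a deoptimization point that
// re-pushes the declared arguments before trapping.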

// (Note: Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note: Merged hook_up_exits into ParseGenerator::generate.)