/*
 * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciCallSite.hpp"
#include "ci/ciMethodHandle.hpp"
#include "ci/ciSymbols.hpp"
#include "classfile/vmIntrinsics.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "jvm_io.h"
#include "logging/log.hpp"
#include "logging/logLevel.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif

static void print_trace_type_profile(outputStream* out, int depth, ciKlass* prof_klass, int site_count, int receiver_count,
                                     bool with_deco) {
  if (with_deco) {
    CompileTask::print_inline_indent(depth, out);
  }
  out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
  prof_klass->name()->print_symbol_on(out);
  if (with_deco) {
    out->cr();
  }
}

static void trace_type_profile(Compile* C, ciMethod* method, JVMState* jvms,
                               ciMethod* prof_method, ciKlass* prof_klass, int site_count, int receiver_count) {
  int depth = jvms->depth() - 1;
  int bci = jvms->bci();
  if (TraceTypeProfile || C->print_inlining()) {
    if (!C->print_inlining()) {
      if (!PrintOpto && !PrintCompilation) {
        method->print_short_name();
        tty->cr();
      }
      CompileTask::print_inlining_tty(prof_method, depth, bci, InliningResult::SUCCESS);
      print_trace_type_profile(tty, depth, prof_klass, site_count, receiver_count, true);
    } else {
      auto stream = C->inline_printer()->record(method, jvms, InliningResult::SUCCESS);
      print_trace_type_profile(stream, depth, prof_klass, site_count, receiver_count, false);
    }
  }

  LogTarget(Debug, jit, inlining) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    print_trace_type_profile(&ls, depth, prof_klass, site_count, receiver_count, true);
  }
}

CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
                                       JVMState* jvms, bool allow_inline,
                                       float prof_factor, ciKlass* speculative_receiver_type,
                                       bool allow_intrinsics) {
  assert(callee != nullptr, "failed method resolution");

  ciMethod* caller = jvms->method();
  int bci = jvms->bci();
  Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  ciMethod* orig_callee = caller->get_method_at_bci(bci);

  const bool is_virtual = (bytecode == Bytecodes::_invokevirtual) || (orig_callee->intrinsic_id() == vmIntrinsics::_linkToVirtual);
  const bool is_interface = (bytecode == Bytecodes::_invokeinterface) || (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface);
  const bool is_virtual_or_interface = is_virtual || is_interface;

  const bool check_access = !orig_callee->is_method_handle_intrinsic(); // method handle intrinsics don't perform access checks

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = caller->call_profile_at_bci(bci);

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_does_dispatch && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }

  CompileLog* log = this->log();
  if (log != nullptr) {
    int rid = (receiver_count >= 0) ? log->identify(profile.receiver(0)) : -1;
    int r2id = (rid != -1 && profile.has_receiver(1)) ? log->identify(profile.receiver(1)) : -1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%f'",
                    log->identify(callee), site_count, prof_factor);
    if (call_does_dispatch) log->print(" virtual='1'");
    if (allow_inline) log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    if (callee->is_method_handle_intrinsic()) {
      log->print(" method_handle_intrinsic='1'");
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods. If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  CallGenerator* cg_intrinsic = nullptr;
  if (allow_inline && allow_intrinsics) {
    CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
    if (cg != nullptr) {
      if (cg->is_predicated()) {
        // Code without intrinsic but, hopefully, inlined.
        CallGenerator* inline_cg = this->call_generator(callee,
            vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
        if (inline_cg != nullptr) {
          cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg);
        }
      }

      // If the intrinsic does the virtual dispatch, we try to use the type profile
      // first, and hopefully inline it as the regular virtual call below.
      // We will retry the intrinsic afterwards if nothing else claims it.
      if (cg->does_virtual_dispatch()) {
        cg_intrinsic = cg;
        cg = nullptr;
      } else if (IncrementalInline && should_delay_vector_inlining(callee, jvms)) {
        return CallGenerator::for_late_inline(callee, cg);
      } else {
        return cg;
      }
    }
  }

  // Do method handle calls.
  // NOTE: This must happen before normal inlining logic below since
  // MethodHandle.invoke* are native methods which obviously don't
  // have bytecodes and so normal inlining fails.
  if (callee->is_method_handle_intrinsic()) {
    CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, allow_inline);
    return cg;
  }

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // so scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_does_dispatch) {
      InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      bool should_delay = C->should_delay_inlining() || C->directive()->should_delay_inline(callee);
      if (ilt->ok_to_inline(callee, jvms, profile, should_delay)) {
        CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
        // For optimized virtual calls, assert at runtime that the receiver object
        // is a subtype of the inlined method holder. CHA can report a method
        // as a unique target under an abstract method, but the receiver
        // sometimes has a broader type. A similar scenario is possible with
        // default methods when the type system loses information about
        // implemented interfaces.
        if (cg != nullptr && is_virtual_or_interface && !callee->is_static()) {
          CallGenerator* trap_cg = CallGenerator::for_uncommon_trap(callee,
              Deoptimization::Reason_receiver_constraint, Deoptimization::Action_none);

          cg = CallGenerator::for_guarded_call(callee->holder(), trap_cg, cg);
        }
        if (cg != nullptr) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
          if (should_delay) {
            return CallGenerator::for_late_inline(callee, cg);
          } else if (should_delay_string_inlining(callee, jvms)) {
            return CallGenerator::for_string_late_inline(callee, cg);
          } else if (should_delay_boxing_inlining(callee, jvms)) {
            return CallGenerator::for_boxing_late_inline(callee, cg);
          } else if (should_delay_vector_reboxing_inlining(callee, jvms)) {
            return CallGenerator::for_vector_reboxing_late_inline(callee, cg);
          } else {
            return cg;
          }
        }
      }
    }

    // Try using the type profile.
    if (call_does_dispatch && site_count > 0 && UseTypeProfile) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = profile.has_receiver(0) && (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
      ciMethod* receiver_method = nullptr;

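      // morphism is the number of distinct receiver types profiled at this
      // call site: 1 means monomorphic, 2 bimorphic. For example (hypothetical
      // profile), a site that only ever saw ArrayList is monomorphic, while one
      // that saw both ArrayList and LinkedList is bimorphic and may still be
      // inlined under receiver-type guards when UseBimorphicInlining is on.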
      int morphism = profile.morphism();
      if (speculative_receiver_type != nullptr) {
        if (!too_many_traps_or_recompiles(caller, bci, Deoptimization::Reason_speculate_class_check)) {
          // We have a speculative type, we should be able to resolve
          // the call. We do that before looking at the profiling at
          // this invoke because it may lead to bimorphic inlining which
          // a speculative type should help us avoid.
          receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                   speculative_receiver_type,
                                                   check_access);
          if (receiver_method == nullptr) {
            speculative_receiver_type = nullptr;
          } else {
            morphism = 1;
          }
        } else {
          // Speculation failed before. Use profiling at the call
          // (it could allow bimorphic inlining, for instance).
          speculative_receiver_type = nullptr;
        }
      }
      if (receiver_method == nullptr &&
          (have_major_receiver || morphism == 1 ||
           (morphism == 2 && UseBimorphicInlining))) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now. Look it up in the major receiver.
        assert(check_access, "required");
        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                 profile.receiver(0));
      }
      if (receiver_method != nullptr) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
            vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
        if (hit_cg != nullptr) {
          // Look up second receiver.
          CallGenerator* next_hit_cg = nullptr;
          ciMethod* next_receiver_method = nullptr;
          if (morphism == 2 && UseBimorphicInlining) {
            assert(check_access, "required");
            next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                          profile.receiver(1));
            if (next_receiver_method != nullptr) {
              next_hit_cg = this->call_generator(next_receiver_method,
                  vtable_index, !call_does_dispatch, jvms,
                  allow_inline, prof_factor);
              if (next_hit_cg != nullptr && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                // Skip if we can't inline second receiver's method
                next_hit_cg = nullptr;
              }
            }
          }
          CallGenerator* miss_cg;
          Deoptimization::DeoptReason reason = (morphism == 2
              ? Deoptimization::Reason_bimorphic
              : Deoptimization::reason_class_check(speculative_receiver_type != nullptr));
          if ((morphism == 1 || (morphism == 2 && next_hit_cg != nullptr)) &&
              !too_many_traps_or_recompiles(caller, bci, reason)) {
            // Generate uncommon trap for class check failure path
            // in case of monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
                Deoptimization::Action_maybe_recompile);
          } else {
            // Generate virtual call for class check failure path
            // in case of polymorphic virtual call site.
            miss_cg = (IncrementalInlineVirtual ? CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor)
                                                : CallGenerator::for_virtual_call(callee, vtable_index));
          }
          if (miss_cg != nullptr) {
            if (next_hit_cg != nullptr) {
              assert(speculative_receiver_type == nullptr, "shouldn't end up here if we used speculation");
              trace_type_profile(C, jvms->method(), jvms, next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
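              // PROB_MAX: once the major-receiver check has failed, the second
              // profiled receiver is expected to match essentially always, so
              // the guard on receiver(1) is treated as near-certain to hit.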
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != nullptr) {
              ciKlass* k = speculative_receiver_type != nullptr ? speculative_receiver_type : profile.receiver(0);
              trace_type_profile(C, jvms->method(), jvms, receiver_method, k, site_count, receiver_count);
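              // A speculative receiver type makes the guard effectively certain
              // (probability 1.0); otherwise use the profiled frequency of the
              // major receiver.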
              float hit_prob = speculative_receiver_type != nullptr ? 1.0 : profile.receiver_prob(0);
              CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob);
              if (cg != nullptr) {
                return cg;
              }
            }
          }
        }
      }
    }

    // If there is only one implementor of this interface then we
    // may be able to bind this invoke directly to the implementing
    // klass but we need both a dependence on the single interface
    // and on the method we bind to. Additionally since all we know
    // about the receiver type is that it's supposed to implement the
    // interface we have to insert a check that it's the class we
    // expect. Interface types are not checked by the verifier so
    // they are roughly equivalent to Object.
    // The number of implementors for declared_interface is less than or
    // equal to the number of implementors for target->holder(), so if the
    // number of implementors of target->holder() == 1 then the number of
    // implementors of declared_interface is 0 or 1. If it's 0 then no class
    // implements declared_interface and there's no point in inlining.
    if (call_does_dispatch && is_interface) {
      ciInstanceKlass* declared_interface = nullptr;
      if (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface) {
        // MemberName doesn't keep information about the resolved interface class (REFC)
        // once resolution is over, but the resolved method holder (DEFC) can be used
        // as a conservative approximation.
        declared_interface = callee->holder();
      } else {
        assert(!orig_callee->is_method_handle_intrinsic(), "not allowed");
        declared_interface = caller->get_declared_method_holder_at_bci(bci)->as_instance_klass();
      }
      assert(declared_interface->is_interface(), "required");
      ciInstanceKlass* singleton = declared_interface->unique_implementor();

      if (singleton != nullptr) {
        assert(singleton != declared_interface, "not a unique implementor");

        ciMethod* cha_monomorphic_target =
            callee->find_monomorphic_target(caller->holder(), declared_interface, singleton, check_access);

        if (cha_monomorphic_target != nullptr &&
            cha_monomorphic_target->holder() != env()->Object_klass()) { // subtype check against Object is useless
          ciKlass* holder = cha_monomorphic_target->holder();

          // Try to inline the method found by CHA. Inlined method is guarded by the type check.
          CallGenerator* hit_cg = call_generator(cha_monomorphic_target,
              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);

          // Deoptimize on type check failure. The interpreter will throw ICCE for us.
          CallGenerator* miss_cg = CallGenerator::for_uncommon_trap(callee,
              Deoptimization::Reason_class_check, Deoptimization::Action_none);

          ciKlass* constraint = (holder->is_subclass_of(singleton) ? holder : singleton); // avoid upcasts
          CallGenerator* cg = CallGenerator::for_guarded_call(constraint, miss_cg, hit_cg);
          if (hit_cg != nullptr && cg != nullptr) {
            dependencies()->assert_unique_implementor(declared_interface, singleton);
            dependencies()->assert_unique_concrete_method(declared_interface, cha_monomorphic_target, declared_interface, callee);
            return cg;
          }
        }
      }
    } // call_does_dispatch && is_interface

    // Nothing claimed the intrinsic; fall back to straightforward inlining
    // of the previously discovered intrinsic.
    if (allow_intrinsics && cg_intrinsic != nullptr) {
      assert(cg_intrinsic->does_virtual_dispatch(), "sanity");
      return cg_intrinsic;
    }
  } // allow_inline

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_does_dispatch) {
    const char* msg = "virtual call";
    C->inline_printer()->record(callee, jvms, InliningResult::FAILURE, msg);
    C->log_inline_failure(msg);
    if (IncrementalInlineVirtual && allow_inline) {
      return CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor); // attempt to inline through virtual call later
    } else {
      return CallGenerator::for_virtual_call(callee, vtable_index);
    }
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target, or it is a static or special call.
    CallGenerator* cg = CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
    // For optimized virtual calls assert at runtime that receiver object
    // is a subtype of the method holder.
    if (cg != nullptr && is_virtual_or_interface && !callee->is_static()) {
      CallGenerator* trap_cg = CallGenerator::for_uncommon_trap(callee,
          Deoptimization::Reason_receiver_constraint, Deoptimization::Action_none);
      cg = CallGenerator::for_guarded_call(callee->holder(), trap_cg, cg);
    }
    return cg;
  }
}

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code
      return false;
    }

    switch (call_method->intrinsic_id()) {
    case vmIntrinsics::_StringBuilder_void:
    case vmIntrinsics::_StringBuilder_int:
    case vmIntrinsics::_StringBuilder_String:
    case vmIntrinsics::_StringBuilder_append_char:
    case vmIntrinsics::_StringBuilder_append_int:
    case vmIntrinsics::_StringBuilder_append_String:
    case vmIntrinsics::_StringBuilder_toString:
    case vmIntrinsics::_StringBuffer_void:
    case vmIntrinsics::_StringBuffer_int:
    case vmIntrinsics::_StringBuffer_String:
    case vmIntrinsics::_StringBuffer_append_char:
    case vmIntrinsics::_StringBuffer_append_int:
    case vmIntrinsics::_StringBuffer_append_String:
    case vmIntrinsics::_StringBuffer_toString:
    case vmIntrinsics::_Integer_toString:
      return true;

    case vmIntrinsics::_String_String:
      {
        Node* receiver = jvms->map()->in(jvms->argoff() + 1);
        if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
          CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
          ciMethod* m = csj->method();
          if (m != nullptr &&
              (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
               m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString)) {
            // Delay String.<init>(new SB())
            return true;
          }
        }
        return false;
      }

    default:
      return false;
    }
  }
  return false;
}

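// Boxing methods (e.g. Integer.valueOf) are candidates for late inlining so
// that the box-elimination pass can first see the calls in their original form.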
bool Compile::should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms) {
  if (eliminate_boxing() && call_method->is_boxing_method()) {
    set_has_boxed_value(true);
    return aggressive_unboxing();
  }
  return false;
}

bool Compile::should_delay_vector_inlining(ciMethod* call_method, JVMState* jvms) {
  return EnableVectorSupport && call_method->is_vector_method();
}

bool Compile::should_delay_vector_reboxing_inlining(ciMethod* call_method, JVMState* jvms) {
  return EnableVectorSupport && (call_method->intrinsic_id() == vmIntrinsics::_VectorRebox);
}

// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod* dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc = bc()
  // caller = method()
  // iter().get_method_holder_index()
  assert(dest_method->is_loaded(), "ciTypeFlow should not let us get here");
  // Interface classes can be loaded & linked and never get around to
  // being initialized. Uncommon-trap for not-initialized static or
  // v-calls. Let interface calls happen.
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_being_initialized() &&
      !holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->is_loaded(), "dest_method: typeflow responsibility");
  return false;
}

#ifdef ASSERT
static bool check_call_consistency(JVMState* jvms, CallGenerator* cg) {
  ciMethod* symbolic_info = jvms->method()->get_method_at_bci(jvms->bci());
  ciMethod* resolved_method = cg->method();
  if (!ciMethod::is_consistent_info(symbolic_info, resolved_method)) {
    tty->print_cr("JVMS:");
    jvms->dump();
    tty->print_cr("Bytecode info:");
    jvms->method()->get_method_at_bci(jvms->bci())->print(); tty->cr();
    tty->print_cr("Resolved method:");
    cg->method()->print(); tty->cr();
    return false;
  }
  return true;
}
#endif // ASSERT

//------------------------------do_call----------------------------------------
// Handle your basic call. Inline if we can & want to, else just set up the call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a method that eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  const bool is_virtual = bc() == Bytecodes::_invokevirtual;
  const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  const bool has_receiver = Bytecodes::has_receiver(bc());

  // Find target being called
  bool will_link;
  ciSignature* declared_signature = nullptr;
  ciMethod* orig_callee = iter().get_method(will_link, &declared_signature); // callee in the bytecode
  ciInstanceKlass* holder_klass = orig_callee->holder();
  ciKlass* holder = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  assert(declared_signature != nullptr, "cannot be null");
  JFR_ONLY(Jfr::on_resolution(this, holder, orig_callee);)

  // Bump max node limit for JSR292 users
  if (bc() == Bytecodes::_invokedynamic || orig_callee->is_method_handle_intrinsic()) {
    C->set_max_node_limit(3*MaxNodeLimit);
  }

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      orig_callee->print_name(); tty->cr();
    }
    return;
  }
  assert(holder_klass->is_loaded(), "");
  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc"); // XXX invokehandle (cur_bc_raw)
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == nullptr || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note: In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // orig_callee is the resolved callee whose signature includes the
  // appendix argument.
  const int nargs = orig_callee->arg_size();
  const bool is_signature_polymorphic = MethodHandles::is_signature_polymorphic(orig_callee->intrinsic_id());
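  // Signature-polymorphic methods are those like MethodHandle.invoke/invokeExact,
  // whose effective type is determined by the call site rather than the declaration.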

  // Push appendix argument (MethodType, CallSite, etc.), if there is one.
  if (iter().has_appendix()) {
    ciObject* appendix_arg = iter().get_appendix();
    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg, /* require_const= */ true);
    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
    push(appendix_arg_node);
  }

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Try to get the most accurate receiver type
  ciMethod* callee = orig_callee;
  int vtable_index = Method::invalid_vtable_index;
  bool call_does_dispatch = false;

  // Speculative type of the receiver if any
  ciKlass* speculative_receiver_type = nullptr;
  if (is_virtual_or_interface) {
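    // The receiver sits nargs slots below the top of the expression stack:
    // it is the first of the nargs call arguments.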
    Node* receiver_node = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    // call_does_dispatch and vtable_index are out-parameters. They might be changed.
    // For arrays, klass below is Object. When vtable calls are used,
    // resolving the call with Object would allow an illegal call to
    // finalize() on an array. We use holder instead: illegal calls to
    // finalize() won't be compiled as vtable calls (IC call
    // resolution will catch the illegal call) and the few legal calls
    // on array types won't be either.
    callee = C->optimize_virtual_call(method(), klass, holder, orig_callee,
                                      receiver_type, is_virtual,
                                      call_does_dispatch, vtable_index); // out-parameters
    speculative_receiver_type = receiver_type != nullptr ? receiver_type->speculative_type() : nullptr;
  }

  // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface.
  ciKlass* receiver_constraint = nullptr;
  if (iter().cur_bc_raw() == Bytecodes::_invokespecial && !orig_callee->is_object_constructor()) {
    ciInstanceKlass* calling_klass = method()->holder();
    ciInstanceKlass* sender_klass = calling_klass;
    if (sender_klass->is_interface()) {
      receiver_constraint = sender_klass;
    }
  } else if (iter().cur_bc_raw() == Bytecodes::_invokeinterface && orig_callee->is_private()) {
    assert(holder->is_interface(), "How did we get a non-interface method here!");
    receiver_constraint = holder;
  }

  if (receiver_constraint != nullptr) {
    Node* receiver_node = stack(sp() - nargs);
    Node* cls_node = makecon(TypeKlassPtr::make(receiver_constraint, Type::trust_interfaces));
    Node* bad_type_ctrl = nullptr;
    SafePointNode* new_cast_failure_map = nullptr;
    Node* casted_receiver = gen_checkcast(receiver_node, cls_node, &bad_type_ctrl, &new_cast_failure_map);
    if (bad_type_ctrl != nullptr) {
      PreserveJVMState pjvms(this);
      if (new_cast_failure_map != nullptr) {
        // The current map on the success path could have been modified. Use the dedicated failure path map.
        set_map(new_cast_failure_map);
      }
      set_control(bad_type_ctrl);
      uncommon_trap(Deoptimization::Reason_class_check,
                    Deoptimization::Action_none);
    }
    if (stopped()) {
      return; // MUST uncommon-trap?
    }
    set_stack(sp() - nargs, casted_receiver);
  }

  // Note: It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  dec_sp(nargs); // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type);
  if (failing()) {
    return;
  }
  assert(cg != nullptr, "must find a CallGenerator for callee %s", callee->name()->as_utf8());

  // NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead.
  orig_callee = callee = nullptr;

  // ---------------------

  // Feed profiling data for arguments to the type system so it can
  // propagate it as speculative types
  record_profiled_arguments_for_speculation(cg->method(), bc());

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(), "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : nullptr;

  // The extra CheckCastPPs for speculative types mess with PhaseStringOpts
  if (receiver != nullptr && !call_does_dispatch && !cg->is_string_late_inline()) {
    // Feed profiling data for a single receiver to the type system so
    // it can propagate it as a speculative type
    receiver = record_profiled_receiver_for_speculation(receiver);
  }

  JVMState* new_jvms = cg->generate(jvms);
  if (new_jvms == nullptr) {
    // When an inlining attempt fails (e.g., too many arguments), it may
    // contaminate the current compile state, making it impossible to pull
    // back and try again. Once we call cg->generate(), we are committed.
    // If it fails, the whole compilation task is compromised.
    if (failing()) return;

    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize. It should always be possible
    // to get a normal Java call that may inline in that case.
    cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
    new_jvms = cg->generate(jvms);
    if (new_jvms == nullptr) {
      guarantee(failing(), "call failed to generate: calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->env()->notice_inlined_method(cg->method());
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  assert(check_call_consistency(jvms, cg), "inconsistent info");

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != nullptr && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    ciType* rtype = cg->method()->return_type();
    ciType* ctype = declared_signature->return_type();

    if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) {
      // Be careful here with return types.
      if (ctype != rtype) {
        BasicType rt = rtype->basic_type();
        BasicType ct = ctype->basic_type();
        if (ct == T_VOID) {
          // It's OK for a method to return a value that is discarded.
          // The discarding does not require any special action from the caller.
          // The Java code knows this, at VerifyType.isNullConversion.
          pop_node(rt); // whatever it was, pop it
        } else if (rt == T_INT || is_subword_type(rt)) {
          // Nothing. These cases are handled in lambda form bytecode.
          assert(ct == T_INT || is_subword_type(ct), "must match: rt=%s, ct=%s", type2name(rt), type2name(ct));
        } else if (is_reference_type(rt)) {
          assert(is_reference_type(ct), "rt=%s, ct=%s", type2name(rt), type2name(ct));
          if (ctype->is_loaded()) {
            const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass());
            const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
            if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) {
              Node* retnode = pop();
              Node* cast_obj = _gvn.transform(new CheckCastPPNode(control(), retnode, sig_type));
              push(cast_obj);
            }
          }
        } else {
          assert(rt == ct, "unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct));
          // push a zero; it's better than getting an oop/int mismatch
          pop_node(rt);
          Node* retnode = zerocon(ct);
          push_node(ct, retnode);
        }
        // Now that the value is well-behaved, continue with the call-site type.
        rtype = ctype;
      }
    } else {
      // Symbolic resolution enforces the types to be the same.
      // NOTE: We must relax the assert for unloaded types because two
      // different ciType instances of the same unloaded class type
      // can appear to be "loaded" by different loaders (depending on
      // the accessing class).
      assert(!rtype->is_loaded() || !ctype->is_loaded() || rtype == ctype,
             "mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name());
    }

    // If the return type of the method is not loaded, assert that the
    // value we got is a null. Otherwise, we need to recompile.
    if (!rtype->is_loaded()) {
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        cg->method()->print_name(); tty->cr();
      }
      if (C->log() != nullptr) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(rtype));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      null_assert(peek());
      set_bci(iter().cur_bci()); // put it back
    }
    BasicType ct = ctype->basic_type();
    if (is_reference_type(ct)) {
      record_profiled_return_for_speculation();
    }

    if (!rtype->is_void()) {
      Node* retnode = peek();
      const Type* rettype = gvn().type(retnode);
      if (!cg->method()->return_value_is_larval() && !retnode->is_InlineType() && rettype->is_inlinetypeptr()) {
        retnode = InlineTypeNode::make_from_oop(this, retnode, rettype->inline_klass());
        dec_sp(1);
        push(retnode);
      }
    }

    if (cg->method()->receiver_maybe_larval() && receiver != nullptr &&
        !receiver->is_InlineType() && gvn().type(receiver)->is_inlinetypeptr()) {
      InlineTypeNode* non_larval = InlineTypeNode::make_from_oop(this, receiver, gvn().type(receiver)->inline_klass());
      // Relinquish the oop input; we will delay the allocation to the point it
      // is needed, see the comments in InlineTypeNode::Ideal for more details.
      non_larval = non_larval->clone_if_required(&gvn(), nullptr);
      non_larval->set_oop(gvn(), null());
      non_larval->set_is_buffered(gvn(), false);
      non_larval = gvn().transform(non_larval)->as_InlineType();
      map()->replace_edge(receiver, non_larval);
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}

//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
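  // Collect, per handler: its bci, the type of exception it catches, and the
  // bcis of handlers whose catch klass is unloaded (handled via uncommon traps).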
  Arena tmp_mem{mtCompiler};
  GrowableArray<int> bcis(&tmp_mem, 8, 0, -1);
  GrowableArray<const Type*> extypes(&tmp_mem, 8, 0, nullptr);
  GrowableArray<int> saw_unloaded(&tmp_mem, 8, 0, -1);

  bool default_handler = false;
  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h = handlers.handler();
    int h_bci = h->handler_bci();
    ciInstanceKlass* h_klass = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded.contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate. Duplication will cause the CatchNode to be
           unnecessarily large. See 4713716. */
        continue;
      } else {
        saw_unloaded.append(h_bci);
      }
    }
    const Type* h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note: It's OK if the BCIs repeat themselves.
    bcis.append(h_bci);
    extypes.append(h_extype);
    if (h_bci == -1) {
      default_handler = true;
    }
  }

  if (!default_handler) {
    bcis.append(-1);
    const Type* extype = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();
    extype = extype->join(TypeInstPtr::NOTNULL);
    extypes.append(extype);
  }

  int len = bcis.length();
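  // The CatchNode gets len+1 projections: one per handler entry plus the
  // fall-through projection for the normal (non-exceptional) return.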
  CatchNode* cn = new CatchNode(control(), i_o, len+1);
  Node* catch_ = _gvn.transform(cn);

  // now branch with the exception state to each of the (potential)
  // handlers
  for (int i = 0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis.at(i);
    Node* ctrl = _gvn.transform(new CatchProjNode(catch_, i+1, handler_bci));
    // This handler cannot happen?
    if (ctrl == top()) continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes.at(i)->is_instptr();
    Node* ex_oop = _gvn.transform(new CreateExNode(extypes.at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded.contains(handler_bci)) {
      // An unloaded exception type is coming here. Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions. But, watch for it.
      if (PrintOpto && (Verbose || WizardMode) && extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", bci());
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->instance_klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->instance_klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) { // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else { // else jump to the corresponding handler
      push_and_merge_exception(handler_bci, ex_oop);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note: If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform(new CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}


//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses. We do a Deutsch-Schiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: we have some handlers with subklasses, or whose klasses are not
// loaded at compile-time. We have to call the runtime to resolve the exception.
// So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(nullptr) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type == nullptr) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == nullptr) {
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();
  }

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state. (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = nullptr;
  if (has_exception_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr(ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));

    // Compute the exception klass a little more cleverly.
    // The obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi. If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if (ex_node->is_Phi()) {
      ex_klass_node = new PhiNode(ex_node->in(0), TypeInstKlassPtr::OBJECT);
      for (uint i = 1; i < ex_node->req(); i++) {
        Node* ex_in = ex_node->in(i);
        if (ex_in == top() || ex_in == nullptr) {
          // This path was not taken.
          ex_klass_node->init_req(i, top());
          continue;
        }
        Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
        Node* k = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
        ex_klass_node->init_req(i, k);
      }
      ex_klass_node = _gvn.transform(ex_klass_node);
    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method. We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      if (PrintOpto && WizardMode) {
        tty->print_cr(" Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
      push_and_merge_exception(handler_bci, ex_node); // jump to handler
      return; // No more handling to be done here!
    }

    // Get the handler's klass
    ciInstanceKlass* klass = handler->catch_klass();

    if (!klass->is_loaded()) { // klass is not loaded?
      // fall through into catch_call_exceptions which will emit a
      // handler with an uncommon trap.
      break;
    }

    if (klass->is_interface()) { // should not happen, but...
      break;                     // bail out
    }

    // Check the type of the exception against the catch type
    const TypeKlassPtr* tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
    if (!stopped()) {
      PreserveJVMState pjvms(this);
      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
      Node* ex_oop = _gvn.transform(new CheckCastPPNode(control(), ex_node, tinst));
      if (PrintOpto && WizardMode) {
        tty->print(" Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
      // If this is a backwards branch in the bytecodes, add safepoint
      push_and_merge_exception(handler_bci, ex_oop);
    }
    set_control(not_subtype_ctrl);

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  // Oops, need to call into the VM to resolve the klasses at runtime.
  kill_dead_locals();

  {
    PreserveReexecuteState preexecs(this);
    // When throwing an exception, set the reexecute flag for deoptimization.
    // This is mostly needed to pass -XX:+VerifyStack sanity checks.
    jvms()->set_should_reexecute(true);

    make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                      OptoRuntime::rethrow_Type(),
                      OptoRuntime::rethrow_stub(),
                      nullptr, nullptr,
                      ex_node);
  }

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}


// (Note: Moved add_debug_info into GraphKit::add_safepoint_edges.)


#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if (CountCompiledCalls) {
    if (at_method_entry) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypePtr* addr_type = TypeMetadataPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(Method::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif // PRODUCT


ciMethod* Compile::optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klass,
                                         ciKlass* holder, ciMethod* callee,
                                         const TypeOopPtr* receiver_type, bool is_virtual,
                                         bool& call_does_dispatch, int& vtable_index,
                                         bool check_access) {
  // Set default values for out-parameters.
  call_does_dispatch = true;
  vtable_index = Method::invalid_vtable_index;

  // Choose call strategy.
  ciMethod* optimized_virtual_method = optimize_inlining(caller, klass, holder, callee,
                                                         receiver_type, check_access);

  // Has the call been sufficiently improved such that it is no longer virtual?
  if (optimized_virtual_method != nullptr) {
    callee = optimized_virtual_method;
    call_does_dispatch = false;
  } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
    // We can make a vtable call at this site
    vtable_index = callee->resolve_vtable_index(caller->holder(), holder);
  }
  return callee;
}

// Identify possible target method and inlining style
ciMethod* Compile::optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, ciKlass* holder,
                                     ciMethod* callee, const TypeOopPtr* receiver_type,
                                     bool check_access) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes. Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (callee->can_be_statically_bound()) {
    return callee;
  }

  if (receiver_type == nullptr) {
    return nullptr; // no receiver type info
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  // Array methods are all inherited from Object, and are monomorphic.
  // finalize() call on array is not allowed.
  if (receiver_type->isa_aryptr() &&
      callee->holder() == env()->Object_klass() &&
      callee->name() != ciSymbols::finalize_method_name()) {
    return callee;
  }

  // All other interesting cases are instance klasses.
  if (!receiver_type->isa_instptr()) {
    return nullptr;
  }

  ciInstanceKlass* receiver_klass = receiver_type->is_instptr()->instance_klass();
  if (receiver_klass->is_loaded() && receiver_klass->is_initialized() && !receiver_klass->is_interface() &&
      (receiver_klass == actual_receiver || receiver_klass->is_subtype_of(actual_receiver))) {
    // receiver_klass is the same or a better type than the original
    // actual_receiver, e.g. the static receiver from the bytecodes.
    actual_receiver = receiver_klass;
    // Is the actual_receiver exact?
    actual_receiver_is_exact = receiver_type->klass_is_exact();
  }

  ciInstanceKlass* calling_klass = caller->holder();
  ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver, check_access);

  if (cha_monomorphic_target != nullptr) {
    // Hardwiring a virtual.
    assert(!callee->can_be_statically_bound(), "should have been handled earlier");
    assert(!cha_monomorphic_target->is_abstract(), "");
    if (!cha_monomorphic_target->can_be_statically_bound(actual_receiver)) {
      // If we inlined because CHA revealed only a single target method,
      // then we are dependent on that target method not getting overridden
      // by dynamic class loading. Be sure to test the "static" receiver
      // dest_method here, as opposed to the actual receiver, which may
      // falsely lead us to believe that the receiver is final or private.
      dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target, holder, callee);
    }
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != nullptr) {
      return exact_method;
    }
  }

  return nullptr;
}