1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "compiler/compileLog.hpp"
26 #include "interpreter/linkResolver.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/method.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/c2compiler.hpp"
31 #include "opto/castnode.hpp"
32 #include "opto/convertnode.hpp"
33 #include "opto/idealGraphPrinter.hpp"
34 #include "opto/inlinetypenode.hpp"
35 #include "opto/locknode.hpp"
36 #include "opto/memnode.hpp"
37 #include "opto/opaquenode.hpp"
38 #include "opto/parse.hpp"
39 #include "opto/rootnode.hpp"
40 #include "opto/runtime.hpp"
41 #include "opto/type.hpp"
42 #include "runtime/arguments.hpp"
43 #include "runtime/handles.inline.hpp"
44 #include "runtime/safepointMechanism.hpp"
45 #include "runtime/sharedRuntime.hpp"
46 #include "utilities/bitMap.inline.hpp"
47 #include "utilities/copy.hpp"
48
49 // Static array so we can figure out which bytecodes stop us from compiling
50 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
51 // and eventually should be encapsulated in a proper class (gri 8/18/98).
52
53 #ifndef PRODUCT
54 uint nodes_created = 0;
55 uint methods_parsed = 0;
56 uint methods_seen = 0;
57 uint blocks_parsed = 0;
58 uint blocks_seen = 0;
59
60 uint explicit_null_checks_inserted = 0;
61 uint explicit_null_checks_elided = 0;
62 uint all_null_checks_found = 0;
63 uint implicit_null_checks = 0;
64
65 bool Parse::BytecodeParseHistogram::_initialized = false;
66 uint Parse::BytecodeParseHistogram::_bytecodes_parsed [Bytecodes::number_of_codes];
67 uint Parse::BytecodeParseHistogram::_nodes_constructed[Bytecodes::number_of_codes];
68 uint Parse::BytecodeParseHistogram::_nodes_transformed[Bytecodes::number_of_codes];
69 uint Parse::BytecodeParseHistogram::_new_values [Bytecodes::number_of_codes];
70
71 //------------------------------print_statistics-------------------------------
72 void Parse::print_statistics() {
73 tty->print_cr("--- Compiler Statistics ---");
74 tty->print("Methods seen: %u Methods parsed: %u", methods_seen, methods_parsed);
75 tty->print(" Nodes created: %u", nodes_created);
76 tty->cr();
77 if (methods_seen != methods_parsed) {
78 tty->print_cr("Reasons for parse failures (NOT cumulative):");
79 }
80 tty->print_cr("Blocks parsed: %u Blocks seen: %u", blocks_parsed, blocks_seen);
81
82 if (explicit_null_checks_inserted) {
83 tty->print_cr("%u original null checks - %u elided (%2u%%); optimizer leaves %u,",
84 explicit_null_checks_inserted, explicit_null_checks_elided,
85 (100*explicit_null_checks_elided)/explicit_null_checks_inserted,
86 all_null_checks_found);
87 }
88 if (all_null_checks_found) {
89 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
90 (100*implicit_null_checks)/all_null_checks_found);
91 }
92 if (SharedRuntime::_implicit_null_throws) {
93 tty->print_cr("%u implicit null exceptions at runtime",
94 SharedRuntime::_implicit_null_throws);
95 }
96
97 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
98 BytecodeParseHistogram::print();
99 }
100 }
101 #endif
102
103 //------------------------------ON STACK REPLACEMENT---------------------------
104
105 // Construct a node which can be used to get incoming state for
106 // on stack replacement.
107 Node* Parse::fetch_interpreter_state(int index,
108 const Type* type,
109 Node* local_addrs,
110 Node* local_addrs_base) {
111 BasicType bt = type->basic_type();
112 if (type == TypePtr::NULL_PTR) {
113 // Ptr types are mixed together with T_ADDRESS but nullptr is
114 // really for T_OBJECT types so correct it.
115 bt = T_OBJECT;
116 }
117 Node *mem = memory(Compile::AliasIdxRaw);
118 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
119 Node *ctl = control();
120
121 // Very similar to LoadNode::make, except we handle un-aligned longs and
122 // doubles on Sparc. Intel can handle them just fine directly.
123 Node *l = nullptr;
124 switch (bt) { // Signature is flattened
125 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
126 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
127 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
128 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
129 case T_LONG:
130 case T_DOUBLE: {
131 // Since arguments are in reverse order, the argument address 'adr'
132 // refers to the back half of the long/double. Recompute adr.
133 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
134 if (Matcher::misaligned_doubles_ok) {
135 l = (bt == T_DOUBLE)
136 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
137 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
138 } else {
139 l = (bt == T_DOUBLE)
140 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
141 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
142 }
143 break;
144 }
145 default: ShouldNotReachHere();
146 }
147 return _gvn.transform(l);
148 }
149
150 // Helper routine to prevent the interpreter from handing
151 // unexpected typestate to an OSR method.
152 // The Node l is a value newly dug out of the interpreter frame.
153 // The type is the type predicted by ciTypeFlow. Note that it is
154 // not a general type, but can only come from Type::get_typeflow_type.
155 // The safepoint is a map which will feed an uncommon trap.
156 Node* Parse::check_interpreter_type(Node* l, const Type* type, const TypeKlassPtr* klass_type,
157 SafePointNode* &bad_type_exit, bool is_early_larval) {
158 const TypeOopPtr* tp = type->isa_oopptr();
159
160 // TypeFlow may assert null-ness if a type appears unloaded.
161 if (type == TypePtr::NULL_PTR ||
162 (tp != nullptr && !tp->is_loaded())) {
163 // Value must be null, not a real oop.
164 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
165 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
166 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
167 set_control(_gvn.transform( new IfTrueNode(iff) ));
168 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
169 bad_type_exit->control()->add_req(bad_type);
170 l = null();
171 }
172
173 // Typeflow can also cut off paths from the CFG, based on
174 // types which appear unloaded, or call sites which appear unlinked.
175 // When paths are cut off, values at later merge points can rise
176 // toward more specific classes. Make sure these specific classes
177 // are still in effect.
178 if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
179 // TypeFlow asserted a specific object type. Value must have that type.
180 Node* bad_type_ctrl = nullptr;
181 if (tp->is_inlinetypeptr() && !tp->maybe_null()) {
182 // Check inline types for null here to prevent checkcast from adding an
183 // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
184 l = null_check_oop(l, &bad_type_ctrl);
185 bad_type_exit->control()->add_req(bad_type_ctrl);
186 }
187
188 l = gen_checkcast(l, makecon(klass_type), &bad_type_ctrl, false, is_early_larval);
189 bad_type_exit->control()->add_req(bad_type_ctrl);
190 }
191
192 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
193 return l;
194 }
195
196 // Helper routine which sets up elements of the initial parser map when
197 // performing a parse for on stack replacement. Add values into map.
198 // The only parameter contains the address of a interpreter arguments.
199 void Parse::load_interpreter_state(Node* osr_buf) {
200 int index;
201 int max_locals = jvms()->loc_size();
202 int max_stack = jvms()->stk_size();
203
204 // Mismatch between method and jvms can occur since map briefly held
205 // an OSR entry state (which takes up one RawPtr word).
206 assert(max_locals == method()->max_locals(), "sanity");
207 assert(max_stack >= method()->max_stack(), "sanity");
208 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
209 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
210
211 // Find the start block.
212 Block* osr_block = start_block();
213 assert(osr_block->start() == osr_bci(), "sanity");
214
215 // Set initial BCI.
216 set_parse_bci(osr_block->start());
217
218 // Set initial stack depth.
219 set_sp(osr_block->start_sp());
220
221 // Check bailouts. We currently do not perform on stack replacement
222 // of loops in catch blocks or loops which branch with a non-empty stack.
223 if (sp() != 0) {
224 C->record_method_not_compilable("OSR starts with non-empty stack");
225 return;
226 }
227 // Do not OSR inside finally clauses:
228 if (osr_block->has_trap_at(osr_block->start())) {
229 assert(false, "OSR starts with an immediate trap");
230 C->record_method_not_compilable("OSR starts with an immediate trap");
231 return;
232 }
233
234 // Commute monitors from interpreter frame to compiler frame.
235 assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
236 int mcnt = osr_block->flow()->monitor_count();
237 Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
238 for (index = 0; index < mcnt; index++) {
239 // Make a BoxLockNode for the monitor.
240 BoxLockNode* osr_box = new BoxLockNode(next_monitor());
241 // Check for bailout after new BoxLockNode
242 if (failing()) { return; }
243
244 // This OSR locking region is unbalanced because it does not have Lock node:
245 // locking was done in Interpreter.
246 // This is similar to Coarsened case when Lock node is eliminated
247 // and as result the region is marked as Unbalanced.
248
249 // Emulate Coarsened state transition from Regular to Unbalanced.
250 osr_box->set_coarsened();
251 osr_box->set_unbalanced();
252
253 Node* box = _gvn.transform(osr_box);
254
255 // Displaced headers and locked objects are interleaved in the
256 // temp OSR buffer. We only copy the locked objects out here.
257 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
258 Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
259 // Try and copy the displaced header to the BoxNode
260 Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
261
262 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, MemNode::unordered);
263
264 // Build a bogus FastLockNode (no code will be generated) and push the
265 // monitor into our debug info.
266 const FastLockNode *flock = _gvn.transform(new FastLockNode( nullptr, lock_object, box ))->as_FastLock();
267 map()->push_monitor(flock);
268
269 // If the lock is our method synchronization lock, tuck it away in
270 // _sync_lock for return and rethrow exit paths.
271 if (index == 0 && method()->is_synchronized()) {
272 _synch_lock = flock;
273 }
274 }
275
276 // Use the raw liveness computation to make sure that unexpected
277 // values don't propagate into the OSR frame.
278 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
279 if (!live_locals.is_valid()) {
280 // Degenerate or breakpointed method.
281 assert(false, "OSR in empty or breakpointed method");
282 C->record_method_not_compilable("OSR in empty or breakpointed method");
283 return;
284 }
285
286 // Extract the needed locals from the interpreter frame.
287 Node *locals_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals-1)*wordSize);
288
289 // find all the locals that the interpreter thinks contain live oops
290 const ResourceBitMap live_oops = method()->live_local_oops_at_bci(osr_bci());
291 for (index = 0; index < max_locals; index++) {
292
293 if (!live_locals.at(index)) {
294 continue;
295 }
296
297 const Type *type = osr_block->local_type_at(index);
298
299 if (type->isa_oopptr() != nullptr) {
300
301 // 6403625: Verify that the interpreter oopMap thinks that the oop is live
302 // else we might load a stale oop if the MethodLiveness disagrees with the
303 // result of the interpreter. If the interpreter says it is dead we agree
304 // by making the value go to top.
305 //
306
307 if (!live_oops.at(index)) {
308 if (C->log() != nullptr) {
309 C->log()->elem("OSR_mismatch local_index='%d'",index);
310 }
311 set_local(index, null());
312 // and ignore it for the loads
313 continue;
314 }
315 }
316
317 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
318 if (type == Type::TOP || type == Type::HALF) {
319 continue;
320 }
321 // If the type falls to bottom, then this must be a local that
322 // is mixing ints and oops or some such. Forcing it to top
323 // makes it go dead.
324 if (type == Type::BOTTOM) {
325 continue;
326 }
327 // Construct code to access the appropriate local.
328 Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);
329 set_local(index, value);
330 }
331
332 // Extract the needed stack entries from the interpreter frame.
333 for (index = 0; index < sp(); index++) {
334 const Type *type = osr_block->stack_type_at(index);
335 if (type != Type::TOP) {
336 // Currently the compiler bails out when attempting to on stack replace
337 // at a bci with a non-empty stack. We should not reach here.
338 ShouldNotReachHere();
339 }
340 }
341
342 // End the OSR migration
343 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
344 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
345 "OSR_migration_end", TypeRawPtr::BOTTOM,
346 osr_buf);
347
348 // Now that the interpreter state is loaded, make sure it will match
349 // at execution time what the compiler is expecting now:
350 SafePointNode* bad_type_exit = clone_map();
351 bad_type_exit->set_control(new RegionNode(1));
352
353 assert(osr_block->flow()->jsrs()->size() == 0, "should be no jsrs live at osr point");
354 for (index = 0; index < max_locals; index++) {
355 if (stopped()) break;
356 Node* l = local(index);
357 if (l->is_top()) continue; // nothing here
358 const Type *type = osr_block->local_type_at(index);
359 if (type->isa_oopptr() != nullptr) {
360 if (!live_oops.at(index)) {
361 // skip type check for dead oops
362 continue;
363 }
364 }
365 if (osr_block->flow()->local_type_at(index)->is_return_address()) {
366 // In our current system it's illegal for jsr addresses to be
367 // live into an OSR entry point because the compiler performs
368 // inlining of jsrs. ciTypeFlow has a bailout that detect this
369 // case and aborts the compile if addresses are live into an OSR
370 // entry point. Because of that we can assume that any address
371 // locals at the OSR entry point are dead. Method liveness
372 // isn't precise enough to figure out that they are dead in all
373 // cases so simply skip checking address locals all
374 // together. Any type check is guaranteed to fail since the
375 // interpreter type is the result of a load which might have any
376 // value and the expected type is a constant.
377 continue;
378 }
379 const TypeKlassPtr* klass_type = nullptr;
380 if (type->isa_oopptr()) {
381 klass_type = TypeKlassPtr::make(osr_block->flow()->local_type_at(index)->unwrap()->as_klass(), Type::ignore_interfaces);
382 klass_type = klass_type->try_improve();
383 }
384 bool is_early_larval = osr_block->flow()->local_type_at(index)->is_early_larval();
385 set_local(index, check_interpreter_type(l, type, klass_type, bad_type_exit, is_early_larval));
386 }
387
388 for (index = 0; index < sp(); index++) {
389 if (stopped()) break;
390 Node* l = stack(index);
391 if (l->is_top()) continue; // nothing here
392 const Type* type = osr_block->stack_type_at(index);
393 const TypeKlassPtr* klass_type = nullptr;
394 if (type->isa_oopptr()) {
395 klass_type = TypeKlassPtr::make(osr_block->flow()->stack_type_at(index)->unwrap()->as_klass(), Type::ignore_interfaces);
396 klass_type = klass_type->try_improve();
397 }
398 bool is_early_larval = osr_block->flow()->stack_type_at(index)->is_early_larval();
399 set_stack(index, check_interpreter_type(l, type, klass_type, bad_type_exit, is_early_larval));
400 }
401
402 if (bad_type_exit->control()->req() > 1) {
403 // Build an uncommon trap here, if any inputs can be unexpected.
404 bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
405 record_for_igvn(bad_type_exit->control());
406 SafePointNode* types_are_good = map();
407 set_map(bad_type_exit);
408 // The unexpected type happens because a new edge is active
409 // in the CFG, which typeflow had previously ignored.
410 // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
411 // This x will be typed as Integer if notReached is not yet linked.
412 // It could also happen due to a problem in ciTypeFlow analysis.
413 uncommon_trap(Deoptimization::Reason_constraint,
414 Deoptimization::Action_reinterpret);
415 set_map(types_are_good);
416 }
417 }
418
419 //------------------------------Parse------------------------------------------
420 // Main parser constructor.
421 Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
422 : _exits(caller)
423 {
424 // Init some variables
425 _caller = caller;
426 _method = parse_method;
427 _expected_uses = expected_uses;
428 _depth = 1 + (caller->has_method() ? caller->depth() : 0);
429 _wrote_final = false;
430 _wrote_volatile = false;
431 _wrote_stable = false;
432 _wrote_fields = false;
433 _alloc_with_final_or_stable = nullptr;
434 _block = nullptr;
435 _first_return = true;
436 _replaced_nodes_for_exceptions = false;
437 _new_idx = C->unique();
438 DEBUG_ONLY(_entry_bci = UnknownBci);
439 DEBUG_ONLY(_block_count = -1);
440 DEBUG_ONLY(_blocks = (Block*)-1);
441 #ifndef PRODUCT
442 if (PrintCompilation || PrintOpto) {
443 // Make sure I have an inline tree, so I can print messages about it.
444 InlineTree::find_subtree_from_root(C->ilt(), caller, parse_method);
445 }
446 _max_switch_depth = 0;
447 _est_switch_depth = 0;
448 #endif
449
450 if (parse_method->has_reserved_stack_access()) {
451 C->set_has_reserved_stack_access(true);
452 }
453
454 if (parse_method->is_synchronized() || parse_method->has_monitor_bytecodes()) {
455 C->set_has_monitors(true);
456 }
457
458 if (parse_method->is_scoped()) {
459 C->set_has_scoped_access(true);
460 }
461
462 _iter.reset_to_method(method());
463 C->set_has_loops(C->has_loops() || method()->has_loops());
464
465 if (_expected_uses <= 0) {
466 _prof_factor = 1;
467 } else {
468 float prof_total = parse_method->interpreter_invocation_count();
469 if (prof_total <= _expected_uses) {
470 _prof_factor = 1;
471 } else {
472 _prof_factor = _expected_uses / prof_total;
473 }
474 }
475
476 CompileLog* log = C->log();
477 if (log != nullptr) {
478 log->begin_head("parse method='%d' uses='%f'",
479 log->identify(parse_method), expected_uses);
480 if (depth() == 1 && C->is_osr_compilation()) {
481 log->print(" osr_bci='%d'", C->entry_bci());
482 }
483 log->stamp();
484 log->end_head();
485 }
486
487 // Accumulate deoptimization counts.
488 // (The range_check and store_check counts are checked elsewhere.)
489 ciMethodData* md = method()->method_data();
490 for (uint reason = 0; reason < md->trap_reason_limit(); reason++) {
491 uint md_count = md->trap_count(reason);
492 if (md_count != 0) {
493 if (md_count >= md->trap_count_limit()) {
494 md_count = md->trap_count_limit() + md->overflow_trap_count();
495 }
496 uint total_count = C->trap_count(reason);
497 uint old_count = total_count;
498 total_count += md_count;
499 // Saturate the add if it overflows.
500 if (total_count < old_count || total_count < md_count)
501 total_count = (uint)-1;
502 C->set_trap_count(reason, total_count);
503 if (log != nullptr)
504 log->elem("observe trap='%s' count='%d' total='%d'",
505 Deoptimization::trap_reason_name(reason),
506 md_count, total_count);
507 }
508 }
509 // Accumulate total sum of decompilations, also.
510 C->set_decompile_count(C->decompile_count() + md->decompile_count());
511
512 if (log != nullptr && method()->has_exception_handlers()) {
513 log->elem("observe that='has_exception_handlers'");
514 }
515
516 assert(InlineTree::check_can_parse(method()) == nullptr, "Can not parse this method, cutout earlier");
517 assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");
518
519 // Always register dependence if JVMTI is enabled, because
520 // either breakpoint setting or hotswapping of methods may
521 // cause deoptimization.
522 if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
523 C->dependencies()->assert_evol_method(method());
524 }
525
526 NOT_PRODUCT(methods_seen++);
527
528 // Do some special top-level things.
529 if (depth() == 1 && C->is_osr_compilation()) {
530 _tf = C->tf(); // the OSR entry type is different
531 _entry_bci = C->entry_bci();
532 _flow = method()->get_osr_flow_analysis(osr_bci());
533 } else {
534 _tf = TypeFunc::make(method());
535 _entry_bci = InvocationEntryBci;
536 _flow = method()->get_flow_analysis();
537 }
538
539 if (_flow->failing()) {
540 // TODO Adding a trap due to an unloaded return type in ciTypeFlow::StateVector::do_invoke
541 // can lead to this. Re-enable once 8284443 is fixed.
542 //assert(false, "type flow analysis failed during parsing");
543 C->record_method_not_compilable(_flow->failure_reason());
544 #ifndef PRODUCT
545 if (PrintOpto && (Verbose || WizardMode)) {
546 if (is_osr_parse()) {
547 tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
548 } else {
549 tty->print_cr("type flow bailout: %s", _flow->failure_reason());
550 }
551 if (Verbose) {
552 method()->print();
553 method()->print_codes();
554 _flow->print();
555 }
556 }
557 #endif
558 }
559
560 #ifdef ASSERT
561 if (depth() == 1) {
562 assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
563 } else {
564 assert(!this->is_osr_parse(), "no recursive OSR");
565 }
566 #endif
567
568 #ifndef PRODUCT
569 if (_flow->has_irreducible_entry()) {
570 C->set_parsed_irreducible_loop(true);
571 }
572
573 methods_parsed++;
574 // add method size here to guarantee that inlined methods are added too
575 if (CITime)
576 _total_bytes_compiled += method()->code_size();
577
578 show_parse_info();
579 #endif
580
581 if (failing()) {
582 if (log) log->done("parse");
583 return;
584 }
585
586 gvn().transform(top());
587
588 // Import the results of the ciTypeFlow.
589 init_blocks();
590
591 // Merge point for all normal exits
592 build_exits();
593
594 // Setup the initial JVM state map.
595 SafePointNode* entry_map = create_entry_map();
596
597 // Check for bailouts during map initialization
598 if (failing() || entry_map == nullptr) {
599 if (log) log->done("parse");
600 return;
601 }
602
603 Node_Notes* caller_nn = C->default_node_notes();
604 // Collect debug info for inlined calls unless -XX:-DebugInlinedCalls.
605 if (DebugInlinedCalls || depth() == 1) {
606 C->set_default_node_notes(make_node_notes(caller_nn));
607 }
608
609 if (is_osr_parse()) {
610 Node* osr_buf = entry_map->in(TypeFunc::Parms+0);
611 entry_map->set_req(TypeFunc::Parms+0, top());
612 set_map(entry_map);
613 load_interpreter_state(osr_buf);
614 } else {
615 set_map(entry_map);
616 do_method_entry();
617 }
618
619 if (depth() == 1 && !failing()) {
620 if (C->clinit_barrier_on_entry()) {
621 // Add check to deoptimize the nmethod once the holder class is fully initialized
622 clinit_deopt();
623 }
624 }
625
626 // Check for bailouts during method entry.
627 if (failing()) {
628 if (log) log->done("parse");
629 C->set_default_node_notes(caller_nn);
630 return;
631 }
632
633 // Handle inline type arguments
634 int arg_size = method()->arg_size();
635 for (int i = 0; i < arg_size; i++) {
636 Node* parm = local(i);
637 const Type* t = _gvn.type(parm);
638 if (t->is_inlinetypeptr()) {
639 // If the parameter is a value object, try to scalarize it if we know that it is unrestricted (not early larval)
640 // Parameters are non-larval except the receiver of a constructor, which must be an early larval object.
641 if (!(method()->is_object_constructor() && i == 0)) {
642 // Create InlineTypeNode from the oop and replace the parameter
643 Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass());
644 replace_in_map(parm, vt);
645 }
646 } else if (UseTypeSpeculation && (i == (arg_size - 1)) && !is_osr_parse() && method()->has_vararg() &&
647 t->isa_aryptr() != nullptr && !t->is_aryptr()->is_null_free() && !t->is_aryptr()->is_flat() &&
648 (!t->is_aryptr()->is_not_null_free() || !t->is_aryptr()->is_not_flat())) {
649 // Speculate on varargs Object array being not null-free and not flat
650 const TypePtr* spec_type = t->speculative();
651 spec_type = (spec_type != nullptr && spec_type->isa_aryptr() != nullptr) ? spec_type : t->is_aryptr();
652 spec_type = spec_type->remove_speculative()->is_aryptr()->cast_to_not_null_free()->cast_to_not_flat();
653 spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, spec_type);
654 Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, t->join_speculative(spec_type)));
655 replace_in_map(parm, cast);
656 }
657 }
658
659 entry_map = map(); // capture any changes performed by method setup code
660 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
661
662 // We begin parsing as if we have just encountered a jump to the
663 // method entry.
664 Block* entry_block = start_block();
665 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
666 set_map_clone(entry_map);
667 merge_common(entry_block, entry_block->next_path_num());
668
669 #ifndef PRODUCT
670 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
671 set_parse_histogram( parse_histogram_obj );
672 #endif
673
674 // Parse all the basic blocks.
675 do_all_blocks();
676
677 // Check for bailouts during conversion to graph
678 if (failing()) {
679 if (log) log->done("parse");
680 return;
681 }
682
683 // Fix up all exiting control flow.
684 set_map(entry_map);
685 do_exits();
686
687 // Only reset this now, to make sure that debug information emitted
688 // for exiting control flow still refers to the inlined method.
689 C->set_default_node_notes(caller_nn);
690
691 if (log) log->done("parse nodes='%d' live='%d' memory='%zu'",
692 C->unique(), C->live_nodes(), C->node_arena()->used());
693 }
694
695 //---------------------------do_all_blocks-------------------------------------
696 void Parse::do_all_blocks() {
697 bool has_irreducible = flow()->has_irreducible_entry();
698
699 // Walk over all blocks in Reverse Post-Order.
700 while (true) {
701 bool progress = false;
702 for (int rpo = 0; rpo < block_count(); rpo++) {
703 Block* block = rpo_at(rpo);
704
705 if (block->is_parsed()) continue;
706
707 if (!block->is_merged()) {
708 // Dead block, no state reaches this block
709 continue;
710 }
711
712 // Prepare to parse this block.
713 load_state_from(block);
714
715 if (stopped()) {
716 // Block is dead.
717 continue;
718 }
719
720 NOT_PRODUCT(blocks_parsed++);
721
722 progress = true;
723 if (block->is_loop_head() || block->is_handler() || (has_irreducible && !block->is_ready())) {
724 // Not all preds have been parsed. We must build phis everywhere.
725 // (Note that dead locals do not get phis built, ever.)
726 ensure_phis_everywhere();
727
728 if (block->is_SEL_head()) {
729 // Add predicate to single entry (not irreducible) loop head.
730 assert(!block->has_merged_backedge(), "only entry paths should be merged for now");
731 // Predicates may have been added after a dominating if
732 if (!block->has_predicates()) {
733 // Need correct bci for predicate.
734 // It is fine to set it here since do_one_block() will set it anyway.
735 set_parse_bci(block->start());
736 add_parse_predicates();
737 }
738 // Add new region for back branches.
739 int edges = block->pred_count() - block->preds_parsed() + 1; // +1 for original region
740 RegionNode *r = new RegionNode(edges+1);
741 _gvn.set_type(r, Type::CONTROL);
742 record_for_igvn(r);
743 r->init_req(edges, control());
744 set_control(r);
745 block->copy_irreducible_status_to(r, jvms());
746 // Add new phis.
747 ensure_phis_everywhere();
748 }
749
750 // Leave behind an undisturbed copy of the map, for future merges.
751 set_map(clone_map());
752 }
753
754 if (control()->is_Region() && !block->is_loop_head() && !has_irreducible && !block->is_handler()) {
755 // In the absence of irreducible loops, the Region and Phis
756 // associated with a merge that doesn't involve a backedge can
757 // be simplified now since the RPO parsing order guarantees
758 // that any path which was supposed to reach here has already
759 // been parsed or must be dead.
760 Node* c = control();
761 Node* result = _gvn.transform(control());
762 if (c != result && TraceOptoParse) {
763 tty->print_cr("Block #%d replace %d with %d", block->rpo(), c->_idx, result->_idx);
764 }
765 if (result != top()) {
766 record_for_igvn(result);
767 }
768 }
769
770 // Parse the block.
771 do_one_block();
772
773 // Check for bailouts.
774 if (failing()) return;
775 }
776
777 // with irreducible loops multiple passes might be necessary to parse everything
778 if (!has_irreducible || !progress) {
779 break;
780 }
781 }
782
783 #ifndef PRODUCT
784 blocks_seen += block_count();
785
786 // Make sure there are no half-processed blocks remaining.
787 // Every remaining unprocessed block is dead and may be ignored now.
788 for (int rpo = 0; rpo < block_count(); rpo++) {
789 Block* block = rpo_at(rpo);
790 if (!block->is_parsed()) {
791 if (TraceOptoParse) {
792 tty->print_cr("Skipped dead block %d at bci:%d", rpo, block->start());
793 }
794 assert(!block->is_merged(), "no half-processed blocks");
795 }
796 }
797 #endif
798 }
799
800 static Node* mask_int_value(Node* v, BasicType bt, PhaseGVN* gvn) {
801 switch (bt) {
802 case T_BYTE:
803 v = gvn->transform(new LShiftINode(v, gvn->intcon(24)));
804 v = gvn->transform(new RShiftINode(v, gvn->intcon(24)));
805 break;
806 case T_SHORT:
807 v = gvn->transform(new LShiftINode(v, gvn->intcon(16)));
808 v = gvn->transform(new RShiftINode(v, gvn->intcon(16)));
809 break;
810 case T_CHAR:
811 v = gvn->transform(new AndINode(v, gvn->intcon(0xFFFF)));
812 break;
813 case T_BOOLEAN:
814 v = gvn->transform(new AndINode(v, gvn->intcon(0x1)));
815 break;
816 default:
817 break;
818 }
819 return v;
820 }
821
822 //-------------------------------build_exits----------------------------------
823 // Build normal and exceptional exit merge points.
824 void Parse::build_exits() {
825 // make a clone of caller to prevent sharing of side-effects
826 _exits.set_map(_exits.clone_map());
827 _exits.clean_stack(_exits.sp());
828 _exits.sync_jvms();
829
830 RegionNode* region = new RegionNode(1);
831 record_for_igvn(region);
832 gvn().set_type_bottom(region);
833 _exits.set_control(region);
834
835 // Note: iophi and memphi are not transformed until do_exits.
836 Node* iophi = new PhiNode(region, Type::ABIO);
837 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
838 gvn().set_type_bottom(iophi);
839 gvn().set_type_bottom(memphi);
840 _exits.set_i_o(iophi);
841 _exits.set_all_memory(memphi);
842
843 // Add a return value to the exit state. (Do not push it yet.)
844 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
845 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
846 if (ret_type->isa_int()) {
847 BasicType ret_bt = method()->return_type()->basic_type();
848 if (ret_bt == T_BOOLEAN ||
849 ret_bt == T_CHAR ||
850 ret_bt == T_BYTE ||
851 ret_bt == T_SHORT) {
852 ret_type = TypeInt::INT;
853 }
854 }
855
856 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
857 // becomes loaded during the subsequent parsing, the loaded and unloaded
858 // types will not join when we transform and push in do_exits().
859 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
860 if (ret_oop_type && !ret_oop_type->is_loaded()) {
861 ret_type = TypeOopPtr::BOTTOM;
862 }
863 int ret_size = type2size[ret_type->basic_type()];
864 Node* ret_phi = new PhiNode(region, ret_type);
865 gvn().set_type_bottom(ret_phi);
866 _exits.ensure_stack(ret_size);
867 assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
868 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
869 _exits.set_argument(0, ret_phi); // here is where the parser finds it
870 // Note: ret_phi is not yet pushed, until do_exits.
871 }
872 }
873
874 //----------------------------build_start_state-------------------------------
875 // Construct a state which contains only the incoming arguments from an
876 // unknown caller. The method & bci will be null & InvocationEntryBci.
877 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
878 int arg_size = tf->domain_sig()->cnt();
879 int max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
880 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
881 SafePointNode* map = new SafePointNode(max_size, jvms);
882 jvms->set_map(map);
883 record_for_igvn(map);
884 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
885 Node_Notes* old_nn = default_node_notes();
886 if (old_nn != nullptr && has_method()) {
887 Node_Notes* entry_nn = old_nn->clone(this);
888 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
889 entry_jvms->set_offsets(0);
890 entry_jvms->set_bci(entry_bci());
891 entry_nn->set_jvms(entry_jvms);
892 set_default_node_notes(entry_nn);
893 }
894 PhaseGVN& gvn = *initial_gvn();
895 uint i = 0;
896 int arg_num = 0;
897 for (uint j = 0; i < (uint)arg_size; i++) {
898 const Type* t = tf->domain_sig()->field_at(i);
899 Node* parm = nullptr;
900 if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
901 // Inline type arguments are not passed by reference: we get an argument per
902 // field of the inline type. Build InlineTypeNodes from the inline type arguments.
903 GraphKit kit(jvms, &gvn);
904 kit.set_control(map->control());
905 Node* old_mem = map->memory();
906 // Use immutable memory for inline type loads and restore it below
907 kit.set_all_memory(C->immutable_memory());
908 parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
909 map->set_control(kit.control());
910 map->set_memory(old_mem);
911 } else {
912 parm = gvn.transform(new ParmNode(start, j++));
913 }
914 map->init_req(i, parm);
915 // Record all these guys for later GVN.
916 record_for_igvn(parm);
917 if (i >= TypeFunc::Parms && t != Type::HALF) {
918 arg_num++;
919 }
920 }
921 for (; i < map->req(); i++) {
922 map->init_req(i, top());
923 }
924 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
925 set_default_node_notes(old_nn);
926 return jvms;
927 }
928
929 //-----------------------------make_node_notes---------------------------------
930 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
931 if (caller_nn == nullptr) return nullptr;
932 Node_Notes* nn = caller_nn->clone(C);
933 JVMState* caller_jvms = nn->jvms();
934 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
935 jvms->set_offsets(0);
936 jvms->set_bci(_entry_bci);
937 nn->set_jvms(jvms);
938 return nn;
939 }
940
941
942 //--------------------------return_values--------------------------------------
943 void Compile::return_values(JVMState* jvms) {
944 GraphKit kit(jvms);
945 Node* ret = new ReturnNode(TypeFunc::Parms,
946 kit.control(),
947 kit.i_o(),
948 kit.reset_memory(),
949 kit.frameptr(),
950 kit.returnadr());
951 // Add zero or 1 return values
952 int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
953 if (ret_size > 0) {
954 kit.inc_sp(-ret_size); // pop the return value(s)
955 kit.sync_jvms();
956 Node* res = kit.argument(0);
957 if (tf()->returns_inline_type_as_fields()) {
958 // Multiple return values (inline type fields): add as many edges
959 // to the Return node as returned values.
960 InlineTypeNode* vt = res->as_InlineType();
961 ret->add_req_batch(nullptr, tf()->range_cc()->cnt() - TypeFunc::Parms);
962 if (vt->is_allocated(&kit.gvn()) && !StressCallingConvention) {
963 ret->init_req(TypeFunc::Parms, vt);
964 } else {
965 // Return the tagged klass pointer to signal scalarization to the caller
966 Node* tagged_klass = vt->tagged_klass(kit.gvn());
967 // Return null if the inline type is null (null marker field is not set)
968 Node* conv = kit.gvn().transform(new ConvI2LNode(vt->get_null_marker()));
969 Node* shl = kit.gvn().transform(new LShiftLNode(conv, kit.intcon(63)));
970 Node* shr = kit.gvn().transform(new RShiftLNode(shl, kit.intcon(63)));
971 tagged_klass = kit.gvn().transform(new AndLNode(tagged_klass, shr));
972 ret->init_req(TypeFunc::Parms, tagged_klass);
973 }
974 uint idx = TypeFunc::Parms + 1;
975 vt->pass_fields(&kit, ret, idx, false, false);
976 } else {
977 ret->add_req(res);
978 // Note: The second dummy edge is not needed by a ReturnNode.
979 }
980 }
981 // bind it to root
982 root()->add_req(ret);
983 record_for_igvn(ret);
984 initial_gvn()->transform(ret);
985 }
986
987 //------------------------rethrow_exceptions-----------------------------------
988 // Bind all exception states in the list into a single RethrowNode.
989 void Compile::rethrow_exceptions(JVMState* jvms) {
990 GraphKit kit(jvms);
991 if (!kit.has_exceptions()) return; // nothing to generate
992 // Load my combined exception state into the kit, with all phis transformed:
993 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
994 Node* ex_oop = kit.use_exception_state(ex_map);
995 RethrowNode* exit = new RethrowNode(kit.control(),
996 kit.i_o(), kit.reset_memory(),
997 kit.frameptr(), kit.returnadr(),
998 // like a return but with exception input
999 ex_oop);
1000 // bind to root
1001 root()->add_req(exit);
1002 record_for_igvn(exit);
1003 initial_gvn()->transform(exit);
1004 }
1005
1006 //---------------------------do_exceptions-------------------------------------
1007 // Process exceptions arising from the current bytecode.
1008 // Send caught exceptions to the proper handler within this method.
1009 // Unhandled exceptions feed into _exit.
1010 void Parse::do_exceptions() {
1011 if (!has_exceptions()) return;
1012
1013 if (failing()) {
1014 // Pop them all off and throw them away.
1015 while (pop_exception_state() != nullptr) ;
1016 return;
1017 }
1018
1019 PreserveJVMState pjvms(this, false);
1020
1021 SafePointNode* ex_map;
1022 while ((ex_map = pop_exception_state()) != nullptr) {
1023 if (!method()->has_exception_handlers()) {
1024 // Common case: Transfer control outward.
1025 // Doing it this early allows the exceptions to common up
1026 // even between adjacent method calls.
1027 throw_to_exit(ex_map);
1028 } else {
1029 // Have to look at the exception first.
1030 assert(stopped(), "catch_inline_exceptions trashes the map");
1031 catch_inline_exceptions(ex_map);
1032 stop_and_kill_map(); // we used up this exception state; kill it
1033 }
1034 }
1035
1036 // We now return to our regularly scheduled program:
1037 }
1038
1039 //---------------------------throw_to_exit-------------------------------------
1040 // Merge the given map into an exception exit from this method.
1041 // The exception exit will handle any unlocking of receiver.
1042 // The ex_oop must be saved within the ex_map, unlike merge_exception.
1043 void Parse::throw_to_exit(SafePointNode* ex_map) {
1044 // Pop the JVMS to (a copy of) the caller.
1045 GraphKit caller;
1046 caller.set_map_clone(_caller->map());
1047 caller.set_bci(_caller->bci());
1048 caller.set_sp(_caller->sp());
1049 // Copy out the standard machine state:
1050 for (uint i = 0; i < TypeFunc::Parms; i++) {
1051 caller.map()->set_req(i, ex_map->in(i));
1052 }
1053 if (ex_map->has_replaced_nodes()) {
1054 _replaced_nodes_for_exceptions = true;
1055 }
1056 caller.map()->transfer_replaced_nodes_from(ex_map, _new_idx);
1057 // ...and the exception:
1058 Node* ex_oop = saved_ex_oop(ex_map);
1059 SafePointNode* caller_ex_map = caller.make_exception_state(ex_oop);
1060 // Finally, collect the new exception state in my exits:
1061 _exits.add_exception_state(caller_ex_map);
1062 }
1063
1064 //------------------------------do_exits---------------------------------------
1065 void Parse::do_exits() {
1066 set_parse_bci(InvocationEntryBci);
1067
1068 // Now peephole on the return bits
1069 Node* region = _exits.control();
1070 _exits.set_control(gvn().transform(region));
1071
1072 Node* iophi = _exits.i_o();
1073 _exits.set_i_o(gvn().transform(iophi));
1074
1075 // Figure out if we need to emit the trailing barrier. The barrier is only
1076 // needed in the constructors, and only in three cases:
1077 //
1078 // 1. The constructor wrote a final or a @Stable field. All these
1079 // initializations must be ordered before any code after the constructor
1080 // publishes the reference to the newly constructed object. Rather
1081 // than wait for the publication, we simply block the writes here.
1082 // Rather than put a barrier on only those writes which are required
1083 // to complete, we force all writes to complete.
1084 //
1085 // 2. Experimental VM option is used to force the barrier if any field
1086 // was written out in the constructor.
1087 //
1088 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1089 // support_IRIW_for_not_multiple_copy_atomic_cpu selects that
1090 // MemBarVolatile is used before volatile load instead of after volatile
1091 // store, so there's no barrier after the store.
1092 // We want to guarantee the same behavior as on platforms with total store
1093 // order, although this is not required by the Java memory model.
1094 // In this case, we want to enforce visibility of volatile field
1095 // initializations which are performed in constructors.
1096 // So as with finals, we add a barrier here.
1097 //
1098 // "All bets are off" unless the first publication occurs after a
1099 // normal return from the constructor. We do not attempt to detect
1100 // such unusual early publications. But no barrier is needed on
1101 // exceptional returns, since they cannot publish normally.
1102 //
1103 if ((method()->is_object_constructor() || method()->is_class_initializer()) &&
1104 (wrote_final() || wrote_stable() ||
1105 (AlwaysSafeConstructors && wrote_fields()) ||
1106 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1107 Node* recorded_alloc = alloc_with_final_or_stable();
1108 _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
1109 recorded_alloc);
1110
1111 // If Memory barrier is created for final fields write
1112 // and allocation node does not escape the initialize method,
1113 // then barrier introduced by allocation node can be removed.
1114 if (DoEscapeAnalysis && (recorded_alloc != nullptr)) {
1115 AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc);
1116 alloc->compute_MemBar_redundancy(method());
1117 }
1118 if (PrintOpto && (Verbose || WizardMode)) {
1119 method()->print_name();
1120 tty->print_cr(" writes finals/@Stable and needs a memory barrier");
1121 }
1122 }
1123
1124 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1125 // transform each slice of the original memphi:
1126 mms.set_memory(_gvn.transform(mms.memory()));
1127 }
1128 // Clean up input MergeMems created by transforming the slices
1129 _gvn.transform(_exits.merged_memory());
1130
1131 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
1132 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
1133 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1134 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1135 // If the type we set for the ret_phi in build_exits() is too optimistic and
1136 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1137 // loading. It could also be due to an error, so mark this method as not compilable because
1138 // otherwise this could lead to an infinite compile loop.
1139 // In any case, this code path is rarely (and never in my testing) reached.
1140 C->record_method_not_compilable("Can't determine return type.");
1141 return;
1142 }
1143 if (ret_type->isa_int()) {
1144 BasicType ret_bt = method()->return_type()->basic_type();
1145 ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1146 }
1147 _exits.push_node(ret_type->basic_type(), ret_phi);
1148 }
1149
1150 // Note: Logic for creating and optimizing the ReturnNode is in Compile.
1151
1152 // Unlock along the exceptional paths.
1153 // This is done late so that we can common up equivalent exceptions
1154 // (e.g., null checks) arising from multiple points within this method.
1155 // See GraphKit::add_exception_state, which performs the commoning.
1156 bool do_synch = method()->is_synchronized();
1157
1158 // record exit from a method if compiled while Dtrace is turned on.
1159 if (do_synch || C->env()->dtrace_method_probes() || _replaced_nodes_for_exceptions) {
1160 // First move the exception list out of _exits:
1161 GraphKit kit(_exits.transfer_exceptions_into_jvms());
1162 SafePointNode* normal_map = kit.map(); // keep this guy safe
1163 // Now re-collect the exceptions into _exits:
1164 SafePointNode* ex_map;
1165 while ((ex_map = kit.pop_exception_state()) != nullptr) {
1166 Node* ex_oop = kit.use_exception_state(ex_map);
1167 // Force the exiting JVM state to have this method at InvocationEntryBci.
1168 // The exiting JVM state is otherwise a copy of the calling JVMS.
1169 JVMState* caller = kit.jvms();
1170 JVMState* ex_jvms = caller->clone_shallow(C);
1171 ex_jvms->bind_map(kit.clone_map());
1172 ex_jvms->set_bci( InvocationEntryBci);
1173 kit.set_jvms(ex_jvms);
1174 if (do_synch) {
1175 // Add on the synchronized-method box/object combo
1176 kit.map()->push_monitor(_synch_lock);
1177 // Unlock!
1178 kit.shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
1179 }
1180 if (C->env()->dtrace_method_probes()) {
1181 kit.make_dtrace_method_exit(method());
1182 }
1183 if (_replaced_nodes_for_exceptions) {
1184 kit.map()->apply_replaced_nodes(_new_idx);
1185 }
1186 // Done with exception-path processing.
1187 ex_map = kit.make_exception_state(ex_oop);
1188 assert(ex_jvms->same_calls_as(ex_map->jvms()), "sanity");
1189 // Pop the last vestige of this method:
1190 caller->clone_shallow(C)->bind_map(ex_map);
1191 _exits.push_exception_state(ex_map);
1192 }
1193 assert(_exits.map() == normal_map, "keep the same return state");
1194 }
1195
1196 {
1197 // Capture very early exceptions (receiver null checks) from caller JVMS
1198 GraphKit caller(_caller);
1199 SafePointNode* ex_map;
1200 while ((ex_map = caller.pop_exception_state()) != nullptr) {
1201 _exits.add_exception_state(ex_map);
1202 }
1203 }
1204 _exits.map()->apply_replaced_nodes(_new_idx);
1205 }
1206
1207 //-----------------------------create_entry_map-------------------------------
1208 // Initialize our parser map to contain the types at method entry.
1209 // For OSR, the map contains a single RawPtr parameter.
1210 // Initial monitor locking for sync. methods is performed by do_method_entry.
1211 SafePointNode* Parse::create_entry_map() {
1212 // Check for really stupid bail-out cases.
1213 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1214 if (len >= 32760) {
1215 // Bailout expected, this is a very rare edge case.
1216 C->record_method_not_compilable("too many local variables");
1217 return nullptr;
1218 }
1219
1220 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1221 _caller->map()->delete_replaced_nodes();
1222
1223 // If this is an inlined method, we may have to do a receiver null check.
1224 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1225 GraphKit kit(_caller);
1226 Node* receiver = kit.argument(0);
1227 Node* null_free = kit.null_check_receiver_before_call(method());
1228 _caller = kit.transfer_exceptions_into_jvms();
1229
1230 if (kit.stopped()) {
1231 _exits.add_exception_states_from(_caller);
1232 _exits.set_jvms(_caller);
1233 return nullptr;
1234 }
1235 }
1236
1237 assert(method() != nullptr, "parser must have a method");
1238
1239 // Create an initial safepoint to hold JVM state during parsing
1240 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1241 set_map(new SafePointNode(len, jvms));
1242
1243 // Capture receiver info for compiled lambda forms.
1244 if (method()->is_compiled_lambda_form()) {
1245 ciInstance* recv_info = _caller->compute_receiver_info(method());
1246 jvms->set_receiver_info(recv_info);
1247 }
1248
1249 jvms->set_map(map());
1250 record_for_igvn(map());
1251 assert(jvms->endoff() == len, "correct jvms sizing");
1252
1253 SafePointNode* inmap = _caller->map();
1254 assert(inmap != nullptr, "must have inmap");
1255 // In case of null check on receiver above
1256 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1257
1258 uint i;
1259
1260 // Pass thru the predefined input parameters.
1261 for (i = 0; i < TypeFunc::Parms; i++) {
1262 map()->init_req(i, inmap->in(i));
1263 }
1264
1265 if (depth() == 1) {
1266 assert(map()->memory()->Opcode() == Op_Parm, "");
1267 // Insert the memory aliasing node
1268 set_all_memory(reset_memory());
1269 }
1270 assert(merged_memory(), "");
1271
1272 // Now add the locals which are initially bound to arguments:
1273 uint arg_size = tf()->domain_sig()->cnt();
1274 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1275 for (i = TypeFunc::Parms; i < arg_size; i++) {
1276 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1277 }
1278
1279 // Clear out the rest of the map (locals and stack)
1280 for (i = arg_size; i < len; i++) {
1281 map()->init_req(i, top());
1282 }
1283
1284 SafePointNode* entry_map = stop();
1285 return entry_map;
1286 }
1287
1288 //-----------------------------do_method_entry--------------------------------
1289 // Emit any code needed in the pseudo-block before BCI zero.
1290 // The main thing to do is lock the receiver of a synchronized method.
1291 void Parse::do_method_entry() {
1292 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1293 set_sp(0); // Java Stack Pointer
1294
1295 NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
1296
1297 // Check if we need a membar at the beginning of the java.lang.Object
1298 // constructor to satisfy the memory model for strict fields.
1299 if (Arguments::is_valhalla_enabled() && method()->intrinsic_id() == vmIntrinsics::_Object_init) {
1300 Node* receiver_obj = local(0);
1301 const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1302 // If there's no exact type, check if the declared type has no implementors and add a dependency
1303 const TypeKlassPtr* klass_ptr = receiver_type->as_klass_type(/* try_for_exact= */ true);
1304 ciType* klass = klass_ptr->klass_is_exact() ? klass_ptr->exact_klass() : nullptr;
1305 if (klass != nullptr && klass->is_instance_klass()) {
1306 // Exact receiver type, check if there is a strict field
1307 ciInstanceKlass* holder = klass->as_instance_klass();
1308 for (int i = 0; i < holder->nof_nonstatic_fields(); i++) {
1309 ciField* field = holder->nonstatic_field_at(i);
1310 if (field->is_strict()) {
1311 // Found a strict field, a membar is needed
1312 AllocateNode* alloc = AllocateNode::Ideal_allocation(receiver_obj);
1313 insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease, receiver_obj);
1314 if (DoEscapeAnalysis && (alloc != nullptr)) {
1315 alloc->compute_MemBar_redundancy(method());
1316 }
1317 break;
1318 }
1319 }
1320 } else if (klass == nullptr) {
1321 // We can't statically determine the type of the receiver and therefore need
1322 // to put a membar here because it could have a strict field.
1323 insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease);
1324 }
1325 }
1326
1327 if (C->env()->dtrace_method_probes()) {
1328 make_dtrace_method_entry(method());
1329 }
1330
1331 #ifdef ASSERT
1332 // Narrow receiver type when it is too broad for the method being parsed.
1333 if (!method()->is_static()) {
1334 ciInstanceKlass* callee_holder = method()->holder();
1335 const Type* holder_type = TypeInstPtr::make(TypePtr::BotPTR, callee_holder, Type::trust_interfaces);
1336
1337 Node* receiver_obj = local(0);
1338 const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1339
1340 if (receiver_type != nullptr && !receiver_type->higher_equal(holder_type)) {
1341 // Receiver should always be a subtype of callee holder.
1342 // But, since C2 type system doesn't properly track interfaces,
1343 // the invariant can't be expressed in the type system for default methods.
1344 // Example: for unrelated C <: I and D <: I, (C `meet` D) = Object </: I.
1345 assert(callee_holder->is_interface(), "missing subtype check");
1346
1347 // Perform dynamic receiver subtype check against callee holder class w/ a halt on failure.
1348 Node* holder_klass = _gvn.makecon(TypeKlassPtr::make(callee_holder, Type::trust_interfaces));
1349 Node* not_subtype_ctrl = gen_subtype_check(receiver_obj, holder_klass);
1350 assert(!stopped(), "not a subtype");
1351
1352 Node* halt = _gvn.transform(new HaltNode(not_subtype_ctrl, frameptr(), "failed receiver subtype check"));
1353 C->root()->add_req(halt);
1354 }
1355 }
1356 #endif // ASSERT
1357
1358 // If the method is synchronized, we need to construct a lock node, attach
1359 // it to the Start node, and pin it there.
1360 if (method()->is_synchronized()) {
1361 // Insert a FastLockNode right after the Start which takes as arguments
1362 // the current thread pointer, the "this" pointer & the address of the
1363 // stack slot pair used for the lock. The "this" pointer is a projection
1364 // off the start node, but the locking spot has to be constructed by
1365 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1366 // becomes the second argument to the FastLockNode call. The
1367 // FastLockNode becomes the new control parent to pin it to the start.
1368
1369 // Setup Object Pointer
1370 Node *lock_obj = nullptr;
1371 if (method()->is_static()) {
1372 ciInstance* mirror = _method->holder()->java_mirror();
1373 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1374 lock_obj = makecon(t_lock);
1375 } else { // Else pass the "this" pointer,
1376 lock_obj = local(0); // which is Parm0 from StartNode
1377 assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
1378 }
1379 // Clear out dead values from the debug info.
1380 kill_dead_locals();
1381 // Build the FastLockNode
1382 _synch_lock = shared_lock(lock_obj);
1383 // Check for bailout in shared_lock
1384 if (failing()) { return; }
1385 }
1386
1387 // Feed profiling data for parameters to the type system so it can
1388 // propagate it as speculative types
1389 record_profiled_parameters_for_speculation();
1390 }
1391
1392 //------------------------------init_blocks------------------------------------
1393 // Initialize our parser map to contain the types/monitors at method entry.
1394 void Parse::init_blocks() {
1395 // Create the blocks.
1396 _block_count = flow()->block_count();
1397 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1398
1399 // Initialize the structs.
1400 for (int rpo = 0; rpo < block_count(); rpo++) {
1401 Block* block = rpo_at(rpo);
1402 new(block) Block(this, rpo);
1403 }
1404
1405 // Collect predecessor and successor information.
1406 for (int rpo = 0; rpo < block_count(); rpo++) {
1407 Block* block = rpo_at(rpo);
1408 block->init_graph(this);
1409 }
1410 }
1411
1412 //---------------------------------Block---------------------------------------
1413 Parse::Block::Block(Parse* outer, int rpo) : _live_locals() {
1414 _flow = outer->flow()->rpo_at(rpo);
1415 _pred_count = 0;
1416 _preds_parsed = 0;
1417 _count = 0;
1418 _is_parsed = false;
1419 _is_handler = false;
1420 _has_merged_backedge = false;
1421 _start_map = nullptr;
1422 _has_predicates = false;
1423 _num_successors = 0;
1424 _all_successors = 0;
1425 _successors = nullptr;
1426 assert(pred_count() == 0 && preds_parsed() == 0, "sanity");
1427 assert(!(is_merged() || is_parsed() || is_handler() || has_merged_backedge()), "sanity");
1428 assert(_live_locals.size() == 0, "sanity");
1429
1430 // entry point has additional predecessor
1431 if (flow()->is_start()) _pred_count++;
1432 assert(flow()->is_start() == (this == outer->start_block()), "");
1433 }
1434
1435 //-------------------------------init_graph------------------------------------
1436 void Parse::Block::init_graph(Parse* outer) {
1437 // Create the successor list for this parser block.
1438 GrowableArray<ciTypeFlow::Block*>* tfs = flow()->successors();
1439 GrowableArray<ciTypeFlow::Block*>* tfe = flow()->exceptions();
1440 int ns = tfs->length();
1441 int ne = tfe->length();
1442 _num_successors = ns;
1443 _all_successors = ns+ne;
1444 _successors = (ns+ne == 0) ? nullptr : NEW_RESOURCE_ARRAY(Block*, ns+ne);
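  // Successor layout: normal successors occupy slots [0, ns); exception
  // successors follow in slots [ns, ns+ne).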
1445 int p = 0;
1446 for (int i = 0; i < ns+ne; i++) {
1447 ciTypeFlow::Block* tf2 = (i < ns) ? tfs->at(i) : tfe->at(i-ns);
1448 Block* block2 = outer->rpo_at(tf2->rpo());
1449 _successors[i] = block2;
1450
1451 // Accumulate pred info for the other block, too.
1452 // Note: We also need to set _pred_count for exception blocks since they could
1453 // also have normal predecessors (reached without athrow by an explicit jump).
1454 // This also means that next_path_num can be called along exception paths.
1455 block2->_pred_count++;
1456 if (i >= ns) {
1457 block2->_is_handler = true;
1458 }
1459
1460 #ifdef ASSERT
1461 // A block's successors must be distinguishable by BCI.
1462 // That is, no bytecode is allowed to branch to two different
1463 // clones of the same code location.
1464 for (int j = 0; j < i; j++) {
1465 Block* block1 = _successors[j];
1466 if (block1 == block2) continue; // duplicates are OK
1467 assert(block1->start() != block2->start(), "successors have unique bcis");
1468 }
1469 #endif
1470 }
1471 }
1472
1473 //---------------------------successor_for_bci---------------------------------
1474 Parse::Block* Parse::Block::successor_for_bci(int bci) {
1475 for (int i = 0; i < all_successors(); i++) {
1476 Block* block2 = successor_at(i);
1477 if (block2->start() == bci) return block2;
1478 }
1479 // We can actually reach here if ciTypeFlow traps out a block
1480 // due to an unloaded class, and concurrently with compilation the
1481 // class is then loaded, so that a later phase of the parser is
1482 // able to see more of the bytecode CFG. Or, the flow pass and
1483 // the parser can have a minor difference of opinion about executability
1484 // of bytecodes. For example, "obj.field = null" is executable even
1485 // if the field's type is an unloaded class; the flow pass used to
1486 // make a trap for such code.
1487 return nullptr;
1488 }
1489
1490
1491 //-----------------------------stack_type_at-----------------------------------
1492 const Type* Parse::Block::stack_type_at(int i) const {
1493 return get_type(flow()->stack_type_at(i));
1494 }
1495
1496
1497 //-----------------------------local_type_at-----------------------------------
1498 const Type* Parse::Block::local_type_at(int i) const {
1499 // Make dead locals fall to bottom.
1500 if (_live_locals.size() == 0) {
1501 MethodLivenessResult live_locals = flow()->outer()->method()->liveness_at_bci(start());
1502 // This bitmap can be zero length if we saw a breakpoint.
1503 // In such cases, pretend they are all live.
1504 ((Block*)this)->_live_locals = live_locals;
1505 }
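  // A dead local merges as BOTTOM; ensure_phi() later turns BOTTOM into top,
  // so the value goes dead instead of growing a conflicting phi.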
1506 if (_live_locals.size() > 0 && !_live_locals.at(i))
1507 return Type::BOTTOM;
1508
1509 return get_type(flow()->local_type_at(i));
1510 }
1511
1512
1513 #ifndef PRODUCT
1514
1515 //----------------------------name_for_bc--------------------------------------
1516 // helper method for BytecodeParseHistogram
1517 static const char* name_for_bc(int i) {
1518 return Bytecodes::is_defined(i) ? Bytecodes::name(Bytecodes::cast(i)) : "xxxunusedxxx";
1519 }
1520
1521 //----------------------------BytecodeParseHistogram------------------------------------
1522 Parse::BytecodeParseHistogram::BytecodeParseHistogram(Parse *p, Compile *c) {
1523 _parser = p;
1524 _compiler = c;
1525 if( ! _initialized ) { _initialized = true; reset(); }
1526 }
1527
1528 //----------------------------current_count------------------------------------
1529 int Parse::BytecodeParseHistogram::current_count(BPHType bph_type) {
1530 switch( bph_type ) {
1531 case BPH_transforms: { return _parser->gvn().made_progress(); }
1532 case BPH_values: { return _parser->gvn().made_new_values(); }
1533 default: { ShouldNotReachHere(); return 0; }
1534 }
1535 }
1536
1537 //----------------------------initialized--------------------------------------
1538 bool Parse::BytecodeParseHistogram::initialized() { return _initialized; }
1539
1540 //----------------------------reset--------------------------------------------
1541 void Parse::BytecodeParseHistogram::reset() {
1542 int i = Bytecodes::number_of_codes;
1543 while (i-- > 0) { _bytecodes_parsed[i] = 0; _nodes_constructed[i] = 0; _nodes_transformed[i] = 0; _new_values[i] = 0; }
1544 }
1545
1546 //----------------------------set_initial_state--------------------------------
1547 // Record info when starting to parse one bytecode
1548 void Parse::BytecodeParseHistogram::set_initial_state( Bytecodes::Code bc ) {
1549 if( PrintParseStatistics && !_parser->is_osr_parse() ) {
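    // Snapshot the counters here; record_change() charges the deltas to this bytecode.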
1550 _initial_bytecode = bc;
1551 _initial_node_count = _compiler->unique();
1552 _initial_transforms = current_count(BPH_transforms);
1553 _initial_values = current_count(BPH_values);
1554 }
1555 }
1556
1557 //----------------------------record_change--------------------------------
1558 // Record results of parsing one bytecode
1559 void Parse::BytecodeParseHistogram::record_change() {
1560 if( PrintParseStatistics && !_parser->is_osr_parse() ) {
1561 ++_bytecodes_parsed[_initial_bytecode];
1562 _nodes_constructed [_initial_bytecode] += (_compiler->unique() - _initial_node_count);
1563 _nodes_transformed [_initial_bytecode] += (current_count(BPH_transforms) - _initial_transforms);
1564 _new_values [_initial_bytecode] += (current_count(BPH_values) - _initial_values);
1565 }
1566 }
1567
1568
1569 //----------------------------print--------------------------------------------
1570 void Parse::BytecodeParseHistogram::print(float cutoff) {
1571 ResourceMark rm;
1572 // print profile
1573 int total = 0;
1574 int i = 0;
1575 for( i = 0; i < Bytecodes::number_of_codes; ++i ) { total += _bytecodes_parsed[i]; }
1576 int abs_sum = 0;
1577 tty->cr(); //0123456789012345678901234567890123456789012345678901234567890123456789
1578 tty->print_cr("Histogram of %d parsed bytecodes:", total);
1579 if( total == 0 ) { return; }
1580 tty->cr();
1581 tty->print_cr("absolute: count of compiled bytecodes of this type");
1582 tty->print_cr("relative: percentage contribution to compiled nodes");
1583 tty->print_cr("nodes : Average number of nodes constructed per bytecode");
1584 tty->print_cr("rnodes : Significance towards total nodes constructed, (nodes*relative)");
1585 tty->print_cr("transforms: Average amount of transform progress per bytecode compiled");
1586 tty->print_cr("values : Average number of node values improved per bytecode");
1587 tty->print_cr("name : Bytecode name");
1588 tty->cr();
1589 tty->print_cr(" absolute relative nodes rnodes transforms values name");
1590 tty->print_cr("----------------------------------------------------------------------");
1591 while (--i > 0) {
1592 int abs = _bytecodes_parsed[i];
1593 float rel = abs * 100.0F / total;
1594 float nodes = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _nodes_constructed[i])/_bytecodes_parsed[i];
1595 float rnodes = _bytecodes_parsed[i] == 0 ? 0 : rel * nodes;
1596 float xforms = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _nodes_transformed[i])/_bytecodes_parsed[i];
1597 float values = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _new_values [i])/_bytecodes_parsed[i];
1598 if (cutoff <= rel) {
1599 tty->print_cr("%10d %7.2f%% %6.1f %6.2f %6.1f %6.1f %s", abs, rel, nodes, rnodes, xforms, values, name_for_bc(i));
1600 abs_sum += abs;
1601 }
1602 }
1603 tty->print_cr("----------------------------------------------------------------------");
1604 float rel_sum = abs_sum * 100.0F / total;
1605 tty->print_cr("%10d %7.2f%% (cutoff = %.2f%%)", abs_sum, rel_sum, cutoff);
1606 tty->print_cr("----------------------------------------------------------------------");
1607 tty->cr();
1608 }
1609 #endif
1610
1611 //----------------------------load_state_from----------------------------------
1612 // Load block/map/sp, but do not touch iter/bci.
1613 void Parse::load_state_from(Block* block) {
1614 set_block(block);
1615 // load the block's JVM state:
1616 set_map(block->start_map());
1617 set_sp( block->start_sp());
1618 }
1619
1620
1621 //-----------------------------record_state------------------------------------
1622 void Parse::Block::record_state(Parse* p) {
1623 assert(!is_merged(), "can only record state once, on 1st inflow");
1624 assert(start_sp() == p->sp(), "stack pointer must agree with ciTypeFlow");
1625 set_start_map(p->stop());
1626 }
1627
1628
1629 //------------------------------do_one_block-----------------------------------
1630 void Parse::do_one_block() {
1631 if (TraceOptoParse) {
1632 Block *b = block();
1633 int ns = b->num_successors();
1634 int nt = b->all_successors();
1635
1636 tty->print("Parsing block #%d at bci [%d,%d), successors:",
1637 block()->rpo(), block()->start(), block()->limit());
1638 for (int i = 0; i < nt; i++) {
1639 tty->print((( i < ns) ? " %d" : " %d(exception block)"), b->successor_at(i)->rpo());
1640 }
1641 if (b->is_loop_head()) {
1642 tty->print(" loop head");
1643 }
1644 if (b->is_irreducible_loop_entry()) {
1645 tty->print(" irreducible");
1646 }
1647 tty->cr();
1648 }
1649
1650 assert(block()->is_merged(), "must be merged before being parsed");
1651 block()->mark_parsed();
1652
1653 // Set iterator to start of block.
1654 iter().reset_to_bci(block()->start());
1655
1656 if (ProfileExceptionHandlers && block()->is_handler()) {
1657 ciMethodData* methodData = method()->method_data();
1658 if (methodData->is_mature()) {
1659 ciBitData data = methodData->exception_handler_bci_to_data(block()->start());
1660 if (!data.exception_handler_entered() || StressPrunedExceptionHandlers) {
1661 // dead catch block
1662 // Emit an uncommon trap instead of processing the block.
1663 set_parse_bci(block()->start());
1664 uncommon_trap(Deoptimization::Reason_unreached,
1665 Deoptimization::Action_reinterpret,
1666 nullptr, "dead catch block");
1667 return;
1668 }
1669 }
1670 }
1671
1672 CompileLog* log = C->log();
1673
1674 // Parse bytecodes
1675 while (!stopped() && !failing()) {
1676 iter().next();
1677
1678 // Learn the current bci from the iterator:
1679 set_parse_bci(iter().cur_bci());
1680
1681 if (bci() == block()->limit()) {
1682 // Do not walk into the next block until directed by do_all_blocks.
1683 merge(bci());
1684 break;
1685 }
1686 assert(bci() < block()->limit(), "bci still in block");
1687
1688 if (log != nullptr) {
1689 // Output an optional context marker, to help place actions
1690 // that occur during parsing of this BC. If there is no log
1691 // output until the next context string, this context string
1692 // will be silently ignored.
1693 log->set_context("bc code='%d' bci='%d'", (int)bc(), bci());
1694 }
1695
1696 if (block()->has_trap_at(bci())) {
1697 // We must respect the flow pass's traps, because it will refuse
1698 // to produce successors for trapping blocks.
1699 int trap_index = block()->flow()->trap_index();
1700 assert(trap_index != 0, "trap index must be valid");
1701 uncommon_trap(trap_index);
1702 break;
1703 }
1704
1705 NOT_PRODUCT( parse_histogram()->set_initial_state(bc()); );
1706
1707 #ifdef ASSERT
1708 int pre_bc_sp = sp();
1709 int inputs, depth;
1710 bool have_se = !stopped() && compute_stack_effects(inputs, depth);
1711 assert(!have_se || pre_bc_sp >= inputs, "have enough stack to execute this BC: pre_bc_sp=%d, inputs=%d", pre_bc_sp, inputs);
1712 #endif //ASSERT
1713
1714 do_one_bytecode();
1715 if (failing()) return;
1716
1717 assert(!have_se || stopped() || failing() || (sp() - pre_bc_sp) == depth,
1718 "incorrect depth prediction: sp=%d, pre_bc_sp=%d, depth=%d", sp(), pre_bc_sp, depth);
1719
1720 do_exceptions();
1721
1722 NOT_PRODUCT( parse_histogram()->record_change(); );
1723
1724 if (log != nullptr)
1725 log->clear_context(); // skip marker if nothing was printed
1726
1727 // Fall into next bytecode. Each bytecode normally has 1 sequential
1728 // successor which is typically made ready by visiting this bytecode.
1729 // If the successor has several predecessors, then it is a merge
1730 // point, starts a new basic block, and is handled like other basic blocks.
1731 }
1732 }
1733
1734
1735 //------------------------------set_parse_bci----------------------------------
1736 void Parse::set_parse_bci(int bci) {
1737 set_bci(bci);
1738 Node_Notes* nn = C->default_node_notes();
1739 if (nn == nullptr) return;
1740
1741 // Collect debug info for inlined calls unless -XX:-DebugInlinedCalls.
1742 if (!DebugInlinedCalls && depth() > 1) {
1743 return;
1744 }
1745
1746 // Update the JVMS annotation, if present.
1747 JVMState* jvms = nn->jvms();
1748 if (jvms != nullptr && jvms->bci() != bci) {
1749 // Update the JVMS.
1750 jvms = jvms->clone_shallow(C);
1751 jvms->set_bci(bci);
1752 nn->set_jvms(jvms);
1753 }
1754 }
1755
1756 //------------------------------merge------------------------------------------
1757 // Merge the current mapping into the basic block starting at bci
1758 void Parse::merge(int target_bci) {
1759 Block* target = successor_for_bci(target_bci);
1760 if (target == nullptr) { handle_missing_successor(target_bci); return; }
1761 assert(!target->is_ready(), "our arrival must be expected");
1762 int pnum = target->next_path_num();
1763 merge_common(target, pnum);
1764 }
1765
1766 //-------------------------merge_new_path--------------------------------------
1767 // Merge the current mapping into the basic block, using a new path
1768 void Parse::merge_new_path(int target_bci) {
1769 Block* target = successor_for_bci(target_bci);
1770 if (target == nullptr) { handle_missing_successor(target_bci); return; }
1771 assert(!target->is_ready(), "new path into frozen graph");
1772 int pnum = target->add_new_path();
1773 merge_common(target, pnum);
1774 }
1775
1776 //-------------------------merge_exception-------------------------------------
1777 // Merge the current mapping into the basic block starting at bci
1778 // The ex_oop must be pushed on the stack, unlike throw_to_exit.
1779 void Parse::merge_exception(int target_bci) {
1780 #ifdef ASSERT
1781 if (target_bci <= bci()) {
1782 C->set_exception_backedge();
1783 }
1784 #endif
1785 assert(sp() == 1, "must have only the throw exception on the stack");
1786 Block* target = successor_for_bci(target_bci);
1787 if (target == nullptr) { handle_missing_successor(target_bci); return; }
1788 assert(target->is_handler(), "exceptions are handled by special blocks");
1789 int pnum = target->add_new_path();
1790 merge_common(target, pnum);
1791 }
1792
1793 //--------------------handle_missing_successor---------------------------------
1794 void Parse::handle_missing_successor(int target_bci) {
1795 #ifndef PRODUCT
1796 Block* b = block();
1797 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1798 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1799 #endif
1800 ShouldNotReachHere();
1801 }
1802
1803 //--------------------------merge_common---------------------------------------
1804 void Parse::merge_common(Parse::Block* target, int pnum) {
1805 if (TraceOptoParse) {
1806 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1807 }
1808
1809 // Zap extra stack slots to top
1810 assert(sp() == target->start_sp(), "");
1811 clean_stack(sp());
1812
1813 // Check for merge conflicts involving inline types
1814 JVMState* old_jvms = map()->jvms();
1815 int old_bci = bci();
1816 JVMState* tmp_jvms = old_jvms->clone_shallow(C);
1817 tmp_jvms->set_should_reexecute(true);
1818 tmp_jvms->bind_map(map());
1819   // Execution needs to restart at the next bytecode (entry of the next block).
1821 if (target->is_merged() ||
1822 pnum > PhiNode::Input ||
1823 target->is_handler() ||
1824 target->is_loop_head()) {
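    // These are the cases in which the target block has (or will get) a Region with Phis,
    // so incoming inline-type values must first be normalized (buffered or scalarized).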
1825 set_parse_bci(target->start());
1826 for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1827 Node* n = map()->in(j); // Incoming change to target state.
1828 const Type* t = nullptr;
1829 if (tmp_jvms->is_loc(j)) {
1830 t = target->local_type_at(j - tmp_jvms->locoff());
1831 } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1832 t = target->stack_type_at(j - tmp_jvms->stkoff());
1833 }
1834 if (t != nullptr && t != Type::BOTTOM) {
1835 // An object can appear in the JVMS as either an oop or an InlineTypeNode. If the merge is
1836 // an InlineTypeNode, we need all the merge inputs to be InlineTypeNodes. Else, if the
1837           // merge is an oop, each merge input needs to be either an oop or a buffered
1838 // InlineTypeNode.
1839 if (!t->is_inlinetypeptr()) {
1840 // The merge cannot be an InlineTypeNode, ensure the input is buffered if it is an
1841 // InlineTypeNode
1842 if (n->is_InlineType()) {
1843 map()->set_req(j, n->as_InlineType()->buffer(this));
1844 }
1845 } else {
1846 // Since the merge is a value object, it can either be an oop or an InlineTypeNode
1847 if (!target->is_merged()) {
1848 // This is the first processed input of the merge. If it is an InlineTypeNode, the
1849 // merge will be an InlineTypeNode. Else, try to scalarize so the merge can be
1850 // scalarized as well. However, we cannot blindly scalarize an inline type oop here
1851 // since it may be larval
1852 if (!n->is_InlineType() && gvn().type(n)->is_zero_type()) {
1853 // Null constant implies that this is not a larval object
1854 map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass()));
1855 }
1856 } else {
1857 Node* phi = target->start_map()->in(j);
1858 if (phi->is_InlineType()) {
1859 // Larval oops cannot be merged with non-larval ones, and since the merge point is
1860 // non-larval, n must be non-larval as well. As a result, we can scalarize n to merge
1861 // into phi
1862 if (!n->is_InlineType()) {
1863 map()->set_req(j, InlineTypeNode::make_from_oop(this, n, t->inline_klass()));
1864 }
1865 } else {
1866 // The merge is an oop phi, ensure the input is buffered if it is an InlineTypeNode
1867 if (n->is_InlineType()) {
1868 map()->set_req(j, n->as_InlineType()->buffer(this));
1869 }
1870 }
1871 }
1872 }
1873 }
1874 }
1875 }
1876 old_jvms->bind_map(map());
1877 set_parse_bci(old_bci);
1878
1879 if (!target->is_merged()) { // No prior mapping at this bci
1880 if (TraceOptoParse) { tty->print(" with empty state"); }
1881
1882 // If this path is dead, do not bother capturing it as a merge.
1883     // It is "as if" we had one fewer predecessor from the beginning.
1884 if (stopped()) {
1885 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1886 return;
1887 }
1888
1889 // Make a region if we know there are multiple or unpredictable inputs.
1890 // (Also, if this is a plain fall-through, we might see another region,
1891 // which must not be allowed into this block's map.)
1892 if (pnum > PhiNode::Input // Known multiple inputs.
1893 || target->is_handler() // These have unpredictable inputs.
1894 || target->is_loop_head() // Known multiple inputs
1895 || control()->is_Region()) { // We must hide this guy.
1896
1897 int current_bci = bci();
1898 set_parse_bci(target->start()); // Set target bci
1899 if (target->is_SEL_head()) {
1900 DEBUG_ONLY( target->mark_merged_backedge(block()); )
1901 if (target->start() == 0) {
1902 // Add Parse Predicates for the special case when
1903 // there are backbranches to the method entry.
1904 add_parse_predicates();
1905 }
1906 }
1907 // Add a Region to start the new basic block. Phis will be added
1908 // later lazily.
1909 int edges = target->pred_count();
1910 if (edges < pnum) edges = pnum; // might be a new path!
1911 RegionNode *r = new RegionNode(edges+1);
1912 gvn().set_type(r, Type::CONTROL);
1913 record_for_igvn(r);
1914 // zap all inputs to null for debugging (done in Node(uint) constructor)
1915 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1916 r->init_req(pnum, control());
1917 set_control(r);
1918 target->copy_irreducible_status_to(r, jvms());
1919 set_parse_bci(current_bci); // Restore bci
1920 }
1921
1922 // Convert the existing Parser mapping into a mapping at this bci.
1923 store_state_to(target);
1924 assert(target->is_merged(), "do not come here twice");
1925
1926 } else { // Prior mapping at this bci
1927 if (TraceOptoParse) { tty->print(" with previous state"); }
1928 #ifdef ASSERT
1929 if (target->is_SEL_head()) {
1930 target->mark_merged_backedge(block());
1931 }
1932 #endif
1933
1934 // We must not manufacture more phis if the target is already parsed.
1935 bool nophi = target->is_parsed();
1936
1937 SafePointNode* newin = map();// Hang on to incoming mapping
1938 Block* save_block = block(); // Hang on to incoming block;
1939 load_state_from(target); // Get prior mapping
1940
1941 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1942 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1943 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1944 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1945
1946 // Iterate over my current mapping and the old mapping.
1947 // Where different, insert Phi functions.
1948 // Use any existing Phi functions.
1949 assert(control()->is_Region(), "must be merging to a region");
1950 RegionNode* r = control()->as_Region();
1951
1952 // Compute where to merge into
1953 // Merge incoming control path
1954 r->init_req(pnum, newin->control());
1955
1956 if (pnum == 1) { // Last merge for this Region?
1957 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1958 Node* result = _gvn.transform(r);
1959 if (r != result && TraceOptoParse) {
1960 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1961 }
1962 }
1963 record_for_igvn(r);
1964 }
1965
1966 // Update all the non-control inputs to map:
1967 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1968 bool check_elide_phi = target->is_SEL_backedge(save_block);
1969 bool last_merge = (pnum == PhiNode::Input);
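    // Path numbers are handed out in descending order, so PhiNode::Input (1) marks
    // the final predecessor to arrive at this Region.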
1970 for (uint j = 1; j < newin->req(); j++) {
1971 Node* m = map()->in(j); // Current state of target.
1972 Node* n = newin->in(j); // Incoming change to target state.
1973 Node* phi;
1974 if (m->is_Phi() && m->as_Phi()->region() == r) {
1975 phi = m;
1976 } else if (m->is_InlineType() && m->as_InlineType()->has_phi_inputs(r)) {
1977 phi = m;
1978 } else {
1979 phi = nullptr;
1980 }
1981 if (m != n) { // Different; must merge
1982 switch (j) {
1983         // Frame pointer and Return Address never change
1984 case TypeFunc::FramePtr:// Drop m, use the original value
1985 case TypeFunc::ReturnAdr:
1986 break;
1987 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1988 assert(phi == nullptr, "the merge contains phis, not vice versa");
1989 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1990 continue;
1991 default: // All normal stuff
1992 if (phi == nullptr) {
1993 const JVMState* jvms = map()->jvms();
1994 if (EliminateNestedLocks &&
1995 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1996             // BoxLock nodes are not commoned when EliminateNestedLocks is on.
1997 // Use old BoxLock node as merged box.
1998 assert(newin->jvms()->is_monitor_box(j), "sanity");
1999 // This assert also tests that nodes are BoxLock.
2000 assert(BoxLockNode::same_slot(n, m), "sanity");
2001 BoxLockNode* old_box = m->as_BoxLock();
2002 if (n->as_BoxLock()->is_unbalanced() && !old_box->is_unbalanced()) {
2003 // Preserve Unbalanced status.
2004 //
2005 // `old_box` can have only Regular or Coarsened status
2006 // because this code is executed only during Parse phase and
2007 // Incremental Inlining before EA and Macro nodes elimination.
2008 //
2009 // Incremental Inlining is executed after IGVN optimizations
2010 // during which BoxLock can be marked as Coarsened.
2011 old_box->set_coarsened(); // Verifies state
2012 old_box->set_unbalanced();
2013 }
2014 C->gvn_replace_by(n, m);
2015 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
2016 phi = ensure_phi(j, nophi);
2017 }
2018 }
2019 break;
2020 }
2021 }
2022 // At this point, n might be top if:
2023 // - there is no phi (because TypeFlow detected a conflict), or
2024       // - the corresponding control edge is top (a dead incoming path)
2025 // It is a bug if we create a phi which sees a garbage value on a live path.
2026
2027 // Merging two inline types?
2028 if (phi != nullptr && phi->is_InlineType()) {
2029 // Reload current state because it may have been updated by ensure_phi
2030 assert(phi == map()->in(j), "unexpected value in map");
2031 assert(phi->as_InlineType()->has_phi_inputs(r), "");
2032 InlineTypeNode* vtm = phi->as_InlineType(); // Current inline type
2033 InlineTypeNode* vtn = n->as_InlineType(); // Incoming inline type
2034 assert(vtm == phi, "Inline type should have Phi input");
2035
2036 #ifdef ASSERT
2037 if (TraceOptoParse) {
2038 tty->print_cr("\nMerging inline types");
2039 tty->print_cr("Current:");
2040 vtm->dump(2);
2041 tty->print_cr("Incoming:");
2042 vtn->dump(2);
2043 tty->cr();
2044 }
2045 #endif
2046 // Do the merge
2047 vtm->merge_with(&_gvn, vtn, pnum, last_merge);
2048 if (last_merge) {
2049 map()->set_req(j, _gvn.transform(vtm));
2050 record_for_igvn(vtm);
2051 }
2052 } else if (phi != nullptr) {
2053 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
2054 assert(phi->as_Phi()->region() == r, "");
2055 phi->set_req(pnum, n); // Then add 'n' to the merge
2056 if (last_merge) {
2057 // Last merge for this Phi.
2058 // So far, Phis have had a reasonable type from ciTypeFlow.
2059 // Now _gvn will join that with the meet of current inputs.
2060           // BOTTOM is never permissible here because, pessimistically,
2061           // Phis of pointers cannot lose the basic pointer type.
2062 DEBUG_ONLY(const Type* bt1 = phi->bottom_type());
2063 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
2064 map()->set_req(j, _gvn.transform(phi));
2065 DEBUG_ONLY(const Type* bt2 = phi->bottom_type());
2066 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
2067 record_for_igvn(phi);
2068 }
2069 }
2070 } // End of for all values to be merged
2071
2072 if (last_merge && !r->in(0)) { // The occasional useless Region
2073 assert(control() == r, "");
2074 set_control(r->nonnull_req());
2075 }
2076
2077 map()->merge_replaced_nodes_with(newin);
2078
2079 // newin has been subsumed into the lazy merge, and is now dead.
2080 set_block(save_block);
2081
2082 stop(); // done with this guy, for now
2083 }
2084
2085 if (TraceOptoParse) {
2086 tty->print_cr(" on path %d", pnum);
2087 }
2088
2089 // Done with this parser state.
2090 assert(stopped(), "");
2091 }
2092
2093
2094 //--------------------------merge_memory_edges---------------------------------
2095 void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) {
2096 // (nophi means we must not create phis, because we already parsed here)
2097 assert(n != nullptr, "");
2098 // Merge the inputs to the MergeMems
2099 MergeMemNode* m = merged_memory();
2100
2101 assert(control()->is_Region(), "must be merging to a region");
2102 RegionNode* r = control()->as_Region();
2103
2104 PhiNode* base = nullptr;
2105 MergeMemNode* remerge = nullptr;
2106 for (MergeMemStream mms(m, n); mms.next_non_empty2(); ) {
2107 Node *p = mms.force_memory();
2108 Node *q = mms.memory2();
2109 if (mms.is_empty() && nophi) {
2110 // Trouble: No new splits allowed after a loop body is parsed.
2111 // Instead, wire the new split into a MergeMem on the backedge.
2112 // The optimizer will sort it out, slicing the phi.
2113 if (remerge == nullptr) {
2114 guarantee(base != nullptr, "");
2115 assert(base->in(0) != nullptr, "should not be xformed away");
2116 remerge = MergeMemNode::make(base->in(pnum));
2117 gvn().set_type(remerge, Type::MEMORY);
2118 base->set_req(pnum, remerge);
2119 }
2120 remerge->set_memory_at(mms.alias_idx(), q);
2121 continue;
2122 }
2123 assert(!q->is_MergeMem(), "");
2124 PhiNode* phi;
2125 if (p != q) {
2126 phi = ensure_memory_phi(mms.alias_idx(), nophi);
2127 } else {
2128 if (p->is_Phi() && p->as_Phi()->region() == r)
2129 phi = p->as_Phi();
2130 else
2131 phi = nullptr;
2132 }
2133 // Insert q into local phi
2134 if (phi != nullptr) {
2135 assert(phi->region() == r, "");
2136 p = phi;
2137 phi->set_req(pnum, q);
2138 if (mms.at_base_memory()) {
2139 base = phi; // delay transforming it
2140 } else if (pnum == 1) {
2141 record_for_igvn(phi);
2142 p = _gvn.transform(phi);
2143 }
2144 mms.set_memory(p);// store back through the iterator
2145 }
2146 }
2147 // Transform base last, in case we must fiddle with remerging.
2148 if (base != nullptr && pnum == 1) {
2149 record_for_igvn(base);
2150 m->set_base_memory(_gvn.transform(base));
2151 }
2152 }
2153
2154
2155 //------------------------ensure_phis_everywhere-------------------------------
2156 void Parse::ensure_phis_everywhere() {
2157 ensure_phi(TypeFunc::I_O);
2158
2159 // Ensure a phi on all currently known memories.
2160 for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
2161 ensure_memory_phi(mms.alias_idx());
2162 DEBUG_ONLY(mms.set_memory()); // keep the iterator happy
2163 }
2164
2165 // Note: This is our only chance to create phis for memory slices.
2166 // If we miss a slice that crops up later, it will have to be
2167 // merged into the base-memory phi that we are building here.
2168 // Later, the optimizer will comb out the knot, and build separate
2169 // phi-loops for each memory slice that matters.
2170
2171 // Monitors must nest nicely and not get confused amongst themselves.
2172 // Phi-ify everything up to the monitors, though.
2173 uint monoff = map()->jvms()->monoff();
2174 uint nof_monitors = map()->jvms()->nof_monitors();
2175
2176 assert(TypeFunc::Parms == map()->jvms()->locoff(), "parser map should contain only youngest jvms");
2177 bool check_elide_phi = block()->is_SEL_head();
2178 for (uint i = TypeFunc::Parms; i < monoff; i++) {
2179 if (!check_elide_phi || !block()->can_elide_SEL_phi(i)) {
2180 ensure_phi(i);
2181 }
2182 }
2183
2184 // Even monitors need Phis, though they are well-structured.
2185 // This is true for OSR methods, and also for the rare cases where
2186 // a monitor object is the subject of a replace_in_map operation.
2187 // See bugs 4426707 and 5043395.
2188 for (uint m = 0; m < nof_monitors; m++) {
2189 ensure_phi(map()->jvms()->monitor_obj_offset(m));
2190 }
2191 }
2192
2193
2194 //-----------------------------add_new_path------------------------------------
2195 // Add a previously unaccounted predecessor to this block.
2196 int Parse::Block::add_new_path() {
2197 // If there is no map, return the lowest unused path number.
2198 if (!is_merged()) return pred_count()+1; // there will be a map shortly
2199
2200 SafePointNode* map = start_map();
2201 if (!map->control()->is_Region())
2202 return pred_count()+1; // there may be a region some day
2203 RegionNode* r = map->control()->as_Region();
2204
2205 // Add new path to the region.
2206 uint pnum = r->req();
2207 r->add_req(nullptr);
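  // The new edge stays null until merge_common() wires in the actual control input for this path.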
2208
2209 for (uint i = 1; i < map->req(); i++) {
2210 Node* n = map->in(i);
2211 if (i == TypeFunc::Memory) {
2212 // Ensure a phi on all currently known memories.
2213 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2214 Node* phi = mms.memory();
2215 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2216 assert(phi->req() == pnum, "must be same size as region");
2217 phi->add_req(nullptr);
2218 }
2219 }
2220 } else {
2221 if (n->is_Phi() && n->as_Phi()->region() == r) {
2222 assert(n->req() == pnum, "must be same size as region");
2223 n->add_req(nullptr);
2224 } else if (n->is_InlineType() && n->as_InlineType()->has_phi_inputs(r)) {
2225 n->as_InlineType()->add_new_path(r);
2226 }
2227 }
2228 }
2229
2230 return pnum;
2231 }
2232
2233 //------------------------------ensure_phi-------------------------------------
2234 // Turn the idx'th entry of the current map into a Phi
2235 Node* Parse::ensure_phi(int idx, bool nocreate) {
2236 SafePointNode* map = this->map();
2237 Node* region = map->control();
2238 assert(region->is_Region(), "");
2239
2240 Node* o = map->in(idx);
2241 assert(o != nullptr, "");
2242
2243 if (o == top()) return nullptr; // TOP always merges into TOP
2244
2245 if (o->is_Phi() && o->as_Phi()->region() == region) {
2246 return o->as_Phi();
2247 }
2248 InlineTypeNode* vt = o->isa_InlineType();
2249 if (vt != nullptr && vt->has_phi_inputs(region)) {
2250 return vt;
2251 }
2252
2253 // Now use a Phi here for merging
2254 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2255 const JVMState* jvms = map->jvms();
2256 const Type* t = nullptr;
2257 if (jvms->is_loc(idx)) {
2258 t = block()->local_type_at(idx - jvms->locoff());
2259 } else if (jvms->is_stk(idx)) {
2260 t = block()->stack_type_at(idx - jvms->stkoff());
2261 } else if (jvms->is_mon(idx)) {
2262 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2263 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2264 } else if ((uint)idx < TypeFunc::Parms) {
2265 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2266 } else {
2267 assert(false, "no type information for this phi");
2268 }
2269
2270 // If the type falls to bottom, then this must be a local that
2271 // is already dead or is mixing ints and oops or some such.
2272 // Forcing it to top makes it go dead.
2273 if (t == Type::BOTTOM) {
2274 map->set_req(idx, top());
2275 return nullptr;
2276 }
2277
2278 // Do not create phis for top either.
2279   // A top value on a live control path must remain unused even after the phi is built.
2280 if (t == Type::TOP || t == Type::HALF) {
2281 map->set_req(idx, top());
2282 return nullptr;
2283 }
2284
2285 if (vt != nullptr && t->is_inlinetypeptr()) {
2286 // Inline types are merged by merging their field values.
2287 // Create a cloned InlineTypeNode with phi inputs that
2288 // represents the merged inline type and update the map.
2289 vt = vt->clone_with_phis(&_gvn, region);
2290 map->set_req(idx, vt);
2291 return vt;
2292 } else {
2293 PhiNode* phi = PhiNode::make(region, o, t);
2294 gvn().set_type(phi, t);
2295 if (C->do_escape_analysis()) record_for_igvn(phi);
2296 map->set_req(idx, phi);
2297 return phi;
2298 }
2299 }
2300
2301 //--------------------------ensure_memory_phi----------------------------------
2302 // Turn the idx'th slice of the current memory into a Phi
2303 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2304 MergeMemNode* mem = merged_memory();
2305 Node* region = control();
2306 assert(region->is_Region(), "");
2307
2308 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2309 assert(o != nullptr && o != top(), "");
2310
2311 PhiNode* phi;
2312 if (o->is_Phi() && o->as_Phi()->region() == region) {
2313 phi = o->as_Phi();
2314 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2315 // clone the shared base memory phi to make a new memory split
2316 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2317 const Type* t = phi->bottom_type();
2318 const TypePtr* adr_type = C->get_adr_type(idx);
2319 phi = phi->slice_memory(adr_type);
2320 gvn().set_type(phi, t);
2321 }
2322 return phi;
2323 }
2324
2325 // Now use a Phi here for merging
2326 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2327 const Type* t = o->bottom_type();
2328 const TypePtr* adr_type = C->get_adr_type(idx);
2329 phi = PhiNode::make(region, o, t, adr_type);
2330 gvn().set_type(phi, t);
2331 if (idx == Compile::AliasIdxBot)
2332 mem->set_base_memory(phi);
2333 else
2334 mem->set_memory_at(idx, phi);
2335 return phi;
2336 }
2337
2338 //------------------------------call_register_finalizer-----------------------
2339 // Check the klass of the receiver and call register_finalizer if the
2340 // class needs finalization.
2341 void Parse::call_register_finalizer() {
2342 Node* receiver = local(0);
2343 assert(receiver != nullptr && receiver->bottom_type()->isa_instptr() != nullptr,
2344 "must have non-null instance type");
2345
2346 const TypeInstPtr *tinst = receiver->bottom_type()->isa_instptr();
2347 if (tinst != nullptr && tinst->is_loaded() && !tinst->klass_is_exact()) {
2348 // The type isn't known exactly so see if CHA tells us anything.
2349 ciInstanceKlass* ik = tinst->instance_klass();
2350 if (!Dependencies::has_finalizable_subclass(ik)) {
2351 // No finalizable subclasses so skip the dynamic check.
2352 C->dependencies()->assert_has_no_finalizable_subclasses(ik);
2353 return;
2354 }
2355 }
2356
2357 // Insert a dynamic test for whether the instance needs
2358 // finalization. In general this will fold up since the concrete
2359 // class is often visible so the access flags are constant.
2360 Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
2361 Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS));
2362
2363 Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::misc_flags_offset()));
2364 Node* access_flags = make_load(nullptr, access_flags_addr, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
2365
2366 Node* mask = _gvn.transform(new AndINode(access_flags, intcon(KlassFlags::_misc_has_finalizer)));
2367 Node* check = _gvn.transform(new CmpINode(mask, intcon(0)));
2368 Node* test = _gvn.transform(new BoolNode(check, BoolTest::ne));
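  // Take the slow path (the register_finalizer runtime call) only when the has_finalizer bit is set.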
2369
2370 IfNode* iff = create_and_map_if(control(), test, PROB_MAX, COUNT_UNKNOWN);
2371
2372 RegionNode* result_rgn = new RegionNode(3);
2373 record_for_igvn(result_rgn);
2374
2375 Node *skip_register = _gvn.transform(new IfFalseNode(iff));
2376 result_rgn->init_req(1, skip_register);
2377
2378 Node *needs_register = _gvn.transform(new IfTrueNode(iff));
2379 set_control(needs_register);
2380 if (stopped()) {
2381 // There is no slow path.
2382 result_rgn->init_req(2, top());
2383 } else {
2384 Node *call = make_runtime_call(RC_NO_LEAF,
2385 OptoRuntime::register_finalizer_Type(),
2386 OptoRuntime::register_finalizer_Java(),
2387 nullptr, TypePtr::BOTTOM,
2388 receiver);
2389 make_slow_call_ex(call, env()->Throwable_klass(), true);
2390
2391 Node* fast_io = call->in(TypeFunc::I_O);
2392 Node* fast_mem = call->in(TypeFunc::Memory);
2393     // These two phis are pre-filled with copies of the fast IO and Memory
2394 Node* io_phi = PhiNode::make(result_rgn, fast_io, Type::ABIO);
2395 Node* mem_phi = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
2396
2397 result_rgn->init_req(2, control());
2398 io_phi ->init_req(2, i_o());
2399 mem_phi ->init_req(2, reset_memory());
2400
2401 set_all_memory( _gvn.transform(mem_phi) );
2402 set_i_o( _gvn.transform(io_phi) );
2403 }
2404
2405 set_control( _gvn.transform(result_rgn) );
2406 }
2407
2408 // Add check to deoptimize once holder klass is fully initialized.
2409 void Parse::clinit_deopt() {
2410 assert(C->has_method(), "only for normal compilations");
2411 assert(depth() == 1, "only for main compiled method");
2412 assert(is_normal_parse(), "no barrier needed on osr entry");
2413 assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2414
2415 set_parse_bci(0);
2416
2417 Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2418 guard_klass_being_initialized(holder);
2419 }
2420
2421 //------------------------------return_current---------------------------------
2422 // Append current _map to _exit_return
2423 void Parse::return_current(Node* value) {
2424 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2425 call_register_finalizer();
2426 }
2427
2428 // frame pointer is always same, already captured
2429 if (value != nullptr) {
2430 Node* phi = _exits.argument(0);
2431 const Type* return_type = phi->bottom_type();
2432 const TypeInstPtr* tr = return_type->isa_instptr();
2433 if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
2434 return_type->is_inlinetypeptr()) {
2435 // Inline type is returned as fields, make sure it is scalarized
2436 if (!value->is_InlineType()) {
2437 value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass());
2438 }
2439 if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
2440 // Returning from root or an incrementally inlined method. Make sure all non-flat
2441 // fields are buffered and re-execute if allocation triggers deoptimization.
2442 PreserveReexecuteState preexecs(this);
2443 assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
2444 jvms()->set_should_reexecute(true);
2445 inc_sp(1);
2446 value = value->as_InlineType()->allocate_fields(this);
2447 }
2448 } else if (value->is_InlineType()) {
2449 // Inline type is returned as oop, make sure it is buffered and re-execute
2450 // if allocation triggers deoptimization.
2451 PreserveReexecuteState preexecs(this);
2452 jvms()->set_should_reexecute(true);
2453 inc_sp(1);
2454 value = value->as_InlineType()->buffer(this);
2455 }
2456 // ...else
2457 // If returning oops to an interface-return, there is a silent free
2458 // cast from oop to interface allowed by the Verifier. Make it explicit here.
2459 phi->add_req(value);
2460 }
2461
2462 // Do not set_parse_bci, so that return goo is credited to the return insn.
2463 set_bci(InvocationEntryBci);
2464 if (method()->is_synchronized()) {
2465 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2466 }
2467 if (C->env()->dtrace_method_probes()) {
2468 make_dtrace_method_exit(method());
2469 }
2470
2471 SafePointNode* exit_return = _exits.map();
2472 exit_return->in( TypeFunc::Control )->add_req( control() );
2473 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2474 Node *mem = exit_return->in( TypeFunc::Memory );
2475 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2476 if (mms.is_empty()) {
2477 // get a copy of the base memory, and patch just this one input
2478 const TypePtr* adr_type = mms.adr_type(C);
2479 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2480 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2481 gvn().set_type_bottom(phi);
2482 phi->del_req(phi->req()-1); // prepare to re-patch
2483 mms.set_memory(phi);
2484 }
2485 mms.memory()->add_req(mms.memory2());
2486 }
2487
2488 if (_first_return) {
2489 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2490 _first_return = false;
2491 } else {
2492 _exits.map()->merge_replaced_nodes_with(map());
2493 }
2494
2495 stop_and_kill_map(); // This CFG path dies here
2496 }
2497
2498
2499 //------------------------------add_safepoint----------------------------------
2500 void Parse::add_safepoint() {
2501 uint parms = TypeFunc::Parms+1;
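  // One extra input slot (TypeFunc::Parms+0) is reserved for the polling page address installed below.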
2502
2503 // Clear out dead values from the debug info.
2504 kill_dead_locals();
2505
2506 // Clone the JVM State
2507 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
2508
2509 // Capture memory state BEFORE a SafePoint. Since we can block at a
2510 // SafePoint we need our GC state to be safe; i.e. we need all our current
2511 // write barriers (card marks) to not float down after the SafePoint so we
2512 // must read raw memory. Likewise we need all oop stores to match the card
2513 // marks. If deopt can happen, we need ALL stores (we need the correct JVM
2514 // state on a deopt).
2515
2516 // We do not need to WRITE the memory state after a SafePoint. The control
2517 // edge will keep card-marks and oop-stores from floating up from below a
2518 // SafePoint and our true dependency added here will keep them from floating
2519 // down below a SafePoint.
2520
2521 // Clone the current memory state
2522 Node* mem = MergeMemNode::make(map()->memory());
2523
2524 mem = _gvn.transform(mem);
2525
2526 // Pass control through the safepoint
2527 sfpnt->init_req(TypeFunc::Control , control());
2528 // Fix edges normally used by a call
2529 sfpnt->init_req(TypeFunc::I_O , top() );
2530 sfpnt->init_req(TypeFunc::Memory , mem );
2531 sfpnt->init_req(TypeFunc::ReturnAdr, top() );
2532 sfpnt->init_req(TypeFunc::FramePtr , top() );
2533
2534 // Create a node for the polling address
2535 Node *polladr;
2536 Node *thread = _gvn.transform(new ThreadLocalNode());
2537 Node *polling_page_load_addr = _gvn.transform(basic_plus_adr(top(), thread, in_bytes(JavaThread::polling_page_offset())));
2538 polladr = make_load(control(), polling_page_load_addr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
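  // The poll address is read from the current JavaThread rather than from a global constant,
  // so the safepoint poll can be armed per thread.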
2539 sfpnt->init_req(TypeFunc::Parms+0, _gvn.transform(polladr));
2540
2541 // Fix up the JVM State edges
2542 add_safepoint_edges(sfpnt);
2543 Node *transformed_sfpnt = _gvn.transform(sfpnt);
2544 set_control(transformed_sfpnt);
2545
2546 // Provide an edge from root to safepoint. This makes the safepoint
2547 // appear useful until the parse has completed.
2548 if (transformed_sfpnt->is_SafePoint()) {
2549 assert(C->root() != nullptr, "Expect parse is still valid");
2550 C->root()->add_prec(transformed_sfpnt);
2551 }
2552 }
2553
2554 #ifndef PRODUCT
2555 //------------------------show_parse_info--------------------------------------
2556 void Parse::show_parse_info() {
2557 InlineTree* ilt = nullptr;
2558 if (C->ilt() != nullptr) {
2559 JVMState* caller_jvms = is_osr_parse() ? caller()->caller() : caller();
2560 ilt = InlineTree::find_subtree_from_root(C->ilt(), caller_jvms, method());
2561 }
2562 if (PrintCompilation && Verbose) {
2563 if (depth() == 1) {
2564 if( ilt->count_inlines() ) {
2565 tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
2566 ilt->count_inline_bcs());
2567 tty->cr();
2568 }
2569 } else {
2570 if (method()->is_synchronized()) tty->print("s");
2571 if (method()->has_exception_handlers()) tty->print("!");
2572 // Check this is not the final compiled version
2573 if (C->trap_can_recompile()) {
2574 tty->print("-");
2575 } else {
2576 tty->print(" ");
2577 }
2578 method()->print_short_name();
2579 if (is_osr_parse()) {
2580 tty->print(" @ %d", osr_bci());
2581 }
2582 tty->print(" (%d bytes)",method()->code_size());
2583 if (ilt->count_inlines()) {
2584 tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
2585 ilt->count_inline_bcs());
2586 }
2587 tty->cr();
2588 }
2589 }
2590 if (PrintOpto && (depth() == 1 || PrintOptoInlining)) {
2591 // Print that we succeeded; suppress this message on the first osr parse.
2592
2593 if (method()->is_synchronized()) tty->print("s");
2594 if (method()->has_exception_handlers()) tty->print("!");
2595 // Check this is not the final compiled version
2596 if (C->trap_can_recompile() && depth() == 1) {
2597 tty->print("-");
2598 } else {
2599 tty->print(" ");
2600 }
2601 if( depth() != 1 ) { tty->print(" "); } // missing compile count
2602 for (int i = 1; i < depth(); ++i) { tty->print(" "); }
2603 method()->print_short_name();
2604 if (is_osr_parse()) {
2605 tty->print(" @ %d", osr_bci());
2606 }
2607 if (ilt->caller_bci() != -1) {
2608 tty->print(" @ %d", ilt->caller_bci());
2609 }
2610 tty->print(" (%d bytes)",method()->code_size());
2611 if (ilt->count_inlines()) {
2612 tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
2613 ilt->count_inline_bcs());
2614 }
2615 tty->cr();
2616 }
2617 }
2618
2619
2620 //------------------------------dump-------------------------------------------
2621 // Dump information associated with the bytecodes of current _method
2622 void Parse::dump() {
2623 if( method() != nullptr ) {
2624 // Iterate over bytecodes
2625 ciBytecodeStream iter(method());
2626 for( Bytecodes::Code bc = iter.next(); bc != ciBytecodeStream::EOBC() ; bc = iter.next() ) {
2627 dump_bci( iter.cur_bci() );
2628 tty->cr();
2629 }
2630 }
2631 }
2632
2633 // Dump information associated with a byte code index, 'bci'
2634 void Parse::dump_bci(int bci) {
2635 // Output info on merge-points, cloning, and within _jsr..._ret
2636 // NYI
2637 tty->print(" bci:%d", bci);
2638 }
2639
2640 #endif