1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciObjArrayKlass.hpp"
26 #include "ci/ciSignature.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "interpreter/linkResolver.hpp"
29 #include "memory/resourceArea.hpp"
30 #include "oops/method.hpp"
31 #include "opto/addnode.hpp"
32 #include "opto/c2compiler.hpp"
33 #include "opto/castnode.hpp"
34 #include "opto/convertnode.hpp"
35 #include "opto/idealGraphPrinter.hpp"
36 #include "opto/inlinetypenode.hpp"
37 #include "opto/locknode.hpp"
38 #include "opto/memnode.hpp"
39 #include "opto/opaquenode.hpp"
40 #include "opto/parse.hpp"
41 #include "opto/rootnode.hpp"
42 #include "opto/runtime.hpp"
43 #include "opto/type.hpp"
44 #include "runtime/arguments.hpp"
45 #include "runtime/handles.inline.hpp"
46 #include "runtime/safepointMechanism.hpp"
47 #include "runtime/sharedRuntime.hpp"
48 #include "utilities/bitMap.inline.hpp"
49 #include "utilities/copy.hpp"
50
51 // Static array so we can figure out which bytecodes stop us from compiling
52 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
53 // and eventually should be encapsulated in a proper class (gri 8/18/98).
54
55 #ifndef PRODUCT
56 uint nodes_created = 0;
57 uint methods_parsed = 0;
58 uint methods_seen = 0;
59 uint blocks_parsed = 0;
60 uint blocks_seen = 0;
61
62 uint explicit_null_checks_inserted = 0;
63 uint explicit_null_checks_elided = 0;
64 uint all_null_checks_found = 0;
65 uint implicit_null_checks = 0;
66
67 bool Parse::BytecodeParseHistogram::_initialized = false;
68 uint Parse::BytecodeParseHistogram::_bytecodes_parsed [Bytecodes::number_of_codes];
69 uint Parse::BytecodeParseHistogram::_nodes_constructed[Bytecodes::number_of_codes];
70 uint Parse::BytecodeParseHistogram::_nodes_transformed[Bytecodes::number_of_codes];
71 uint Parse::BytecodeParseHistogram::_new_values [Bytecodes::number_of_codes];
72
73 //------------------------------print_statistics-------------------------------
74 void Parse::print_statistics() {
75 tty->print_cr("--- Compiler Statistics ---");
76 tty->print("Methods seen: %u Methods parsed: %u", methods_seen, methods_parsed);
77 tty->print(" Nodes created: %u", nodes_created);
78 tty->cr();
79 if (methods_seen != methods_parsed) {
80 tty->print_cr("Reasons for parse failures (NOT cumulative):");
81 }
82 tty->print_cr("Blocks parsed: %u Blocks seen: %u", blocks_parsed, blocks_seen);
83
84 if (explicit_null_checks_inserted) {
85 tty->print_cr("%u original null checks - %u elided (%2u%%); optimizer leaves %u,",
86 explicit_null_checks_inserted, explicit_null_checks_elided,
87 (100*explicit_null_checks_elided)/explicit_null_checks_inserted,
88 all_null_checks_found);
89 }
90 if (all_null_checks_found) {
91 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
92 (100*implicit_null_checks)/all_null_checks_found);
93 }
94 if (SharedRuntime::_implicit_null_throws) {
95 tty->print_cr("%u implicit null exceptions at runtime",
96 SharedRuntime::_implicit_null_throws);
97 }
98
99 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
100 BytecodeParseHistogram::print();
101 }
102 }
103 #endif
104
105 //------------------------------ON STACK REPLACEMENT---------------------------
106
107 // Construct a node which can be used to get incoming state for
108 // on stack replacement.
109 Node* Parse::fetch_interpreter_state(int index,
110 const Type* type,
111 Node* local_addrs,
112 Node* local_addrs_base) {
113 BasicType bt = type->basic_type();
114 if (type == TypePtr::NULL_PTR) {
115 // Ptr types are mixed together with T_ADDRESS but nullptr is
116 // really for T_OBJECT types so correct it.
117 bt = T_OBJECT;
118 }
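  // Slots in the interpreter state buffer are addressed from the high end
  // downward: element 'index' lives at local_addrs - index*wordSize.
  // All of these loads go through the raw memory slice.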
119 Node *mem = memory(Compile::AliasIdxRaw);
120 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
121 Node *ctl = control();
122
123 // Very similar to LoadNode::make, except we handle un-aligned longs and
124 // doubles on Sparc. Intel can handle them just fine directly.
125 Node *l = nullptr;
126 switch (bt) { // Signature is flattened
127 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
128 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
129 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
130 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
131 case T_LONG:
132 case T_DOUBLE: {
133 // Since arguments are in reverse order, the argument address 'adr'
134 // refers to the back half of the long/double. Recompute adr.
135 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
136 if (Matcher::misaligned_doubles_ok) {
137 l = (bt == T_DOUBLE)
138 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
139 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
140 } else {
141 l = (bt == T_DOUBLE)
142 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
143 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
144 }
145 break;
146 }
147 default: ShouldNotReachHere();
148 }
149 return _gvn.transform(l);
150 }
151
152 // Helper routine to prevent the interpreter from handing
153 // unexpected typestate to an OSR method.
154 // The Node l is a value newly dug out of the interpreter frame.
155 // The type is the type predicted by ciTypeFlow. Note that it is
156 // not a general type, but can only come from Type::get_typeflow_type.
157 // The safepoint is a map which will feed an uncommon trap.
158 Node* Parse::check_interpreter_type(Node* l, const Type* type, const TypeKlassPtr* klass_type,
159 SafePointNode* &bad_type_exit, bool is_early_larval) {
160 const TypeOopPtr* tp = type->isa_oopptr();
161
162 // TypeFlow may assert null-ness if a type appears unloaded.
163 if (type == TypePtr::NULL_PTR ||
164 (tp != nullptr && !tp->is_loaded())) {
165 // Value must be null, not a real oop.
166 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
167 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
168 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
169 set_control(_gvn.transform( new IfTrueNode(iff) ));
170 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
171 bad_type_exit->control()->add_req(bad_type);
172 l = null();
173 }
174
175 // Typeflow can also cut off paths from the CFG, based on
176 // types which appear unloaded, or call sites which appear unlinked.
177 // When paths are cut off, values at later merge points can rise
178 // toward more specific classes. Make sure these specific classes
179 // are still in effect.
180 if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
181 // TypeFlow asserted a specific object type. Value must have that type.
182 Node* bad_type_ctrl = nullptr;
183 if (tp->is_inlinetypeptr() && !tp->maybe_null()) {
184 // Check inline types for null here to prevent checkcast from adding an
185 // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
186 l = null_check_oop(l, &bad_type_ctrl);
187 bad_type_exit->control()->add_req(bad_type_ctrl);
188 }
189
190 l = gen_checkcast(l, makecon(klass_type), &bad_type_ctrl, false, is_early_larval);
191 bad_type_exit->control()->add_req(bad_type_ctrl);
192 }
193
194 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
195 return l;
196 }
197
198 // Helper routine which sets up elements of the initial parser map when
199 // performing a parse for on stack replacement. Add values into map.
// The only parameter contains the address of the interpreter's arguments (the OSR buffer).
201 void Parse::load_interpreter_state(Node* osr_buf) {
202 int index;
203 int max_locals = jvms()->loc_size();
204 int max_stack = jvms()->stk_size();
205
206 // Mismatch between method and jvms can occur since map briefly held
207 // an OSR entry state (which takes up one RawPtr word).
208 assert(max_locals == method()->max_locals(), "sanity");
209 assert(max_stack >= method()->max_stack(), "sanity");
210 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
211 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
212
213 // Find the start block.
214 Block* osr_block = start_block();
215 assert(osr_block->start() == osr_bci(), "sanity");
216
217 // Set initial BCI.
218 set_parse_bci(osr_block->start());
219
220 // Set initial stack depth.
221 set_sp(osr_block->start_sp());
222
223 // Check bailouts. We currently do not perform on stack replacement
224 // of loops in catch blocks or loops which branch with a non-empty stack.
225 if (sp() != 0) {
226 C->record_method_not_compilable("OSR starts with non-empty stack");
227 return;
228 }
229 // Do not OSR inside finally clauses:
230 if (osr_block->has_trap_at(osr_block->start())) {
231 assert(false, "OSR starts with an immediate trap");
232 C->record_method_not_compilable("OSR starts with an immediate trap");
233 return;
234 }
235
236 // Commute monitors from interpreter frame to compiler frame.
237 assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
238 int mcnt = osr_block->flow()->monitor_count();
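  // The monitor area sits above the locals in the OSR buffer, two words per
  // monitor (displaced header and locked object). Address it from its last
  // slot, since fetch_interpreter_state() indexes downward from there.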
239 Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
240 for (index = 0; index < mcnt; index++) {
241 // Make a BoxLockNode for the monitor.
242 BoxLockNode* osr_box = new BoxLockNode(next_monitor());
243 // Check for bailout after new BoxLockNode
244 if (failing()) { return; }
245
    // This OSR locking region is unbalanced because it does not have a Lock node:
    // the locking was done in the interpreter.
    // This is similar to the Coarsened case when the Lock node is eliminated
    // and, as a result, the region is marked as Unbalanced.
250
251 // Emulate Coarsened state transition from Regular to Unbalanced.
252 osr_box->set_coarsened();
253 osr_box->set_unbalanced();
254
255 Node* box = _gvn.transform(osr_box);
256
257 // Displaced headers and locked objects are interleaved in the
258 // temp OSR buffer. We only copy the locked objects out here.
259 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
260 Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
261 // Try and copy the displaced header to the BoxNode
262 Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
263
264 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, MemNode::unordered);
265
266 // Build a bogus FastLockNode (no code will be generated) and push the
267 // monitor into our debug info.
268 const FastLockNode *flock = _gvn.transform(new FastLockNode( nullptr, lock_object, box ))->as_FastLock();
269 map()->push_monitor(flock);
270
    // If the lock is our method synchronization lock, tuck it away in
    // _synch_lock for return and rethrow exit paths.
273 if (index == 0 && method()->is_synchronized()) {
274 _synch_lock = flock;
275 }
276 }
277
278 // Use the raw liveness computation to make sure that unexpected
279 // values don't propagate into the OSR frame.
280 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
281 if (!live_locals.is_valid()) {
282 // Degenerate or breakpointed method.
283 assert(false, "OSR in empty or breakpointed method");
284 C->record_method_not_compilable("OSR in empty or breakpointed method");
285 return;
286 }
287
288 // Extract the needed locals from the interpreter frame.
289 Node *locals_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals-1)*wordSize);
290
291 // find all the locals that the interpreter thinks contain live oops
292 const ResourceBitMap live_oops = method()->live_local_oops_at_bci(osr_bci());
293 for (index = 0; index < max_locals; index++) {
294
295 if (!live_locals.at(index)) {
296 continue;
297 }
298
299 const Type *type = osr_block->local_type_at(index);
300
301 if (type->isa_oopptr() != nullptr) {
302
303 // 6403625: Verify that the interpreter oopMap thinks that the oop is live
304 // else we might load a stale oop if the MethodLiveness disagrees with the
305 // result of the interpreter. If the interpreter says it is dead we agree
306 // by making the value go to top.
307 //
308
309 if (!live_oops.at(index)) {
310 if (C->log() != nullptr) {
311 C->log()->elem("OSR_mismatch local_index='%d'",index);
312 }
313 set_local(index, null());
314 // and ignore it for the loads
315 continue;
316 }
317 }
318
319 // Filter out TOP, HALF, and BOTTOM. (Cf. ensure_phi.)
320 if (type == Type::TOP || type == Type::HALF) {
321 continue;
322 }
323 // If the type falls to bottom, then this must be a local that
324 // is mixing ints and oops or some such. Forcing it to top
325 // makes it go dead.
326 if (type == Type::BOTTOM) {
327 continue;
328 }
329 // Construct code to access the appropriate local.
330 Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);
331 set_local(index, value);
332 }
333
334 // Extract the needed stack entries from the interpreter frame.
335 for (index = 0; index < sp(); index++) {
336 const Type *type = osr_block->stack_type_at(index);
337 if (type != Type::TOP) {
338 // Currently the compiler bails out when attempting to on stack replace
339 // at a bci with a non-empty stack. We should not reach here.
340 ShouldNotReachHere();
341 }
342 }
343
  // End the OSR migration; the runtime call lets the temporary OSR buffer be freed.
345 make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
346 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
347 "OSR_migration_end", TypeRawPtr::BOTTOM,
348 osr_buf);
349
350 // Now that the interpreter state is loaded, make sure it will match
351 // at execution time what the compiler is expecting now:
352 SafePointNode* bad_type_exit = clone_map();
353 bad_type_exit->set_control(new RegionNode(1));
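  // This region starts with no inputs; check_interpreter_type() adds one
  // control edge for every value whose runtime type might not match the
  // type predicted by ciTypeFlow.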
354
355 assert(osr_block->flow()->jsrs()->size() == 0, "should be no jsrs live at osr point");
356 for (index = 0; index < max_locals; index++) {
357 if (stopped()) break;
358 Node* l = local(index);
359 if (l->is_top()) continue; // nothing here
360 const Type *type = osr_block->local_type_at(index);
361 if (type->isa_oopptr() != nullptr) {
362 if (!live_oops.at(index)) {
363 // skip type check for dead oops
364 continue;
365 }
366 }
367 if (osr_block->flow()->local_type_at(index)->is_return_address()) {
368 // In our current system it's illegal for jsr addresses to be
369 // live into an OSR entry point because the compiler performs
      // inlining of jsrs. ciTypeFlow has a bailout that detects this
      // case and aborts the compile if addresses are live into an OSR
      // entry point. Because of that we can assume that any address
      // locals at the OSR entry point are dead. Method liveness
      // isn't precise enough to figure out that they are dead in all
      // cases, so simply skip checking address locals altogether.
      // Any type check is guaranteed to fail since the
377 // interpreter type is the result of a load which might have any
378 // value and the expected type is a constant.
379 continue;
380 }
381 const TypeKlassPtr* klass_type = nullptr;
382 if (type->isa_oopptr()) {
383 klass_type = TypeKlassPtr::make(osr_block->flow()->local_type_at(index)->unwrap()->as_klass(), Type::ignore_interfaces);
384 klass_type = klass_type->try_improve();
385 }
386 bool is_early_larval = osr_block->flow()->local_type_at(index)->is_early_larval();
387 set_local(index, check_interpreter_type(l, type, klass_type, bad_type_exit, is_early_larval));
388 }
389
390 for (index = 0; index < sp(); index++) {
391 if (stopped()) break;
392 Node* l = stack(index);
393 if (l->is_top()) continue; // nothing here
394 const Type* type = osr_block->stack_type_at(index);
395 const TypeKlassPtr* klass_type = nullptr;
396 if (type->isa_oopptr()) {
397 klass_type = TypeKlassPtr::make(osr_block->flow()->stack_type_at(index)->unwrap()->as_klass(), Type::ignore_interfaces);
398 klass_type = klass_type->try_improve();
399 }
400 bool is_early_larval = osr_block->flow()->stack_type_at(index)->is_early_larval();
401 set_stack(index, check_interpreter_type(l, type, klass_type, bad_type_exit, is_early_larval));
402 }
403
404 if (bad_type_exit->control()->req() > 1) {
405 // Build an uncommon trap here, if any inputs can be unexpected.
406 bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
407 record_for_igvn(bad_type_exit->control());
408 SafePointNode* types_are_good = map();
409 set_map(bad_type_exit);
410 // The unexpected type happens because a new edge is active
411 // in the CFG, which typeflow had previously ignored.
412 // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
413 // This x will be typed as Integer if notReached is not yet linked.
414 // It could also happen due to a problem in ciTypeFlow analysis.
415 uncommon_trap(Deoptimization::Reason_constraint,
416 Deoptimization::Action_reinterpret);
417 set_map(types_are_good);
418 }
419 }
420
421 //------------------------------Parse------------------------------------------
422 // Main parser constructor.
423 Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
424 : _exits(caller)
425 {
426 // Init some variables
427 _caller = caller;
428 _method = parse_method;
429 _expected_uses = expected_uses;
430 _depth = 1 + (caller->has_method() ? caller->depth() : 0);
431 _wrote_final = false;
432 _wrote_volatile = false;
433 _wrote_stable = false;
434 _wrote_fields = false;
435 _alloc_with_final_or_stable = nullptr;
436 _block = nullptr;
437 _first_return = true;
438 _replaced_nodes_for_exceptions = false;
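  // Remember the node-index watermark: nodes created while parsing this
  // method have an _idx of at least _new_idx (used when transferring and
  // applying replaced nodes).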
439 _new_idx = C->unique();
440 DEBUG_ONLY(_entry_bci = UnknownBci);
441 DEBUG_ONLY(_block_count = -1);
442 DEBUG_ONLY(_blocks = (Block*)-1);
443 #ifndef PRODUCT
444 if (PrintCompilation || PrintOpto) {
445 // Make sure I have an inline tree, so I can print messages about it.
446 InlineTree::find_subtree_from_root(C->ilt(), caller, parse_method);
447 }
448 _max_switch_depth = 0;
449 _est_switch_depth = 0;
450 #endif
451
452 if (parse_method->has_reserved_stack_access()) {
453 C->set_has_reserved_stack_access(true);
454 }
455
456 if (parse_method->is_synchronized() || parse_method->has_monitor_bytecodes()) {
457 C->set_has_monitors(true);
458 }
459
460 if (parse_method->is_scoped()) {
461 C->set_has_scoped_access(true);
462 }
463
464 _iter.reset_to_method(method());
465 C->set_has_loops(C->has_loops() || method()->has_loops());
466
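  // Compute a scaling factor for profile data: if this call site accounts for
  // only a fraction of the method's interpreter invocations, scale the
  // profile counts down accordingly.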
467 if (_expected_uses <= 0) {
468 _prof_factor = 1;
469 } else {
470 float prof_total = parse_method->interpreter_invocation_count();
471 if (prof_total <= _expected_uses) {
472 _prof_factor = 1;
473 } else {
474 _prof_factor = _expected_uses / prof_total;
475 }
476 }
477
478 CompileLog* log = C->log();
479 if (log != nullptr) {
480 log->begin_head("parse method='%d' uses='%f'",
481 log->identify(parse_method), expected_uses);
482 if (depth() == 1 && C->is_osr_compilation()) {
483 log->print(" osr_bci='%d'", C->entry_bci());
484 }
485 log->stamp();
486 log->end_head();
487 }
488
489 // Accumulate deoptimization counts.
490 // (The range_check and store_check counts are checked elsewhere.)
491 ciMethodData* md = method()->method_data();
492 for (uint reason = 0; reason < md->trap_reason_limit(); reason++) {
493 uint md_count = md->trap_count(reason);
494 if (md_count != 0) {
495 if (md_count >= md->trap_count_limit()) {
496 md_count = md->trap_count_limit() + md->overflow_trap_count();
497 }
498 uint total_count = C->trap_count(reason);
499 uint old_count = total_count;
500 total_count += md_count;
501 // Saturate the add if it overflows.
502 if (total_count < old_count || total_count < md_count)
503 total_count = (uint)-1;
504 C->set_trap_count(reason, total_count);
505 if (log != nullptr)
506 log->elem("observe trap='%s' count='%d' total='%d'",
507 Deoptimization::trap_reason_name(reason),
508 md_count, total_count);
509 }
510 }
511 // Accumulate total sum of decompilations, also.
512 C->set_decompile_count(C->decompile_count() + md->decompile_count());
513
514 if (log != nullptr && method()->has_exception_handlers()) {
515 log->elem("observe that='has_exception_handlers'");
516 }
517
518 assert(InlineTree::check_can_parse(method()) == nullptr, "Can not parse this method, cutout earlier");
519 assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");
520
521 // Always register dependence if JVMTI is enabled, because
522 // either breakpoint setting or hotswapping of methods may
523 // cause deoptimization.
524 if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
525 C->dependencies()->assert_evol_method(method());
526 }
527
528 NOT_PRODUCT(methods_seen++);
529
530 // Do some special top-level things.
531 if (depth() == 1 && C->is_osr_compilation()) {
532 _tf = C->tf(); // the OSR entry type is different
533 _entry_bci = C->entry_bci();
534 _flow = method()->get_osr_flow_analysis(osr_bci());
535 } else {
536 _tf = TypeFunc::make(method());
537 _entry_bci = InvocationEntryBci;
538 _flow = method()->get_flow_analysis();
539 }
540
541 if (_flow->failing()) {
542 // TODO Adding a trap due to an unloaded return type in ciTypeFlow::StateVector::do_invoke
543 // can lead to this. Re-enable once 8284443 is fixed.
544 //assert(false, "type flow analysis failed during parsing");
545 C->record_method_not_compilable(_flow->failure_reason());
546 #ifndef PRODUCT
547 if (PrintOpto && (Verbose || WizardMode)) {
548 if (is_osr_parse()) {
549 tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
550 } else {
551 tty->print_cr("type flow bailout: %s", _flow->failure_reason());
552 }
553 if (Verbose) {
554 method()->print();
555 method()->print_codes();
556 _flow->print();
557 }
558 }
559 #endif
560 }
561
562 #ifdef ASSERT
563 if (depth() == 1) {
564 assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
565 } else {
566 assert(!this->is_osr_parse(), "no recursive OSR");
567 }
568 #endif
569
570 #ifndef PRODUCT
571 if (_flow->has_irreducible_entry()) {
572 C->set_parsed_irreducible_loop(true);
573 }
574
575 methods_parsed++;
576 // add method size here to guarantee that inlined methods are added too
577 if (CITime)
578 _total_bytes_compiled += method()->code_size();
579
580 show_parse_info();
581 #endif
582
583 if (failing()) {
584 if (log) log->done("parse");
585 return;
586 }
587
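  // Register the top node with GVN so it has a type recorded before parsing begins.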
588 gvn().transform(top());
589
590 // Import the results of the ciTypeFlow.
591 init_blocks();
592
593 // Merge point for all normal exits
594 build_exits();
595
596 // Setup the initial JVM state map.
597 SafePointNode* entry_map = create_entry_map();
598
599 // Check for bailouts during map initialization
600 if (failing() || entry_map == nullptr) {
601 if (log) log->done("parse");
602 return;
603 }
604
605 Node_Notes* caller_nn = C->default_node_notes();
606 // Collect debug info for inlined calls unless -XX:-DebugInlinedCalls.
607 if (DebugInlinedCalls || depth() == 1) {
608 C->set_default_node_notes(make_node_notes(caller_nn));
609 }
610
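  // For an OSR compilation the single real parameter is the interpreter's OSR
  // buffer. Pull it out of the entry map, clear its slot, and rebuild the
  // locals and monitors from it.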
611 if (is_osr_parse()) {
612 Node* osr_buf = entry_map->in(TypeFunc::Parms+0);
613 entry_map->set_req(TypeFunc::Parms+0, top());
614 set_map(entry_map);
615 load_interpreter_state(osr_buf);
616 } else {
617 set_map(entry_map);
618 do_method_entry();
619 }
620
621 if (depth() == 1 && !failing()) {
622 if (C->clinit_barrier_on_entry()) {
623 // Add check to deoptimize the nmethod once the holder class is fully initialized
624 clinit_deopt();
625 }
626 }
627
628 // Check for bailouts during method entry.
629 if (failing()) {
630 if (log) log->done("parse");
631 C->set_default_node_notes(caller_nn);
632 return;
633 }
634
635 // Handle inline type arguments
636 int arg_size = method()->arg_size();
637 for (int i = 0; i < arg_size; i++) {
638 Node* parm = local(i);
639 const Type* t = _gvn.type(parm);
640 if (t->is_inlinetypeptr()) {
641 // If the parameter is a value object, try to scalarize it if we know that it is unrestricted (not early larval)
642 // Parameters are non-larval except the receiver of a constructor, which must be an early larval object.
643 if (!(method()->is_object_constructor() && i == 0)) {
644 // Create InlineTypeNode from the oop and replace the parameter
645 Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass());
646 replace_in_map(parm, vt);
647 }
648 } else if (UseTypeSpeculation && (i == (arg_size - 1)) && !is_osr_parse() && depth() == 1 && method()->has_vararg() && t->isa_aryptr()) {
649 // Speculate on varargs Object array being the default array refined type. The assumption is
650 // that a vararg method test(Object... o) is often called as test(o1, o2, o3). javac will
651 // translate the call so that the caller will create a new default array of Object, put o1,
652 // o2, o3 into the newly created array, then invoke the method test. This only makes sense if
653 // the method we are parsing is the top-level method of the compilation unit. Otherwise, if
654 // it is truly called according to our assumption, we must know the exact type of the
655 // argument because the allocation happens inside the compilation unit.
656 const TypePtr* spec_type = (t->speculative() != nullptr) ? t->speculative() : t->remove_speculative()->is_aryptr();
657 ciSignature* method_signature = method()->signature();
658 ciType* parm_citype = method_signature->type_at(method_signature->count() - 1);
659 if (!parm_citype->is_obj_array_klass()) {
660 continue;
661 }
662
663 ciObjArrayKlass* spec_citype = ciObjArrayKlass::make(parm_citype->as_obj_array_klass()->element_klass(), true);
664 const Type* improved_spec_type = TypeKlassPtr::make(spec_citype, Type::trust_interfaces)->as_instance_type();
665 improved_spec_type = improved_spec_type->join(spec_type)->join(TypePtr::NOTNULL);
666 if (improved_spec_type->empty()) {
667 continue;
668 }
669
670 const TypePtr* improved_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, improved_spec_type->is_ptr());
671 improved_type = improved_type->join_speculative(t)->is_ptr();
672 if (improved_type != t) {
673 Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, improved_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing));
674 replace_in_map(parm, cast);
675 }
676 }
677 }
678
679 entry_map = map(); // capture any changes performed by method setup code
680 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
681
682 // We begin parsing as if we have just encountered a jump to the
683 // method entry.
684 Block* entry_block = start_block();
685 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
686 set_map_clone(entry_map);
687 merge_common(entry_block, entry_block->next_path_num());
688
689 #ifndef PRODUCT
690 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
691 set_parse_histogram( parse_histogram_obj );
692 #endif
693
694 // Parse all the basic blocks.
695 do_all_blocks();
696
697 // Check for bailouts during conversion to graph
698 if (failing()) {
699 if (log) log->done("parse");
700 return;
701 }
702
703 // Fix up all exiting control flow.
704 set_map(entry_map);
705 do_exits();
706
707 // Only reset this now, to make sure that debug information emitted
708 // for exiting control flow still refers to the inlined method.
709 C->set_default_node_notes(caller_nn);
710
711 if (log) log->done("parse nodes='%d' live='%d' memory='%zu'",
712 C->unique(), C->live_nodes(), C->node_arena()->used());
713 }
714
715 //---------------------------do_all_blocks-------------------------------------
716 void Parse::do_all_blocks() {
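  // With irreducible loop entries a single RPO pass may not reach every block,
  // so the outer loop below repeats until no further progress is made.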
717 bool has_irreducible = flow()->has_irreducible_entry();
718
719 // Walk over all blocks in Reverse Post-Order.
720 while (true) {
721 bool progress = false;
722 for (int rpo = 0; rpo < block_count(); rpo++) {
723 Block* block = rpo_at(rpo);
724
725 if (block->is_parsed()) continue;
726
727 if (!block->is_merged()) {
728 // Dead block, no state reaches this block
729 continue;
730 }
731
732 // Prepare to parse this block.
733 load_state_from(block);
734
735 if (stopped()) {
736 // Block is dead.
737 continue;
738 }
739
740 NOT_PRODUCT(blocks_parsed++);
741
742 progress = true;
743 if (block->is_loop_head() || block->is_handler() || (has_irreducible && !block->is_ready())) {
744 // Not all preds have been parsed. We must build phis everywhere.
745 // (Note that dead locals do not get phis built, ever.)
746 ensure_phis_everywhere();
747
748 if (block->is_SEL_head()) {
749 // Add predicate to single entry (not irreducible) loop head.
750 assert(!block->has_merged_backedge(), "only entry paths should be merged for now");
751 // Predicates may have been added after a dominating if
752 if (!block->has_predicates()) {
753 // Need correct bci for predicate.
754 // It is fine to set it here since do_one_block() will set it anyway.
755 set_parse_bci(block->start());
756 add_parse_predicates();
757 }
758 // Add new region for back branches.
          int edges = block->pred_count() - block->preds_parsed(); // +1 for original region
760 RegionNode *r = new RegionNode(edges+1);
761 _gvn.set_type(r, Type::CONTROL);
762 record_for_igvn(r);
763 r->init_req(edges, control());
764 set_control(r);
765 block->copy_irreducible_status_to(r, jvms());
766 // Add new phis.
767 ensure_phis_everywhere();
768 }
769
770 // Leave behind an undisturbed copy of the map, for future merges.
771 set_map(clone_map());
772 }
773
774 if (control()->is_Region() && !block->is_loop_head() && !has_irreducible && !block->is_handler()) {
775 // In the absence of irreducible loops, the Region and Phis
776 // associated with a merge that doesn't involve a backedge can
777 // be simplified now since the RPO parsing order guarantees
778 // that any path which was supposed to reach here has already
779 // been parsed or must be dead.
780 Node* c = control();
781 Node* result = _gvn.transform(control());
782 if (c != result && TraceOptoParse) {
783 tty->print_cr("Block #%d replace %d with %d", block->rpo(), c->_idx, result->_idx);
784 }
785 if (result != top()) {
786 record_for_igvn(result);
787 }
788 }
789
790 // Parse the block.
791 do_one_block();
792
793 // Check for bailouts.
794 if (failing()) return;
795 }
796
797 // with irreducible loops multiple passes might be necessary to parse everything
798 if (!has_irreducible || !progress) {
799 break;
800 }
801 }
802
803 #ifndef PRODUCT
804 blocks_seen += block_count();
805
806 // Make sure there are no half-processed blocks remaining.
807 // Every remaining unprocessed block is dead and may be ignored now.
808 for (int rpo = 0; rpo < block_count(); rpo++) {
809 Block* block = rpo_at(rpo);
810 if (!block->is_parsed()) {
811 if (TraceOptoParse) {
812 tty->print_cr("Skipped dead block %d at bci:%d", rpo, block->start());
813 }
814 assert(!block->is_merged(), "no half-processed blocks");
815 }
816 }
817 #endif
818 }
819
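// Normalize an int value to the method's declared sub-int return type:
// sign-extend bytes and shorts, zero-extend chars, and mask booleans to a single bit.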
820 static Node* mask_int_value(Node* v, BasicType bt, PhaseGVN* gvn) {
821 switch (bt) {
822 case T_BYTE:
823 v = gvn->transform(new LShiftINode(v, gvn->intcon(24)));
824 v = gvn->transform(new RShiftINode(v, gvn->intcon(24)));
825 break;
826 case T_SHORT:
827 v = gvn->transform(new LShiftINode(v, gvn->intcon(16)));
828 v = gvn->transform(new RShiftINode(v, gvn->intcon(16)));
829 break;
830 case T_CHAR:
831 v = gvn->transform(new AndINode(v, gvn->intcon(0xFFFF)));
832 break;
833 case T_BOOLEAN:
834 v = gvn->transform(new AndINode(v, gvn->intcon(0x1)));
835 break;
836 default:
837 break;
838 }
839 return v;
840 }
841
842 //-------------------------------build_exits----------------------------------
843 // Build normal and exceptional exit merge points.
844 void Parse::build_exits() {
845 // make a clone of caller to prevent sharing of side-effects
846 _exits.set_map(_exits.clone_map());
847 _exits.clean_stack(_exits.sp());
848 _exits.sync_jvms();
849
850 RegionNode* region = new RegionNode(1);
851 record_for_igvn(region);
852 gvn().set_type_bottom(region);
853 _exits.set_control(region);
854
855 // Note: iophi and memphi are not transformed until do_exits.
856 Node* iophi = new PhiNode(region, Type::ABIO);
857 Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
858 gvn().set_type_bottom(iophi);
859 gvn().set_type_bottom(memphi);
860 _exits.set_i_o(iophi);
861 _exits.set_all_memory(memphi);
862
863 // Add a return value to the exit state. (Do not push it yet.)
864 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
865 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
866 if (ret_type->isa_int()) {
867 BasicType ret_bt = method()->return_type()->basic_type();
868 if (ret_bt == T_BOOLEAN ||
869 ret_bt == T_CHAR ||
870 ret_bt == T_BYTE ||
871 ret_bt == T_SHORT) {
872 ret_type = TypeInt::INT;
873 }
874 }
875
876 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
877 // becomes loaded during the subsequent parsing, the loaded and unloaded
878 // types will not join when we transform and push in do_exits().
879 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
880 if (ret_oop_type && !ret_oop_type->is_loaded()) {
881 ret_type = TypeOopPtr::BOTTOM;
882 }
883 int ret_size = type2size[ret_type->basic_type()];
884 Node* ret_phi = new PhiNode(region, ret_type);
885 gvn().set_type_bottom(ret_phi);
886 _exits.ensure_stack(ret_size);
887 assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
888 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
889 _exits.set_argument(0, ret_phi); // here is where the parser finds it
890 // Note: ret_phi is not yet pushed, until do_exits.
891 }
892 }
893
894 //----------------------------build_start_state-------------------------------
895 // Construct a state which contains only the incoming arguments from an
896 // unknown caller. The method & bci will be null & InvocationEntryBci.
897 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
898 int arg_size = tf->domain_sig()->cnt();
899 int max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
900 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
901 SafePointNode* map = new SafePointNode(max_size, jvms);
902 jvms->set_map(map);
903 record_for_igvn(map);
904 assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
905 Node_Notes* old_nn = default_node_notes();
906 if (old_nn != nullptr && has_method()) {
907 Node_Notes* entry_nn = old_nn->clone(this);
908 JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
909 entry_jvms->set_offsets(0);
910 entry_jvms->set_bci(entry_bci());
911 entry_nn->set_jvms(entry_jvms);
912 set_default_node_notes(entry_nn);
913 }
914 PhaseGVN& gvn = *initial_gvn();
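  // 'i' walks the signature slots (including Type::HALF halves), 'j' indexes
  // the incoming parameter projections of the StartNode, and 'arg_num' counts
  // Java-level arguments for the is_scalarized_arg() query.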
915 uint i = 0;
916 int arg_num = 0;
917 for (uint j = 0; i < (uint)arg_size; i++) {
918 const Type* t = tf->domain_sig()->field_at(i);
919 Node* parm = nullptr;
920 if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
921 // Inline type arguments are not passed by reference: we get an argument per
922 // field of the inline type. Build InlineTypeNodes from the inline type arguments.
923 GraphKit kit(jvms, &gvn);
924 kit.set_control(map->control());
925 Node* old_mem = map->memory();
926 // Use immutable memory for inline type loads and restore it below
927 kit.set_all_memory(C->immutable_memory());
928 parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
929 map->set_control(kit.control());
930 map->set_memory(old_mem);
931 } else {
932 parm = gvn.transform(new ParmNode(start, j++));
933 }
934 map->init_req(i, parm);
935 // Record all these guys for later GVN.
936 record_for_igvn(parm);
937 if (i >= TypeFunc::Parms && t != Type::HALF) {
938 arg_num++;
939 }
940 }
941 for (; i < map->req(); i++) {
942 map->init_req(i, top());
943 }
944 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
945 set_default_node_notes(old_nn);
946 return jvms;
947 }
948
949 //-----------------------------make_node_notes---------------------------------
950 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
951 if (caller_nn == nullptr) return nullptr;
952 Node_Notes* nn = caller_nn->clone(C);
953 JVMState* caller_jvms = nn->jvms();
954 JVMState* jvms = new (C) JVMState(method(), caller_jvms);
955 jvms->set_offsets(0);
956 jvms->set_bci(_entry_bci);
957 nn->set_jvms(jvms);
958 return nn;
959 }
960
961
962 //--------------------------return_values--------------------------------------
963 void Compile::return_values(JVMState* jvms) {
964 GraphKit kit(jvms);
965 Node* ret = new ReturnNode(TypeFunc::Parms,
966 kit.control(),
967 kit.i_o(),
968 kit.reset_memory(),
969 kit.frameptr(),
970 kit.returnadr());
971 // Add zero or 1 return values
972 int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
973 if (ret_size > 0) {
974 kit.inc_sp(-ret_size); // pop the return value(s)
975 kit.sync_jvms();
976 Node* res = kit.argument(0);
977 if (tf()->returns_inline_type_as_fields()) {
978 // Multiple return values (inline type fields): add as many edges
979 // to the Return node as returned values.
980 InlineTypeNode* vt = res->as_InlineType();
981 ret->add_req_batch(nullptr, tf()->range_cc()->cnt() - TypeFunc::Parms);
982 if (vt->is_allocated(&kit.gvn()) && !StressCallingConvention) {
983 ret->init_req(TypeFunc::Parms, vt);
984 } else {
985 // Return the tagged klass pointer to signal scalarization to the caller
986 Node* tagged_klass = vt->tagged_klass(kit.gvn());
987 // Return null if the inline type is null (null marker field is not set)
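        // Sign-extend the one-bit null marker into a full 64-bit mask (all ones
        // when set, all zeros when clear) and AND it with the tagged klass.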
988 Node* conv = kit.gvn().transform(new ConvI2LNode(vt->get_null_marker()));
989 Node* shl = kit.gvn().transform(new LShiftLNode(conv, kit.intcon(63)));
990 Node* shr = kit.gvn().transform(new RShiftLNode(shl, kit.intcon(63)));
991 tagged_klass = kit.gvn().transform(new AndLNode(tagged_klass, shr));
992 ret->init_req(TypeFunc::Parms, tagged_klass);
993 }
994 uint idx = TypeFunc::Parms + 1;
995 vt->pass_fields(&kit, ret, idx, false, false);
996 } else {
997 ret->add_req(res);
998 // Note: The second dummy edge is not needed by a ReturnNode.
999 }
1000 }
1001 // bind it to root
1002 root()->add_req(ret);
1003 record_for_igvn(ret);
1004 initial_gvn()->transform(ret);
1005 }
1006
1007 //------------------------rethrow_exceptions-----------------------------------
1008 // Bind all exception states in the list into a single RethrowNode.
1009 void Compile::rethrow_exceptions(JVMState* jvms) {
1010 GraphKit kit(jvms);
1011 if (!kit.has_exceptions()) return; // nothing to generate
1012 // Load my combined exception state into the kit, with all phis transformed:
1013 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
1014 Node* ex_oop = kit.use_exception_state(ex_map);
1015 RethrowNode* exit = new RethrowNode(kit.control(),
1016 kit.i_o(), kit.reset_memory(),
1017 kit.frameptr(), kit.returnadr(),
1018 // like a return but with exception input
1019 ex_oop);
1020 // bind to root
1021 root()->add_req(exit);
1022 record_for_igvn(exit);
1023 initial_gvn()->transform(exit);
1024 }
1025
1026 //---------------------------do_exceptions-------------------------------------
1027 // Process exceptions arising from the current bytecode.
1028 // Send caught exceptions to the proper handler within this method.
1029 // Unhandled exceptions feed into _exit.
1030 void Parse::do_exceptions() {
1031 if (!has_exceptions()) return;
1032
1033 if (failing()) {
1034 // Pop them all off and throw them away.
1035 while (pop_exception_state() != nullptr) ;
1036 return;
1037 }
1038
1039 PreserveJVMState pjvms(this, false);
1040
1041 SafePointNode* ex_map;
1042 while ((ex_map = pop_exception_state()) != nullptr) {
1043 if (!method()->has_exception_handlers()) {
1044 // Common case: Transfer control outward.
1045 // Doing it this early allows the exceptions to common up
1046 // even between adjacent method calls.
1047 throw_to_exit(ex_map);
1048 } else {
1049 // Have to look at the exception first.
1050 assert(stopped(), "catch_inline_exceptions trashes the map");
1051 catch_inline_exceptions(ex_map);
1052 stop_and_kill_map(); // we used up this exception state; kill it
1053 }
1054 }
1055
1056 // We now return to our regularly scheduled program:
1057 }
1058
1059 //---------------------------throw_to_exit-------------------------------------
1060 // Merge the given map into an exception exit from this method.
1061 // The exception exit will handle any unlocking of receiver.
1062 // The ex_oop must be saved within the ex_map, unlike merge_exception.
1063 void Parse::throw_to_exit(SafePointNode* ex_map) {
1064 // Pop the JVMS to (a copy of) the caller.
1065 GraphKit caller;
1066 caller.set_map_clone(_caller->map());
1067 caller.set_bci(_caller->bci());
1068 caller.set_sp(_caller->sp());
1069 // Copy out the standard machine state:
1070 for (uint i = 0; i < TypeFunc::Parms; i++) {
1071 caller.map()->set_req(i, ex_map->in(i));
1072 }
1073 if (ex_map->has_replaced_nodes()) {
1074 _replaced_nodes_for_exceptions = true;
1075 }
1076 caller.map()->transfer_replaced_nodes_from(ex_map, _new_idx);
1077 // ...and the exception:
1078 Node* ex_oop = saved_ex_oop(ex_map);
1079 SafePointNode* caller_ex_map = caller.make_exception_state(ex_oop);
1080 // Finally, collect the new exception state in my exits:
1081 _exits.add_exception_state(caller_ex_map);
1082 }
1083
1084 //------------------------------do_exits---------------------------------------
1085 void Parse::do_exits() {
1086 set_parse_bci(InvocationEntryBci);
1087
1088 // Now peephole on the return bits
1089 Node* region = _exits.control();
1090 _exits.set_control(gvn().transform(region));
1091
1092 Node* iophi = _exits.i_o();
1093 _exits.set_i_o(gvn().transform(iophi));
1094
1095 // Figure out if we need to emit the trailing barrier. The barrier is only
1096 // needed in the constructors, and only in three cases:
1097 //
1098 // 1. The constructor wrote a final or a @Stable field. All these
1099 // initializations must be ordered before any code after the constructor
1100 // publishes the reference to the newly constructed object. Rather
1101 // than wait for the publication, we simply block the writes here.
1102 // Rather than put a barrier on only those writes which are required
1103 // to complete, we force all writes to complete.
1104 //
  // 2. An experimental VM option (AlwaysSafeConstructors) is used to force
  //    the barrier if any field was written in the constructor.
1107 //
1108 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1109 // support_IRIW_for_not_multiple_copy_atomic_cpu selects that
1110 // MemBarVolatile is used before volatile load instead of after volatile
1111 // store, so there's no barrier after the store.
1112 // We want to guarantee the same behavior as on platforms with total store
1113 // order, although this is not required by the Java memory model.
1114 // In this case, we want to enforce visibility of volatile field
1115 // initializations which are performed in constructors.
1116 // So as with finals, we add a barrier here.
1117 //
1118 // "All bets are off" unless the first publication occurs after a
1119 // normal return from the constructor. We do not attempt to detect
1120 // such unusual early publications. But no barrier is needed on
1121 // exceptional returns, since they cannot publish normally.
1122 //
1123 if ((method()->is_object_constructor() || method()->is_class_initializer()) &&
1124 (wrote_final() || wrote_stable() ||
1125 (AlwaysSafeConstructors && wrote_fields()) ||
1126 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1127 Node* recorded_alloc = alloc_with_final_or_stable();
1128 _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
1129 recorded_alloc);
1130
    // If the memory barrier is created for a final field write
    // and the allocation node does not escape the initializing method,
    // then the barrier introduced by the allocation node can be removed.
1134 if (DoEscapeAnalysis && (recorded_alloc != nullptr)) {
1135 AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc);
1136 alloc->compute_MemBar_redundancy(method());
1137 }
1138 if (PrintOpto && (Verbose || WizardMode)) {
1139 method()->print_name();
1140 tty->print_cr(" writes finals/@Stable and needs a memory barrier");
1141 }
1142 }
1143
1144 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1145 // transform each slice of the original memphi:
1146 mms.set_memory(_gvn.transform(mms.memory()));
1147 }
1148 // Clean up input MergeMems created by transforming the slices
1149 _gvn.transform(_exits.merged_memory());
1150
1151 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
1152 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
1153 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1154 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1155 // If the type we set for the ret_phi in build_exits() is too optimistic and
1156 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1157 // loading. It could also be due to an error, so mark this method as not compilable because
1158 // otherwise this could lead to an infinite compile loop.
1159 // In any case, this code path is rarely (and never in my testing) reached.
1160 C->record_method_not_compilable("Can't determine return type.");
1161 return;
1162 }
1163 if (ret_type->isa_int()) {
1164 BasicType ret_bt = method()->return_type()->basic_type();
1165 ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1166 }
1167 _exits.push_node(ret_type->basic_type(), ret_phi);
1168 }
1169
1170 // Note: Logic for creating and optimizing the ReturnNode is in Compile.
1171
1172 // Unlock along the exceptional paths.
1173 // This is done late so that we can common up equivalent exceptions
1174 // (e.g., null checks) arising from multiple points within this method.
1175 // See GraphKit::add_exception_state, which performs the commoning.
1176 bool do_synch = method()->is_synchronized();
1177
1178 // record exit from a method if compiled while Dtrace is turned on.
1179 if (do_synch || C->env()->dtrace_method_probes() || _replaced_nodes_for_exceptions) {
1180 // First move the exception list out of _exits:
1181 GraphKit kit(_exits.transfer_exceptions_into_jvms());
1182 SafePointNode* normal_map = kit.map(); // keep this guy safe
1183 // Now re-collect the exceptions into _exits:
1184 SafePointNode* ex_map;
1185 while ((ex_map = kit.pop_exception_state()) != nullptr) {
1186 Node* ex_oop = kit.use_exception_state(ex_map);
1187 // Force the exiting JVM state to have this method at InvocationEntryBci.
1188 // The exiting JVM state is otherwise a copy of the calling JVMS.
1189 JVMState* caller = kit.jvms();
1190 JVMState* ex_jvms = caller->clone_shallow(C);
1191 ex_jvms->bind_map(kit.clone_map());
1192 ex_jvms->set_bci( InvocationEntryBci);
1193 kit.set_jvms(ex_jvms);
1194 if (do_synch) {
1195 // Add on the synchronized-method box/object combo
1196 kit.map()->push_monitor(_synch_lock);
1197 // Unlock!
1198 kit.shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
1199 }
1200 if (C->env()->dtrace_method_probes()) {
1201 kit.make_dtrace_method_exit(method());
1202 }
1203 if (_replaced_nodes_for_exceptions) {
1204 kit.map()->apply_replaced_nodes(_new_idx);
1205 }
1206 // Done with exception-path processing.
1207 ex_map = kit.make_exception_state(ex_oop);
1208 assert(ex_jvms->same_calls_as(ex_map->jvms()), "sanity");
1209 // Pop the last vestige of this method:
1210 caller->clone_shallow(C)->bind_map(ex_map);
1211 _exits.push_exception_state(ex_map);
1212 }
1213 assert(_exits.map() == normal_map, "keep the same return state");
1214 }
1215
1216 {
1217 // Capture very early exceptions (receiver null checks) from caller JVMS
1218 GraphKit caller(_caller);
1219 SafePointNode* ex_map;
1220 while ((ex_map = caller.pop_exception_state()) != nullptr) {
1221 _exits.add_exception_state(ex_map);
1222 }
1223 }
1224 _exits.map()->apply_replaced_nodes(_new_idx);
1225 }
1226
1227 //-----------------------------create_entry_map-------------------------------
1228 // Initialize our parser map to contain the types at method entry.
1229 // For OSR, the map contains a single RawPtr parameter.
1230 // Initial monitor locking for sync. methods is performed by do_method_entry.
1231 SafePointNode* Parse::create_entry_map() {
1232 // Check for really stupid bail-out cases.
1233 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1234 if (len >= 32760) {
1235 // Bailout expected, this is a very rare edge case.
1236 C->record_method_not_compilable("too many local variables");
1237 return nullptr;
1238 }
1239
1240 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1241 _caller->map()->delete_replaced_nodes();
1242
1243 // If this is an inlined method, we may have to do a receiver null check.
1244 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1245 GraphKit kit(_caller);
1246 Node* receiver = kit.argument(0);
1247 Node* null_free = kit.null_check_receiver_before_call(method());
1248 _caller = kit.transfer_exceptions_into_jvms();
1249
1250 if (kit.stopped()) {
1251 _exits.add_exception_states_from(_caller);
1252 _exits.set_jvms(_caller);
1253 return nullptr;
1254 }
1255 }
1256
1257 assert(method() != nullptr, "parser must have a method");
1258
1259 // Create an initial safepoint to hold JVM state during parsing
1260 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1261 set_map(new SafePointNode(len, jvms));
1262
1263 // Capture receiver info for compiled lambda forms.
1264 if (method()->is_compiled_lambda_form()) {
1265 ciInstance* recv_info = _caller->compute_receiver_info(method());
1266 jvms->set_receiver_info(recv_info);
1267 }
1268
1269 jvms->set_map(map());
1270 record_for_igvn(map());
1271 assert(jvms->endoff() == len, "correct jvms sizing");
1272
1273 SafePointNode* inmap = _caller->map();
1274 assert(inmap != nullptr, "must have inmap");
1275 // In case of null check on receiver above
1276 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1277
1278 uint i;
1279
1280 // Pass thru the predefined input parameters.
1281 for (i = 0; i < TypeFunc::Parms; i++) {
1282 map()->init_req(i, inmap->in(i));
1283 }
1284
1285 if (depth() == 1) {
1286 assert(map()->memory()->Opcode() == Op_Parm, "");
1287 // Insert the memory aliasing node
1288 set_all_memory(reset_memory());
1289 }
1290 assert(merged_memory(), "");
1291
1292 // Now add the locals which are initially bound to arguments:
1293 uint arg_size = tf()->domain_sig()->cnt();
1294 ensure_stack(arg_size - TypeFunc::Parms); // OSR methods have funny args
1295 for (i = TypeFunc::Parms; i < arg_size; i++) {
1296 map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1297 }
1298
1299 // Clear out the rest of the map (locals and stack)
1300 for (i = arg_size; i < len; i++) {
1301 map()->init_req(i, top());
1302 }
1303
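  // Detach the freshly built map from the parser and return it as the entry state.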
1304 SafePointNode* entry_map = stop();
1305 return entry_map;
1306 }
1307
1308 //-----------------------------do_method_entry--------------------------------
1309 // Emit any code needed in the pseudo-block before BCI zero.
1310 // The main thing to do is lock the receiver of a synchronized method.
1311 void Parse::do_method_entry() {
1312 set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1313 set_sp(0); // Java Stack Pointer
1314
1315 NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
1316
1317 // Check if we need a membar at the beginning of the java.lang.Object
1318 // constructor to satisfy the memory model for strict fields.
1319 if (Arguments::is_valhalla_enabled() && method()->intrinsic_id() == vmIntrinsics::_Object_init) {
1320 Node* receiver_obj = local(0);
1321 const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1322 // If there's no exact type, check if the declared type has no implementors and add a dependency
1323 const TypeKlassPtr* klass_ptr = receiver_type->as_klass_type(/* try_for_exact= */ true);
1324 ciType* klass = klass_ptr->klass_is_exact() ? klass_ptr->exact_klass() : nullptr;
1325 if (klass != nullptr && klass->is_instance_klass()) {
1326 // Exact receiver type, check if there is a strict field
1327 ciInstanceKlass* holder = klass->as_instance_klass();
1328 for (int i = 0; i < holder->nof_nonstatic_fields(); i++) {
1329 ciField* field = holder->nonstatic_field_at(i);
1330 if (field->is_strict()) {
1331 // Found a strict field, a membar is needed
1332 AllocateNode* alloc = AllocateNode::Ideal_allocation(receiver_obj);
1333 insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease, receiver_obj);
1334 if (DoEscapeAnalysis && (alloc != nullptr)) {
1335 alloc->compute_MemBar_redundancy(method());
1336 }
1337 break;
1338 }
1339 }
1340 } else if (klass == nullptr) {
1341 // We can't statically determine the type of the receiver and therefore need
1342 // to put a membar here because it could have a strict field.
1343 insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease);
1344 }
1345 }
1346
1347 if (C->env()->dtrace_method_probes()) {
1348 make_dtrace_method_entry(method());
1349 }
1350
1351 #ifdef ASSERT
1352 // Narrow receiver type when it is too broad for the method being parsed.
1353 if (!method()->is_static()) {
1354 ciInstanceKlass* callee_holder = method()->holder();
1355 const Type* holder_type = TypeInstPtr::make(TypePtr::BotPTR, callee_holder, Type::trust_interfaces);
1356
1357 Node* receiver_obj = local(0);
1358 const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1359
1360 if (receiver_type != nullptr && !receiver_type->higher_equal(holder_type)) {
1361 // Receiver should always be a subtype of callee holder.
1362 // But, since C2 type system doesn't properly track interfaces,
1363 // the invariant can't be expressed in the type system for default methods.
1364 // Example: for unrelated C <: I and D <: I, (C `meet` D) = Object </: I.
1365 assert(callee_holder->is_interface(), "missing subtype check");
1366
1367 // Perform dynamic receiver subtype check against callee holder class w/ a halt on failure.
1368 Node* holder_klass = _gvn.makecon(TypeKlassPtr::make(callee_holder, Type::trust_interfaces));
1369 Node* not_subtype_ctrl = gen_subtype_check(receiver_obj, holder_klass);
1370 assert(!stopped(), "not a subtype");
1371
1372 halt(not_subtype_ctrl, frameptr(), "failed receiver subtype check");
1373 }
1374 }
1375 #endif // ASSERT
1376
1377 // If the method is synchronized, we need to construct a lock node, attach
1378 // it to the Start node, and pin it there.
1379 if (method()->is_synchronized()) {
1380 // Insert a FastLockNode right after the Start which takes as arguments
1381 // the current thread pointer, the "this" pointer & the address of the
1382 // stack slot pair used for the lock. The "this" pointer is a projection
1383 // off the start node, but the locking spot has to be constructed by
1384 // creating a ConLNode of 0, and boxing it with a BoxLockNode. The BoxLockNode
1385 // becomes the second argument to the FastLockNode call. The
1386 // FastLockNode becomes the new control parent to pin it to the start.
1387
1388 // Setup Object Pointer
1389 Node *lock_obj = nullptr;
1390 if (method()->is_static()) {
1391 ciInstance* mirror = _method->holder()->java_mirror();
1392 const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1393 lock_obj = makecon(t_lock);
1394 } else { // Else pass the "this" pointer,
1395 lock_obj = local(0); // which is Parm0 from StartNode
1396 assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
1397 }
1398 // Clear out dead values from the debug info.
1399 kill_dead_locals();
1400 // Build the FastLockNode
1401 _synch_lock = shared_lock(lock_obj);
1402 // Check for bailout in shared_lock
1403 if (failing()) { return; }
1404 }
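// Conceptually (per the JLS/JVMS), a synchronized method behaves as if its
// body were wrapped in monitorenter/monitorexit on a lock object: the
// receiver ("this") for instance methods, or the holder's java.lang.Class
// mirror for static methods. The FastLock/BoxLock pair built above is C2's
// expanded form of that implicit monitorenter; the matching unlock is
// emitted on method exit (see return_current).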
1405
1406 // Feed profiling data for parameters to the type system so it can
1407 // propagate it as speculative types
1408 record_profiled_parameters_for_speculation();
1409 }
1410
1411 //------------------------------init_blocks------------------------------------
1412 // Initialize our parser map to contain the types/monitors at method entry.
1413 void Parse::init_blocks() {
1414 // Create the blocks.
1415 _block_count = flow()->block_count();
1416 _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1417
1418 // Initialize the structs.
1419 for (int rpo = 0; rpo < block_count(); rpo++) {
1420 Block* block = rpo_at(rpo);
1421 new(block) Block(this, rpo);
1422 }
1423
1424 // Collect predecessor and successor information.
1425 for (int rpo = 0; rpo < block_count(); rpo++) {
1426 Block* block = rpo_at(rpo);
1427 block->init_graph(this);
1428 }
1429 }
1430
1431 //---------------------------------Block---------------------------------------
1432 Parse::Block::Block(Parse* outer, int rpo) : _live_locals() {
1433 _flow = outer->flow()->rpo_at(rpo);
1434 _pred_count = 0;
1435 _preds_parsed = 0;
1436 _count = 0;
1437 _is_parsed = false;
1438 _is_handler = false;
1439 _has_merged_backedge = false;
1440 _start_map = nullptr;
1441 _has_predicates = false;
1442 _num_successors = 0;
1443 _all_successors = 0;
1444 _successors = nullptr;
1445 assert(pred_count() == 0 && preds_parsed() == 0, "sanity");
1446 assert(!(is_merged() || is_parsed() || is_handler() || has_merged_backedge()), "sanity");
1447 assert(_live_locals.size() == 0, "sanity");
1448
1449 // entry point has additional predecessor
1450 if (flow()->is_start()) _pred_count++;
1451 assert(flow()->is_start() == (this == outer->start_block()), "");
1452 }
1453
1454 //-------------------------------init_graph------------------------------------
1455 void Parse::Block::init_graph(Parse* outer) {
1456 // Create the successor list for this parser block.
1457 GrowableArray<ciTypeFlow::Block*>* tfs = flow()->successors();
1458 GrowableArray<ciTypeFlow::Block*>* tfe = flow()->exceptions();
1459 int ns = tfs->length();
1460 int ne = tfe->length();
1461 _num_successors = ns;
1462 _all_successors = ns+ne;
1463 _successors = (ns+ne == 0) ? nullptr : NEW_RESOURCE_ARRAY(Block*, ns+ne);
1464 int p = 0;
1465 for (int i = 0; i < ns+ne; i++) {
1466 ciTypeFlow::Block* tf2 = (i < ns) ? tfs->at(i) : tfe->at(i-ns);
1467 Block* block2 = outer->rpo_at(tf2->rpo());
1468 _successors[i] = block2;
1469
1470 // Accumulate pred info for the other block, too.
1471 // Note: We also need to set _pred_count for exception blocks since they could
1472 // also have normal predecessors (reached without athrow by an explicit jump).
1473 // This also means that next_path_num can be called along exception paths.
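// For example (illustrative), a handler whose entry bci is also the target of
// an ordinary goto (e.g. shared cleanup code reached both normally and via an
// exception edge) picks up predecessors of both kinds.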
1474 block2->_pred_count++;
1475 if (i >= ns) {
1476 block2->_is_handler = true;
1477 }
1478
1479 #ifdef ASSERT
1480 // A block's successors must be distinguishable by BCI.
1481 // That is, no bytecode is allowed to branch to two different
1482 // clones of the same code location.
1483 for (int j = 0; j < i; j++) {
1484 Block* block1 = _successors[j];
1485 if (block1 == block2) continue; // duplicates are OK
1486 assert(block1->start() != block2->start(), "successors have unique bcis");
1487 }
1488 #endif
1489 }
1490 }
1491
1492 //---------------------------successor_for_bci---------------------------------
1493 Parse::Block* Parse::Block::successor_for_bci(int bci) {
1494 for (int i = 0; i < all_successors(); i++) {
1495 Block* block2 = successor_at(i);
1496 if (block2->start() == bci) return block2;
1497 }
1498 // We can actually reach here if ciTypeFlow traps out a block
1499 // due to an unloaded class, and concurrently with compilation the
1500 // class is then loaded, so that a later phase of the parser is
1501 // able to see more of the bytecode CFG. Or, the flow pass and
1502 // the parser can have a minor difference of opinion about executability
1503 // of bytecodes. For example, "obj.field = null" is executable even
1504 // if the field's type is an unloaded class; the flow pass used to
1505 // make a trap for such code.
1506 return nullptr;
1507 }
1508
1509
1510 //-----------------------------stack_type_at-----------------------------------
1511 const Type* Parse::Block::stack_type_at(int i) const {
1512 return get_type(flow()->stack_type_at(i));
1513 }
1514
1515
1516 //-----------------------------local_type_at-----------------------------------
1517 const Type* Parse::Block::local_type_at(int i) const {
1518 // Make dead locals fall to bottom.
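// E.g. a local that liveness proves dead at this block's start reports
// Type::BOTTOM here, so ensure_phi() will force it to top instead of
// manufacturing a useless Phi for it.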
1519 if (_live_locals.size() == 0) {
1520 MethodLivenessResult live_locals = flow()->outer()->method()->liveness_at_bci(start());
1521 // This bitmap can be zero length if we saw a breakpoint.
1522 // In such cases, pretend they are all live.
1523 ((Block*)this)->_live_locals = live_locals;
1524 }
1525 if (_live_locals.size() > 0 && !_live_locals.at(i))
1526 return Type::BOTTOM;
1527
1528 return get_type(flow()->local_type_at(i));
1529 }
1530
1531
1532 #ifndef PRODUCT
1533
1534 //----------------------------name_for_bc--------------------------------------
1535 // helper method for BytecodeParseHistogram
1536 static const char* name_for_bc(int i) {
1537 return Bytecodes::is_defined(i) ? Bytecodes::name(Bytecodes::cast(i)) : "xxxunusedxxx";
1538 }
1539
1540 //----------------------------BytecodeParseHistogram------------------------------------
1541 Parse::BytecodeParseHistogram::BytecodeParseHistogram(Parse *p, Compile *c) {
1542 _parser = p;
1543 _compiler = c;
1544 if( ! _initialized ) { _initialized = true; reset(); }
1545 }
1546
1547 //----------------------------current_count------------------------------------
1548 int Parse::BytecodeParseHistogram::current_count(BPHType bph_type) {
1549 switch( bph_type ) {
1550 case BPH_transforms: { return _parser->gvn().made_progress(); }
1551 case BPH_values: { return _parser->gvn().made_new_values(); }
1552 default: { ShouldNotReachHere(); return 0; }
1553 }
1554 }
1555
1556 //----------------------------initialized--------------------------------------
1557 bool Parse::BytecodeParseHistogram::initialized() { return _initialized; }
1558
1559 //----------------------------reset--------------------------------------------
1560 void Parse::BytecodeParseHistogram::reset() {
1561 int i = Bytecodes::number_of_codes;
1562 while (i-- > 0) { _bytecodes_parsed[i] = 0; _nodes_constructed[i] = 0; _nodes_transformed[i] = 0; _new_values[i] = 0; }
1563 }
1564
1565 //----------------------------set_initial_state--------------------------------
1566 // Record info when starting to parse one bytecode
1567 void Parse::BytecodeParseHistogram::set_initial_state( Bytecodes::Code bc ) {
1568 if( PrintParseStatistics && !_parser->is_osr_parse() ) {
1569 _initial_bytecode = bc;
1570 _initial_node_count = _compiler->unique();
1571 _initial_transforms = current_count(BPH_transforms);
1572 _initial_values = current_count(BPH_values);
1573 }
1574 }
1575
1576 //----------------------------record_change--------------------------------
1577 // Record results of parsing one bytecode
1578 void Parse::BytecodeParseHistogram::record_change() {
1579 if( PrintParseStatistics && !_parser->is_osr_parse() ) {
1580 ++_bytecodes_parsed[_initial_bytecode];
1581 _nodes_constructed [_initial_bytecode] += (_compiler->unique() - _initial_node_count);
1582 _nodes_transformed [_initial_bytecode] += (current_count(BPH_transforms) - _initial_transforms);
1583 _new_values [_initial_bytecode] += (current_count(BPH_values) - _initial_values);
1584 }
1585 }
1586
1587
1588 //----------------------------print--------------------------------------------
1589 void Parse::BytecodeParseHistogram::print(float cutoff) {
1590 ResourceMark rm;
1591 // print profile
1592 int total = 0;
1593 int i = 0;
1594 for( i = 0; i < Bytecodes::number_of_codes; ++i ) { total += _bytecodes_parsed[i]; }
1595 int abs_sum = 0;
1596 tty->cr(); //0123456789012345678901234567890123456789012345678901234567890123456789
1597 tty->print_cr("Histogram of %d parsed bytecodes:", total);
1598 if( total == 0 ) { return; }
1599 tty->cr();
1600 tty->print_cr("absolute: count of compiled bytecodes of this type");
1601 tty->print_cr("relative: percentage contribution to compiled nodes");
1602 tty->print_cr("nodes : Average number of nodes constructed per bytecode");
1603 tty->print_cr("rnodes : Significance towards total nodes constructed, (nodes*relative)");
1604 tty->print_cr("transforms: Average amount of transform progress per bytecode compiled");
1605 tty->print_cr("values : Average number of node values improved per bytecode");
1606 tty->print_cr("name : Bytecode name");
1607 tty->cr();
1608 tty->print_cr(" absolute relative nodes rnodes transforms values name");
1609 tty->print_cr("----------------------------------------------------------------------");
1610 while (--i > 0) {
1611 int abs = _bytecodes_parsed[i];
1612 float rel = abs * 100.0F / total;
1613 float nodes = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _nodes_constructed[i])/_bytecodes_parsed[i];
1614 float rnodes = _bytecodes_parsed[i] == 0 ? 0 : rel * nodes;
1615 float xforms = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _nodes_transformed[i])/_bytecodes_parsed[i];
1616 float values = _bytecodes_parsed[i] == 0 ? 0 : (1.0F * _new_values [i])/_bytecodes_parsed[i];
1617 if (cutoff <= rel) {
1618 tty->print_cr("%10d %7.2f%% %6.1f %6.2f %6.1f %6.1f %s", abs, rel, nodes, rnodes, xforms, values, name_for_bc(i));
1619 abs_sum += abs;
1620 }
1621 }
1622 tty->print_cr("----------------------------------------------------------------------");
1623 float rel_sum = abs_sum * 100.0F / total;
1624 tty->print_cr("%10d %7.2f%% (cutoff = %.2f%%)", abs_sum, rel_sum, cutoff);
1625 tty->print_cr("----------------------------------------------------------------------");
1626 tty->cr();
1627 }
1628 #endif
1629
1630 //----------------------------load_state_from----------------------------------
1631 // Load block/map/sp. But do not touch iter/bci.
1632 void Parse::load_state_from(Block* block) {
1633 set_block(block);
1634 // load the block's JVM state:
1635 set_map(block->start_map());
1636 set_sp( block->start_sp());
1637 }
1638
1639
1640 //-----------------------------record_state------------------------------------
1641 void Parse::Block::record_state(Parse* p) {
1642 assert(!is_merged(), "can only record state once, on 1st inflow");
1643 assert(start_sp() == p->sp(), "stack pointer must agree with ciTypeFlow");
1644 set_start_map(p->stop());
1645 }
1646
1647
1648 //------------------------------do_one_block-----------------------------------
1649 void Parse::do_one_block() {
1650 if (TraceOptoParse) {
1651 Block *b = block();
1652 int ns = b->num_successors();
1653 int nt = b->all_successors();
1654
1655 tty->print("Parsing block #%d at bci [%d,%d), successors:",
1656 block()->rpo(), block()->start(), block()->limit());
1657 for (int i = 0; i < nt; i++) {
1658 tty->print((( i < ns) ? " %d" : " %d(exception block)"), b->successor_at(i)->rpo());
1659 }
1660 if (b->is_loop_head()) {
1661 tty->print(" loop head");
1662 }
1663 if (b->is_irreducible_loop_entry()) {
1664 tty->print(" irreducible");
1665 }
1666 tty->cr();
1667 }
1668
1669 assert(block()->is_merged(), "must be merged before being parsed");
1670 block()->mark_parsed();
1671
1672 // Set iterator to start of block.
1673 iter().reset_to_bci(block()->start());
1674
1675 if (ProfileExceptionHandlers && block()->is_handler()) {
1676 ciMethodData* methodData = method()->method_data();
1677 if (methodData->is_mature()) {
1678 ciBitData data = methodData->exception_handler_bci_to_data(block()->start());
1679 if (!data.exception_handler_entered() || StressPrunedExceptionHandlers) {
1680 // dead catch block
1681 // Emit an uncommon trap instead of processing the block.
1682 set_parse_bci(block()->start());
1683 uncommon_trap(Deoptimization::Reason_unreached,
1684 Deoptimization::Action_reinterpret,
1685 nullptr, "dead catch block");
1686 return;
1687 }
1688 }
1689 }
1690
1691 CompileLog* log = C->log();
1692
1693 // Parse bytecodes
1694 while (!stopped() && !failing()) {
1695 iter().next();
1696
1697 // Learn the current bci from the iterator:
1698 set_parse_bci(iter().cur_bci());
1699
1700 if (bci() == block()->limit()) {
1701 // Do not walk into the next block until directed by do_all_blocks.
1702 merge(bci());
1703 break;
1704 }
1705 assert(bci() < block()->limit(), "bci still in block");
1706
1707 if (log != nullptr) {
1708 // Output an optional context marker, to help place actions
1709 // that occur during parsing of this BC. If there is no log
1710 // output until the next context string, this context string
1711 // will be silently ignored.
1712 log->set_context("bc code='%d' bci='%d'", (int)bc(), bci());
1713 }
1714
1715 if (block()->has_trap_at(bci())) {
1716 // We must respect the flow pass's traps, because it will refuse
1717 // to produce successors for trapping blocks.
1718 int trap_index = block()->flow()->trap_index();
1719 assert(trap_index != 0, "trap index must be valid");
1720 uncommon_trap(trap_index);
1721 break;
1722 }
1723
1724 NOT_PRODUCT( parse_histogram()->set_initial_state(bc()); );
1725
1726 #ifdef ASSERT
1727 int pre_bc_sp = sp();
1728 int inputs, depth;
1729 bool have_se = !stopped() && compute_stack_effects(inputs, depth);
1730 assert(!have_se || pre_bc_sp >= inputs, "have enough stack to execute this BC: pre_bc_sp=%d, inputs=%d", pre_bc_sp, inputs);
1731 #endif //ASSERT
1732
1733 do_one_bytecode();
1734 if (failing()) return;
1735
1736 assert(!have_se || stopped() || failing() || (sp() - pre_bc_sp) == depth,
1737 "incorrect depth prediction: sp=%d, pre_bc_sp=%d, depth=%d", sp(), pre_bc_sp, depth);
1738
1739 do_exceptions();
1740
1741 NOT_PRODUCT( parse_histogram()->record_change(); );
1742
1743 if (log != nullptr)
1744 log->clear_context(); // skip marker if nothing was printed
1745
1746 // Fall into next bytecode. Each bytecode normally has 1 sequential
1747 // successor which is typically made ready by visiting this bytecode.
1748 // If the successor has several predecessors, then it is a merge
1749 // point, starts a new basic block, and is handled like other basic blocks.
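// Illustrative example: in
//   x = c ? 1 : 2;
//   use(x);
// the bytecodes within each arm fall through sequentially, but the bytecode
// where the arms rejoin has two predecessors and so starts a new basic block;
// each arm stops at its block limit and calls merge() instead of walking into
// the join, which is then parsed under the direction of do_all_blocks().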
1750 }
1751 }
1752
1753
1754 //---------------------------set_parse_bci-------------------------------------
1755 void Parse::set_parse_bci(int bci) {
1756 set_bci(bci);
1757 Node_Notes* nn = C->default_node_notes();
1758 if (nn == nullptr) return;
1759
1760 // Collect debug info for inlined calls unless -XX:-DebugInlinedCalls.
1761 if (!DebugInlinedCalls && depth() > 1) {
1762 return;
1763 }
1764
1765 // Update the JVMS annotation, if present.
1766 JVMState* jvms = nn->jvms();
1767 if (jvms != nullptr && jvms->bci() != bci) {
1768 // Update the JVMS.
1769 jvms = jvms->clone_shallow(C);
1770 jvms->set_bci(bci);
1771 nn->set_jvms(jvms);
1772 }
1773 }
1774
1775 //------------------------------merge------------------------------------------
1776 // Merge the current mapping into the basic block starting at bci
1777 void Parse::merge(int target_bci) {
1778 Block* target = successor_for_bci(target_bci);
1779 if (target == nullptr) { handle_missing_successor(target_bci); return; }
1780 assert(!target->is_ready(), "our arrival must be expected");
1781 int pnum = target->next_path_num();
1782 merge_common(target, pnum);
1783 }
1784
1785 //-------------------------merge_new_path--------------------------------------
1786 // Merge the current mapping into the basic block, using a new path
1787 void Parse::merge_new_path(int target_bci) {
1788 Block* target = successor_for_bci(target_bci);
1789 if (target == nullptr) { handle_missing_successor(target_bci); return; }
1790 assert(!target->is_ready(), "new path into frozen graph");
1791 int pnum = target->add_new_path();
1792 merge_common(target, pnum);
1793 }
1794
1795 //-------------------------merge_exception-------------------------------------
1796 // Merge the current mapping into the basic block starting at bci
1797 // The ex_oop must be pushed on the stack, unlike throw_to_exit.
1798 void Parse::merge_exception(int target_bci) {
1799 #ifdef ASSERT
1800 if (target_bci <= bci()) {
1801 C->set_exception_backedge();
1802 }
1803 #endif
1804 assert(sp() == 1, "must have only the throw exception on the stack");
1805 Block* target = successor_for_bci(target_bci);
1806 if (target == nullptr) { handle_missing_successor(target_bci); return; }
1807 assert(target->is_handler(), "exceptions are handled by special blocks");
1808 int pnum = target->add_new_path();
1809 merge_common(target, pnum);
1810 }
1811
1812 //--------------------handle_missing_successor---------------------------------
1813 void Parse::handle_missing_successor(int target_bci) {
1814 #ifndef PRODUCT
1815 Block* b = block();
1816 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1817 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1818 #endif
1819 ShouldNotReachHere();
1820 }
1821
1822 //--------------------------merge_common---------------------------------------
1823 void Parse::merge_common(Parse::Block* target, int pnum) {
1824 if (TraceOptoParse) {
1825 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1826 }
1827
1828 // Zap extra stack slots to top
1829 assert(sp() == target->start_sp(), "");
1830 clean_stack(sp());
1831
1832 // Check for merge conflicts involving inline types
1833 JVMState* old_jvms = map()->jvms();
1834 int old_bci = bci();
1835 JVMState* tmp_jvms = old_jvms->clone_shallow(C);
1836 tmp_jvms->set_should_reexecute(true);
1837 tmp_jvms->bind_map(map());
1838 // Execution needs to restart at the next bytecode (entry of the next
1839 // block)
1840 if (target->is_merged() ||
1841 pnum > PhiNode::Input ||
1842 target->is_handler() ||
1843 target->is_loop_head()) {
1844 set_parse_bci(target->start());
1845 for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1846 Node* n = map()->in(j); // Incoming change to target state.
1847 const Type* t = nullptr;
1848 if (tmp_jvms->is_loc(j)) {
1849 t = target->local_type_at(j - tmp_jvms->locoff());
1850 } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1851 t = target->stack_type_at(j - tmp_jvms->stkoff());
1852 }
1853 if (t != nullptr && t != Type::BOTTOM) {
1854 // An object can appear in the JVMS as either an oop or an InlineTypeNode. If the merge is
1855 // an InlineTypeNode, we need all the merge inputs to be InlineTypeNodes. Else, if the
1856 // merge is an oop, each merge input needs to be either an oop or a buffered
1857 // InlineTypeNode.
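// Illustrative example (sketch): for a value class local 'MyValue v', one
// incoming path may hold a scalarized InlineTypeNode while another holds a
// plain (possibly null) oop. Depending on what the first processed input
// looked like, the code below either scalarizes the oop inputs (so the merge
// stays an InlineTypeNode) or buffers the InlineTypeNode inputs (so the merge
// stays an oop Phi).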
1858 if (!t->is_inlinetypeptr()) {
1859 // The merge cannot be an InlineTypeNode, ensure the input is buffered if it is an
1860 // InlineTypeNode
1861 if (n->is_InlineType()) {
1862 map()->set_req(j, n->as_InlineType()->buffer(this));
1863 }
1864 } else {
1865 // Since the merge is a value object, it can either be an oop or an InlineTypeNode
1866 if (!target->is_merged()) {
1867 // This is the first processed input of the merge. If it is an InlineTypeNode, the
1868 // merge will be an InlineTypeNode. Else, try to scalarize so the merge can be
1869 // scalarized as well. However, we cannot blindly scalarize an inline type oop here
1870 // since it may be larval
1871 if (!n->is_InlineType() && gvn().type(n)->is_zero_type()) {
1872 // Null constant implies that this is not a larval object
1873 map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass()));
1874 }
1875 } else {
1876 Node* phi = target->start_map()->in(j);
1877 if (phi->is_InlineType()) {
1878 // Larval oops cannot be merged with non-larval ones, and since the merge point is
1879 // non-larval, n must be non-larval as well. As a result, we can scalarize n to merge
1880 // into phi
1881 if (!n->is_InlineType()) {
1882 map()->set_req(j, InlineTypeNode::make_from_oop(this, n, t->inline_klass()));
1883 }
1884 } else {
1885 // The merge is an oop phi, ensure the input is buffered if it is an InlineTypeNode
1886 if (n->is_InlineType()) {
1887 map()->set_req(j, n->as_InlineType()->buffer(this));
1888 }
1889 }
1890 }
1891 }
1892 }
1893 }
1894 }
1895 old_jvms->bind_map(map());
1896 set_parse_bci(old_bci);
1897
1898 if (!target->is_merged()) { // No prior mapping at this bci
1899 if (TraceOptoParse) { tty->print(" with empty state"); }
1900
1901 // If this path is dead, do not bother capturing it as a merge.
1902 // It is "as if" we had 1 fewer predecessors from the beginning.
1903 if (stopped()) {
1904 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1905 return;
1906 }
1907
1908 // Make a region if we know there are multiple or unpredictable inputs.
1909 // (Also, if this is a plain fall-through, we might see another region,
1910 // which must not be allowed into this block's map.)
1911 if (pnum > PhiNode::Input // Known multiple inputs.
1912 || target->is_handler() // These have unpredictable inputs.
1913 || target->is_loop_head() // Known multiple inputs
1914 || control()->is_Region()) { // We must hide this guy.
1915
1916 int current_bci = bci();
1917 set_parse_bci(target->start()); // Set target bci
1918 if (target->is_SEL_head()) {
1919 DEBUG_ONLY( target->mark_merged_backedge(block()); )
1920 if (target->start() == 0) {
1921 // Add Parse Predicates for the special case when
1922 // there are backbranches to the method entry.
1923 add_parse_predicates();
1924 }
1925 }
1926 // Add a Region to start the new basic block. Phis will be added
1927 // later lazily.
1928 int edges = target->pred_count();
1929 if (edges < pnum) edges = pnum; // might be a new path!
1930 RegionNode *r = new RegionNode(edges+1);
1931 gvn().set_type(r, Type::CONTROL);
1932 record_for_igvn(r);
1933 // zap all inputs to null for debugging (done in Node(uint) constructor)
1934 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1935 r->init_req(pnum, control());
1936 set_control(r);
1937 target->copy_irreducible_status_to(r, jvms());
1938 set_parse_bci(current_bci); // Restore bci
1939 }
1940
1941 // Convert the existing Parser mapping into a mapping at this bci.
1942 store_state_to(target);
1943 assert(target->is_merged(), "do not come here twice");
1944
1945 } else { // Prior mapping at this bci
1946 if (TraceOptoParse) { tty->print(" with previous state"); }
1947 #ifdef ASSERT
1948 if (target->is_SEL_head()) {
1949 target->mark_merged_backedge(block());
1950 }
1951 #endif
1952
1953 // We must not manufacture more phis if the target is already parsed.
1954 bool nophi = target->is_parsed();
1955
1956 SafePointNode* newin = map();// Hang on to incoming mapping
1957 Block* save_block = block(); // Hang on to incoming block;
1958 load_state_from(target); // Get prior mapping
1959
1960 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1961 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1962 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1963 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1964
1965 // Iterate over my current mapping and the old mapping.
1966 // Where different, insert Phi functions.
1967 // Use any existing Phi functions.
1968 assert(control()->is_Region(), "must be merging to a region");
1969 RegionNode* r = control()->as_Region();
1970
1971 // Compute where to merge into
1972 // Merge incoming control path
1973 r->init_req(pnum, newin->control());
1974
1975 if (pnum == 1) { // Last merge for this Region?
1976 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1977 Node* result = _gvn.transform(r);
1978 if (r != result && TraceOptoParse) {
1979 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1980 }
1981 }
1982 record_for_igvn(r);
1983 }
1984
1985 // Update all the non-control inputs to map:
1986 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1987 bool check_elide_phi = target->is_SEL_backedge(save_block);
1988 bool last_merge = (pnum == PhiNode::Input);
1989 for (uint j = 1; j < newin->req(); j++) {
1990 Node* m = map()->in(j); // Current state of target.
1991 Node* n = newin->in(j); // Incoming change to target state.
1992 Node* phi;
1993 if (m->is_Phi() && m->as_Phi()->region() == r) {
1994 phi = m;
1995 } else if (m->is_InlineType() && m->as_InlineType()->has_phi_inputs(r)) {
1996 phi = m;
1997 } else {
1998 phi = nullptr;
1999 }
2000 if (m != n) { // Different; must merge
2001 switch (j) {
2002 // Frame pointer and Return Address never changes
2003 case TypeFunc::FramePtr:// Drop m, use the original value
2004 case TypeFunc::ReturnAdr:
2005 break;
2006 case TypeFunc::Memory: // Merge inputs to the MergeMem node
2007 assert(phi == nullptr, "the merge contains phis, not vice versa");
2008 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
2009 continue;
2010 default: // All normal stuff
2011 if (phi == nullptr) {
2012 const JVMState* jvms = map()->jvms();
2013 if (EliminateNestedLocks &&
2014 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
2015 // BoxLock nodes are not commoned when EliminateNestedLocks is on.
2016 // Use old BoxLock node as merged box.
2017 assert(newin->jvms()->is_monitor_box(j), "sanity");
2018 // This assert also tests that nodes are BoxLock.
2019 assert(BoxLockNode::same_slot(n, m), "sanity");
2020 BoxLockNode* old_box = m->as_BoxLock();
2021 if (n->as_BoxLock()->is_unbalanced() && !old_box->is_unbalanced()) {
2022 // Preserve Unbalanced status.
2023 //
2024 // `old_box` can have only Regular or Coarsened status
2025 // because this code is executed only during Parse phase and
2026 // Incremental Inlining before EA and Macro nodes elimination.
2027 //
2028 // Incremental Inlining is executed after IGVN optimizations
2029 // during which BoxLock can be marked as Coarsened.
2030 old_box->set_coarsened(); // Verifies state
2031 old_box->set_unbalanced();
2032 }
2033 C->gvn_replace_by(n, m);
2034 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
2035 phi = ensure_phi(j, nophi);
2036 }
2037 }
2038 break;
2039 }
2040 }
2041 // At this point, n might be top if:
2042 // - there is no phi (because TypeFlow detected a conflict), or
2043 // - the corresponding control edge is top (a dead incoming path)
2044 // It is a bug if we create a phi which sees a garbage value on a live path.
2045
2046 // Merging two inline types?
2047 if (phi != nullptr && phi->is_InlineType()) {
2048 // Reload current state because it may have been updated by ensure_phi
2049 assert(phi == map()->in(j), "unexpected value in map");
2050 assert(phi->as_InlineType()->has_phi_inputs(r), "");
2051 InlineTypeNode* vtm = phi->as_InlineType(); // Current inline type
2052 InlineTypeNode* vtn = n->as_InlineType(); // Incoming inline type
2053 assert(vtm == phi, "Inline type should have Phi input");
2054
2055 #ifdef ASSERT
2056 if (TraceOptoParse) {
2057 tty->print_cr("\nMerging inline types");
2058 tty->print_cr("Current:");
2059 vtm->dump(2);
2060 tty->print_cr("Incoming:");
2061 vtn->dump(2);
2062 tty->cr();
2063 }
2064 #endif
2065 // Do the merge
2066 vtm->merge_with(&_gvn, vtn, pnum, last_merge);
2067 if (last_merge) {
2068 map()->set_req(j, _gvn.transform(vtm));
2069 record_for_igvn(vtm);
2070 }
2071 } else if (phi != nullptr) {
2072 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
2073 assert(phi->as_Phi()->region() == r, "");
2074 phi->set_req(pnum, n); // Then add 'n' to the merge
2075 if (last_merge) {
2076 // Last merge for this Phi.
2077 // So far, Phis have had a reasonable type from ciTypeFlow.
2078 // Now _gvn will join that with the meet of current inputs.
2079 // BOTTOM is never permissible here, 'cause pessimistically
2080 // Phis of pointers cannot lose the basic pointer type.
2081 DEBUG_ONLY(const Type* bt1 = phi->bottom_type());
2082 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
2083 map()->set_req(j, _gvn.transform(phi));
2084 DEBUG_ONLY(const Type* bt2 = phi->bottom_type());
2085 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
2086 record_for_igvn(phi);
2087 }
2088 }
2089 } // End of for all values to be merged
2090
2091 if (last_merge && !r->in(0)) { // The occasional useless Region
2092 assert(control() == r, "");
2093 set_control(r->nonnull_req());
2094 }
2095
2096 map()->merge_replaced_nodes_with(newin);
2097
2098 // newin has been subsumed into the lazy merge, and is now dead.
2099 set_block(save_block);
2100
2101 stop(); // done with this guy, for now
2102 }
2103
2104 if (TraceOptoParse) {
2105 tty->print_cr(" on path %d", pnum);
2106 }
2107
2108 // Done with this parser state.
2109 assert(stopped(), "");
2110 }
2111
2112
2113 //--------------------------merge_memory_edges---------------------------------
2114 void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) {
2115 // (nophi means we must not create phis, because we already parsed here)
2116 assert(n != nullptr, "");
2117 // Merge the inputs to the MergeMems
2118 MergeMemNode* m = merged_memory();
2119
2120 assert(control()->is_Region(), "must be merging to a region");
2121 RegionNode* r = control()->as_Region();
2122
2123 PhiNode* base = nullptr;
2124 MergeMemNode* remerge = nullptr;
2125 for (MergeMemStream mms(m, n); mms.next_non_empty2(); ) {
2126 Node *p = mms.force_memory();
2127 Node *q = mms.memory2();
2128 if (mms.is_empty() && nophi) {
2129 // Trouble: No new splits allowed after a loop body is parsed.
2130 // Instead, wire the new split into a MergeMem on the backedge.
2131 // The optimizer will sort it out, slicing the phi.
2132 if (remerge == nullptr) {
2133 guarantee(base != nullptr, "");
2134 assert(base->in(0) != nullptr, "should not be xformed away");
2135 remerge = MergeMemNode::make(base->in(pnum));
2136 gvn().set_type(remerge, Type::MEMORY);
2137 base->set_req(pnum, remerge);
2138 }
2139 remerge->set_memory_at(mms.alias_idx(), q);
2140 continue;
2141 }
2142 assert(!q->is_MergeMem(), "");
2143 PhiNode* phi;
2144 if (p != q) {
2145 phi = ensure_memory_phi(mms.alias_idx(), nophi);
2146 } else {
2147 if (p->is_Phi() && p->as_Phi()->region() == r)
2148 phi = p->as_Phi();
2149 else
2150 phi = nullptr;
2151 }
2152 // Insert q into local phi
2153 if (phi != nullptr) {
2154 assert(phi->region() == r, "");
2155 p = phi;
2156 phi->set_req(pnum, q);
2157 if (mms.at_base_memory()) {
2158 base = phi; // delay transforming it
2159 } else if (pnum == 1) {
2160 record_for_igvn(phi);
2161 p = _gvn.transform(phi);
2162 }
2163 mms.set_memory(p);// store back through the iterator
2164 }
2165 }
2166 // Transform base last, in case we must fiddle with remerging.
2167 if (base != nullptr && pnum == 1) {
2168 record_for_igvn(base);
2169 m->set_base_memory(_gvn.transform(base));
2170 }
2171 }
2172
2173
2174 //------------------------ensure_phis_everywhere-------------------------------
2175 void Parse::ensure_phis_everywhere() {
2176 ensure_phi(TypeFunc::I_O);
2177
2178 // Ensure a phi on all currently known memories.
2179 for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
2180 ensure_memory_phi(mms.alias_idx());
2181 DEBUG_ONLY(mms.set_memory()); // keep the iterator happy
2182 }
2183
2184 // Note: This is our only chance to create phis for memory slices.
2185 // If we miss a slice that crops up later, it will have to be
2186 // merged into the base-memory phi that we are building here.
2187 // Later, the optimizer will comb out the knot, and build separate
2188 // phi-loops for each memory slice that matters.
2189
2190 // Monitors must nest nicely and not get confused amongst themselves.
2191 // Phi-ify everything up to the monitors, though.
2192 uint monoff = map()->jvms()->monoff();
2193 uint nof_monitors = map()->jvms()->nof_monitors();
2194
2195 assert(TypeFunc::Parms == map()->jvms()->locoff(), "parser map should contain only youngest jvms");
2196 bool check_elide_phi = block()->is_SEL_head();
2197 for (uint i = TypeFunc::Parms; i < monoff; i++) {
2198 if (!check_elide_phi || !block()->can_elide_SEL_phi(i)) {
2199 ensure_phi(i);
2200 }
2201 }
2202
2203 // Even monitors need Phis, though they are well-structured.
2204 // This is true for OSR methods, and also for the rare cases where
2205 // a monitor object is the subject of a replace_in_map operation.
2206 // See bugs 4426707 and 5043395.
2207 for (uint m = 0; m < nof_monitors; m++) {
2208 ensure_phi(map()->jvms()->monitor_obj_offset(m));
2209 }
2210 }
2211
2212
2213 //-----------------------------add_new_path------------------------------------
2214 // Add a previously unaccounted-for predecessor to this block.
2215 int Parse::Block::add_new_path() {
2216 // If there is no map, return the lowest unused path number.
2217 if (!is_merged()) return pred_count()+1; // there will be a map shortly
2218
2219 SafePointNode* map = start_map();
2220 if (!map->control()->is_Region())
2221 return pred_count()+1; // there may be a region some day
2222 RegionNode* r = map->control()->as_Region();
2223
2224 // Add new path to the region.
2225 uint pnum = r->req();
2226 r->add_req(nullptr);
2227
2228 for (uint i = 1; i < map->req(); i++) {
2229 Node* n = map->in(i);
2230 if (i == TypeFunc::Memory) {
2231 // Ensure a phi on all currently known memories.
2232 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2233 Node* phi = mms.memory();
2234 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2235 assert(phi->req() == pnum, "must be same size as region");
2236 phi->add_req(nullptr);
2237 }
2238 }
2239 } else {
2240 if (n->is_Phi() && n->as_Phi()->region() == r) {
2241 assert(n->req() == pnum, "must be same size as region");
2242 n->add_req(nullptr);
2243 } else if (n->is_InlineType() && n->as_InlineType()->has_phi_inputs(r)) {
2244 n->as_InlineType()->add_new_path(r);
2245 }
2246 }
2247 }
2248
2249 return pnum;
2250 }
2251
2252 //------------------------------ensure_phi-------------------------------------
2253 // Turn the idx'th entry of the current map into a Phi
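// For example, after
//   int x; if (c) { x = 1; } else { x = 2; }
// the two paths meet at a Region and the map slot for x holds a different
// node on each path, so the merge code calls ensure_phi() to build
// Phi(Region, 1, 2), or to reuse a Phi already rooted at that Region.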
2254 Node* Parse::ensure_phi(int idx, bool nocreate) {
2255 SafePointNode* map = this->map();
2256 Node* region = map->control();
2257 assert(region->is_Region(), "");
2258
2259 Node* o = map->in(idx);
2260 assert(o != nullptr, "");
2261
2262 if (o == top()) return nullptr; // TOP always merges into TOP
2263
2264 if (o->is_Phi() && o->as_Phi()->region() == region) {
2265 return o->as_Phi();
2266 }
2267 InlineTypeNode* vt = o->isa_InlineType();
2268 if (vt != nullptr && vt->has_phi_inputs(region)) {
2269 return vt;
2270 }
2271
2272 // Now use a Phi here for merging
2273 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2274 const JVMState* jvms = map->jvms();
2275 const Type* t = nullptr;
2276 if (jvms->is_loc(idx)) {
2277 t = block()->local_type_at(idx - jvms->locoff());
2278 } else if (jvms->is_stk(idx)) {
2279 t = block()->stack_type_at(idx - jvms->stkoff());
2280 } else if (jvms->is_mon(idx)) {
2281 assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2282 t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2283 } else if ((uint)idx < TypeFunc::Parms) {
2284 t = o->bottom_type(); // Type::RETURN_ADDRESS or such-like.
2285 } else {
2286 assert(false, "no type information for this phi");
2287 }
2288
2289 // If the type falls to bottom, then this must be a local that
2290 // is already dead or is mixing ints and oops or some such.
2291 // Forcing it to top makes it go dead.
2292 if (t == Type::BOTTOM) {
2293 map->set_req(idx, top());
2294 return nullptr;
2295 }
2296
2297 // Do not create phis for top either.
2298 // A top on a non-null control flow must be unused even after the Phi.
2299 if (t == Type::TOP || t == Type::HALF) {
2300 map->set_req(idx, top());
2301 return nullptr;
2302 }
2303
2304 if (vt != nullptr && t->is_inlinetypeptr()) {
2305 // Inline types are merged by merging their field values.
2306 // Create a cloned InlineTypeNode with phi inputs that
2307 // represents the merged inline type and update the map.
2308 vt = vt->clone_with_phis(&_gvn, region);
2309 map->set_req(idx, vt);
2310 return vt;
2311 } else {
2312 PhiNode* phi = PhiNode::make(region, o, t);
2313 gvn().set_type(phi, t);
2314 if (C->do_escape_analysis()) record_for_igvn(phi);
2315 map->set_req(idx, phi);
2316 return phi;
2317 }
2318 }
2319
2320 //--------------------------ensure_memory_phi----------------------------------
2321 // Turn the idx'th slice of the current memory into a Phi
2322 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2323 MergeMemNode* mem = merged_memory();
2324 Node* region = control();
2325 assert(region->is_Region(), "");
2326
2327 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2328 assert(o != nullptr && o != top(), "");
2329
2330 PhiNode* phi;
2331 if (o->is_Phi() && o->as_Phi()->region() == region) {
2332 phi = o->as_Phi();
2333 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2334 // clone the shared base memory phi to make a new memory split
2335 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2336 const Type* t = phi->bottom_type();
2337 const TypePtr* adr_type = C->get_adr_type(idx);
2338 phi = phi->slice_memory(adr_type);
2339 gvn().set_type(phi, t);
2340 }
2341 return phi;
2342 }
2343
2344 // Now use a Phi here for merging
2345 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2346 const Type* t = o->bottom_type();
2347 const TypePtr* adr_type = C->get_adr_type(idx);
2348 phi = PhiNode::make(region, o, t, adr_type);
2349 gvn().set_type(phi, t);
2350 if (idx == Compile::AliasIdxBot)
2351 mem->set_base_memory(phi);
2352 else
2353 mem->set_memory_at(idx, phi);
2354 return phi;
2355 }
2356
2357 //------------------------------call_register_finalizer-----------------------
2358 // Check the klass of the receiver and call register_finalizer if the
2359 // class needs finalization.
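// For example, if the receiver's dynamic class overrides Object.finalize(),
// the object must be registered with the runtime when Object.<init> returns;
// if CHA can prove that no loaded subclass is finalizable, the dynamic check
// below is skipped entirely.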
2360 void Parse::call_register_finalizer() {
2361 Node* receiver = local(0);
2362 assert(receiver != nullptr && receiver->bottom_type()->isa_instptr() != nullptr,
2363 "must have non-null instance type");
2364
2365 const TypeInstPtr *tinst = receiver->bottom_type()->isa_instptr();
2366 if (tinst != nullptr && tinst->is_loaded() && !tinst->klass_is_exact()) {
2367 // The type isn't known exactly so see if CHA tells us anything.
2368 ciInstanceKlass* ik = tinst->instance_klass();
2369 if (!Dependencies::has_finalizable_subclass(ik)) {
2370 // No finalizable subclasses so skip the dynamic check.
2371 C->dependencies()->assert_has_no_finalizable_subclasses(ik);
2372 return;
2373 }
2374 }
2375
2376 // Insert a dynamic test for whether the instance needs
2377 // finalization. In general this will fold up since the concrete
2378 // class is often visible so the access flags are constant.
2379 Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
2380 Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS));
2381
2382 Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::misc_flags_offset()));
2383 Node* access_flags = make_load(nullptr, access_flags_addr, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
2384
2385 Node* mask = _gvn.transform(new AndINode(access_flags, intcon(KlassFlags::_misc_has_finalizer)));
2386 Node* check = _gvn.transform(new CmpINode(mask, intcon(0)));
2387 Node* test = _gvn.transform(new BoolNode(check, BoolTest::ne));
2388
2389 IfNode* iff = create_and_map_if(control(), test, PROB_MAX, COUNT_UNKNOWN);
2390
2391 RegionNode* result_rgn = new RegionNode(3);
2392 record_for_igvn(result_rgn);
2393
2394 Node *skip_register = _gvn.transform(new IfFalseNode(iff));
2395 result_rgn->init_req(1, skip_register);
2396
2397 Node *needs_register = _gvn.transform(new IfTrueNode(iff));
2398 set_control(needs_register);
2399 if (stopped()) {
2400 // There is no slow path.
2401 result_rgn->init_req(2, top());
2402 } else {
2403 Node *call = make_runtime_call(RC_NO_LEAF,
2404 OptoRuntime::register_finalizer_Type(),
2405 OptoRuntime::register_finalizer_Java(),
2406 nullptr, TypePtr::BOTTOM,
2407 receiver);
2408 make_slow_call_ex(call, env()->Throwable_klass(), true);
2409
2410 Node* fast_io = call->in(TypeFunc::I_O);
2411 Node* fast_mem = call->in(TypeFunc::Memory);
2412 // These two phis are pre-filled with copies of the fast IO and Memory
2413 Node* io_phi = PhiNode::make(result_rgn, fast_io, Type::ABIO);
2414 Node* mem_phi = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
2415
2416 result_rgn->init_req(2, control());
2417 io_phi ->init_req(2, i_o());
2418 mem_phi ->init_req(2, reset_memory());
2419
2420 set_all_memory( _gvn.transform(mem_phi) );
2421 set_i_o( _gvn.transform(io_phi) );
2422 }
2423
2424 set_control( _gvn.transform(result_rgn) );
2425 }
2426
2427 // Add check to deoptimize once holder klass is fully initialized.
2428 void Parse::clinit_deopt() {
2429 assert(C->has_method(), "only for normal compilations");
2430 assert(depth() == 1, "only for main compiled method");
2431 assert(is_normal_parse(), "no barrier needed on osr entry");
2432 assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2433
2434 set_parse_bci(0);
2435
2436 Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2437 guard_klass_being_initialized(holder);
2438 }
2439
2440 //------------------------------return_current---------------------------------
2441 // Append current _map to _exit_return
2442 void Parse::return_current(Node* value) {
2443 if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2444 call_register_finalizer();
2445 }
2446
2447 // frame pointer is always the same, already captured
2448 if (value != nullptr) {
2449 Node* phi = _exits.argument(0);
2450 const Type* return_type = phi->bottom_type();
2451 const TypeInstPtr* tr = return_type->isa_instptr();
2452 if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
2453 return_type->is_inlinetypeptr()) {
2454 // Inline type is returned as fields, make sure it is scalarized
2455 if (!value->is_InlineType()) {
2456 value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass());
2457 }
2458 if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
2459 // Returning from root or an incrementally inlined method. Make sure all non-flat
2460 // fields are buffered and re-execute if allocation triggers deoptimization.
2461 PreserveReexecuteState preexecs(this);
2462 assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
2463 jvms()->set_should_reexecute(true);
2464 inc_sp(1);
2465 value = value->as_InlineType()->allocate_fields(this);
2466 }
2467 } else if (value->is_InlineType()) {
2468 // Inline type is returned as oop, make sure it is buffered and re-execute
2469 // if allocation triggers deoptimization.
2470 PreserveReexecuteState preexecs(this);
2471 jvms()->set_should_reexecute(true);
2472 inc_sp(1);
2473 value = value->as_InlineType()->buffer(this);
2474 }
2475 // ...else
2476 // If returning oops to an interface-return, there is a silent free
2477 // cast from oop to interface allowed by the Verifier. Make it explicit here.
2478 phi->add_req(value);
2479 }
2480
2481 // Do not set_parse_bci, so that return goo is credited to the return insn.
2482 set_bci(InvocationEntryBci);
2483 if (method()->is_synchronized()) {
2484 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2485 }
2486 if (C->env()->dtrace_method_probes()) {
2487 make_dtrace_method_exit(method());
2488 }
2489
2490 SafePointNode* exit_return = _exits.map();
2491 exit_return->in( TypeFunc::Control )->add_req( control() );
2492 exit_return->in( TypeFunc::I_O )->add_req( i_o () );
2493 Node *mem = exit_return->in( TypeFunc::Memory );
2494 for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2495 if (mms.is_empty()) {
2496 // get a copy of the base memory, and patch just this one input
2497 const TypePtr* adr_type = mms.adr_type(C);
2498 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2499 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2500 gvn().set_type_bottom(phi);
2501 phi->del_req(phi->req()-1); // prepare to re-patch
2502 mms.set_memory(phi);
2503 }
2504 mms.memory()->add_req(mms.memory2());
2505 }
2506
2507 if (_first_return) {
2508 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2509 _first_return = false;
2510 } else {
2511 _exits.map()->merge_replaced_nodes_with(map());
2512 }
2513
2514 stop_and_kill_map(); // This CFG path dies here
2515 }
2516
2517
2518 //------------------------------add_safepoint----------------------------------
2519 void Parse::add_safepoint() {
2520 uint parms = TypeFunc::Parms+1;
2521
2522 // Clear out dead values from the debug info.
2523 kill_dead_locals();
2524
2525 // Clone the JVM State
2526 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
2527
2528 // Capture memory state BEFORE a SafePoint. Since we can block at a
2529 // SafePoint we need our GC state to be safe; i.e. we need all our current
2530 // write barriers (card marks) to not float down after the SafePoint so we
2531 // must read raw memory. Likewise we need all oop stores to match the card
2532 // marks. If deopt can happen, we need ALL stores (we need the correct JVM
2533 // state on a deopt).
2534
2535 // We do not need to WRITE the memory state after a SafePoint. The control
2536 // edge will keep card-marks and oop-stores from floating up from below a
2537 // SafePoint and our true dependency added here will keep them from floating
2538 // down below a SafePoint.
2539
2540 // Clone the current memory state
2541 Node* mem = MergeMemNode::make(map()->memory());
2542
2543 mem = _gvn.transform(mem);
2544
2545 // Pass control through the safepoint
2546 sfpnt->init_req(TypeFunc::Control , control());
2547 // Fix edges normally used by a call
2548 sfpnt->init_req(TypeFunc::I_O , top() );
2549 sfpnt->init_req(TypeFunc::Memory , mem );
2550 sfpnt->init_req(TypeFunc::ReturnAdr, top() );
2551 sfpnt->init_req(TypeFunc::FramePtr , top() );
2552
2553 // Create a node for the polling address
2554 Node *polladr;
2555 Node *thread = _gvn.transform(new ThreadLocalNode());
2556 Node *polling_page_load_addr = _gvn.transform(basic_plus_adr(top(), thread, in_bytes(JavaThread::polling_page_offset())));
2557 polladr = make_load(control(), polling_page_load_addr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
2558 sfpnt->init_req(TypeFunc::Parms+0, _gvn.transform(polladr));
2559
2560 // Fix up the JVM State edges
2561 add_safepoint_edges(sfpnt);
2562 Node *transformed_sfpnt = _gvn.transform(sfpnt);
2563 set_control(transformed_sfpnt);
2564
2565 // Provide an edge from root to safepoint. This makes the safepoint
2566 // appear useful until the parse has completed.
2567 if (transformed_sfpnt->is_SafePoint()) {
2568 assert(C->root() != nullptr, "Expect parse is still valid");
2569 C->root()->add_prec(transformed_sfpnt);
2570 }
2571 }
2572
2573 #ifndef PRODUCT
2574 //------------------------show_parse_info--------------------------------------
2575 void Parse::show_parse_info() {
2576 InlineTree* ilt = nullptr;
2577 if (C->ilt() != nullptr) {
2578 JVMState* caller_jvms = is_osr_parse() ? caller()->caller() : caller();
2579 ilt = InlineTree::find_subtree_from_root(C->ilt(), caller_jvms, method());
2580 }
2581 if (PrintCompilation && Verbose) {
2582 if (depth() == 1) {
2583 if( ilt->count_inlines() ) {
2584 tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
2585 ilt->count_inline_bcs());
2586 tty->cr();
2587 }
2588 } else {
2589 if (method()->is_synchronized()) tty->print("s");
2590 if (method()->has_exception_handlers()) tty->print("!");
2591 // Check this is not the final compiled version
2592 if (C->trap_can_recompile()) {
2593 tty->print("-");
2594 } else {
2595 tty->print(" ");
2596 }
2597 method()->print_short_name();
2598 if (is_osr_parse()) {
2599 tty->print(" @ %d", osr_bci());
2600 }
2601 tty->print(" (%d bytes)",method()->code_size());
2602 if (ilt->count_inlines()) {
2603 tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
2604 ilt->count_inline_bcs());
2605 }
2606 tty->cr();
2607 }
2608 }
2609 if (PrintOpto && (depth() == 1 || PrintOptoInlining)) {
2610 // Print that we succeeded; suppress this message on the first osr parse.
2611
2612 if (method()->is_synchronized()) tty->print("s");
2613 if (method()->has_exception_handlers()) tty->print("!");
2614 // Check this is not the final compiled version
2615 if (C->trap_can_recompile() && depth() == 1) {
2616 tty->print("-");
2617 } else {
2618 tty->print(" ");
2619 }
2620 if( depth() != 1 ) { tty->print(" "); } // missing compile count
2621 for (int i = 1; i < depth(); ++i) { tty->print(" "); }
2622 method()->print_short_name();
2623 if (is_osr_parse()) {
2624 tty->print(" @ %d", osr_bci());
2625 }
2626 if (ilt->caller_bci() != -1) {
2627 tty->print(" @ %d", ilt->caller_bci());
2628 }
2629 tty->print(" (%d bytes)",method()->code_size());
2630 if (ilt->count_inlines()) {
2631 tty->print(" __inlined %d (%d bytes)", ilt->count_inlines(),
2632 ilt->count_inline_bcs());
2633 }
2634 tty->cr();
2635 }
2636 }
2637
2638
2639 //------------------------------dump-------------------------------------------
2640 // Dump information associated with the bytecodes of current _method
2641 void Parse::dump() {
2642 if( method() != nullptr ) {
2643 // Iterate over bytecodes
2644 ciBytecodeStream iter(method());
2645 for( Bytecodes::Code bc = iter.next(); bc != ciBytecodeStream::EOBC() ; bc = iter.next() ) {
2646 dump_bci( iter.cur_bci() );
2647 tty->cr();
2648 }
2649 }
2650 }
2651
2652 // Dump information associated with a byte code index, 'bci'
2653 void Parse::dump_bci(int bci) {
2654 // Output info on merge-points, cloning, and within _jsr..._ret
2655 // NYI
2656 tty->print(" bci:%d", bci);
2657 }
2658
2659 #endif