78 tty->print_cr("Blocks parsed: %u Blocks seen: %u", blocks_parsed, blocks_seen);
79
80 if (explicit_null_checks_inserted) {
81 tty->print_cr("%u original null checks - %u elided (%2u%%); optimizer leaves %u,",
82 explicit_null_checks_inserted, explicit_null_checks_elided,
83 (100*explicit_null_checks_elided)/explicit_null_checks_inserted,
84 all_null_checks_found);
85 }
86 if (all_null_checks_found) {
87 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
88 (100*implicit_null_checks)/all_null_checks_found);
89 }
90 if (SharedRuntime::_implicit_null_throws) {
91 tty->print_cr("%u implicit null exceptions at runtime",
92 SharedRuntime::_implicit_null_throws);
93 }
94
95 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
96 BytecodeParseHistogram::print();
97 }
98 }
99 #endif
100
101 //------------------------------ON STACK REPLACEMENT---------------------------
102
103 // Construct a node which can be used to get incoming state for
104 // on stack replacement.
105 Node *Parse::fetch_interpreter_state(int index,
106 BasicType bt,
107 Node *local_addrs,
108 Node *local_addrs_base) {
109 Node *mem = memory(Compile::AliasIdxRaw);
110 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
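// Locals live at decreasing addresses in the interpreter frame, so slot
// #index is index*wordSize bytes below the address formed from
// local_addrs_base + local_addrs. E.g. (sketch): with an 8-byte word,
// local 2 is read from offset -16 relative to local 0.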
111 Node *ctl = control();
112
113 // Very similar to LoadNode::make, except we handle un-aligned longs and
114 // doubles on Sparc. Intel can handle them just fine directly.
115 Node *l = nullptr;
116 switch (bt) { // Signature is flattened
117 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
371
372 if (bad_type_exit->control()->req() > 1) {
373 // Build an uncommon trap here, if any inputs can be unexpected.
374 bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
375 record_for_igvn(bad_type_exit->control());
376 SafePointNode* types_are_good = map();
377 set_map(bad_type_exit);
378 // The unexpected type happens because a new edge is active
379 // in the CFG, which typeflow had previously ignored.
380 // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
381 // This x will be typed as Integer if notReached is not yet linked.
382 // It could also happen due to a problem in ciTypeFlow analysis.
383 uncommon_trap(Deoptimization::Reason_constraint,
384 Deoptimization::Action_reinterpret);
385 set_map(types_are_good);
386 }
387 }
388
389 //------------------------------Parse------------------------------------------
390 // Main parser constructor.
391 Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
392 : _exits(caller)
393 {
394 // Init some variables
395 _caller = caller;
396 _method = parse_method;
397 _expected_uses = expected_uses;
398 _depth = 1 + (caller->has_method() ? caller->depth() : 0);
399 _wrote_final = false;
400 _wrote_volatile = false;
401 _wrote_stable = false;
402 _wrote_fields = false;
403 _alloc_with_final = nullptr;
404 _block = nullptr;
405 _first_return = true;
406 _replaced_nodes_for_exceptions = false;
407 _new_idx = C->unique();
408 DEBUG_ONLY(_entry_bci = UnknownBci);
409 DEBUG_ONLY(_block_count = -1);
410 DEBUG_ONLY(_blocks = (Block*)-1);
411 #ifndef PRODUCT
412 if (PrintCompilation || PrintOpto) {
413 // Make sure I have an inline tree, so I can print messages about it.
414 InlineTree::find_subtree_from_root(C->ilt(), caller, parse_method);
415 }
416 _max_switch_depth = 0;
417 _est_switch_depth = 0;
418 #endif
419
420 if (parse_method->has_reserved_stack_access()) {
421 C->set_has_reserved_stack_access(true);
422 }
423
424 if (parse_method->is_synchronized() || parse_method->has_monitor_bytecodes()) {
425 C->set_has_monitors(true);
426 }
427
428 _iter.reset_to_method(method());
429 C->set_has_loops(C->has_loops() || method()->has_loops());
430
431 if (_expected_uses <= 0) {
432 _prof_factor = 1;
433 } else {
434 float prof_total = parse_method->interpreter_invocation_count();
435 if (prof_total <= _expected_uses) {
436 _prof_factor = 1;
437 } else {
513 tty->print_cr("type flow bailout: %s", _flow->failure_reason());
514 }
515 if (Verbose) {
516 method()->print();
517 method()->print_codes();
518 _flow->print();
519 }
520 }
521 #endif
522 }
523
524 #ifdef ASSERT
525 if (depth() == 1) {
526 assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
527 } else {
528 assert(!this->is_osr_parse(), "no recursive OSR");
529 }
530 #endif
531
532 #ifndef PRODUCT
533 if (_flow->has_irreducible_entry()) {
534 C->set_parsed_irreducible_loop(true);
535 }
536
537 methods_parsed++;
538 // add method size here to guarantee that inlined methods are added too
539 if (CITime)
540 _total_bytes_compiled += method()->code_size();
541
542 show_parse_info();
543 #endif
544
545 if (failing()) {
546 if (log) log->done("parse");
547 return;
548 }
549
550 gvn().transform(top());
551
552 // Import the results of the ciTypeFlow.
588
589 // Add check to deoptimize the nmethod if RTM state was changed
590 rtm_deopt();
591 }
592
593 // Check for bailouts during method entry or RTM state check setup.
594 if (failing()) {
595 if (log) log->done("parse");
596 C->set_default_node_notes(caller_nn);
597 return;
598 }
599
600 entry_map = map(); // capture any changes performed by method setup code
601 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
602
603 // We begin parsing as if we have just encountered a jump to the
604 // method entry.
605 Block* entry_block = start_block();
606 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
607 set_map_clone(entry_map);
608 merge_common(entry_block, entry_block->next_path_num());
609
610 #ifndef PRODUCT
611 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
612 set_parse_histogram( parse_histogram_obj );
613 #endif
614
615 // Parse all the basic blocks.
616 do_all_blocks();
617
618 // Check for bailouts during conversion to graph
619 if (failing()) {
620 if (log) log->done("parse");
621 return;
622 }
623
624 // Fix up all exiting control flow.
625 set_map(entry_map);
626 do_exits();
627
628 // Only reset this now, to make sure that debug information emitted
629 // for exiting control flow still refers to the inlined method.
630 C->set_default_node_notes(caller_nn);
631
632 if (log) log->done("parse nodes='%d' live='%d' memory='" SIZE_FORMAT "'",
633 C->unique(), C->live_nodes(), C->node_arena()->used());
634 }
635
636 //---------------------------do_all_blocks-------------------------------------
637 void Parse::do_all_blocks() {
638 bool has_irreducible = flow()->has_irreducible_entry();
639
640 // Walk over all blocks in Reverse Post-Order.
641 while (true) {
642 bool progress = false;
643 for (int rpo = 0; rpo < block_count(); rpo++) {
644 Block* block = rpo_at(rpo);
645
646 if (block->is_parsed()) continue;
647
648 if (!block->is_merged()) {
649 // Dead block, no state reaches this block
650 continue;
651 }
652
653 // Prepare to parse this block.
654 load_state_from(block);
655
656 if (stopped()) {
657 // Block is dead.
658 continue;
659 }
660
661 NOT_PRODUCT(blocks_parsed++);
662
663 progress = true;
664 if (block->is_loop_head() || block->is_handler() || (has_irreducible && !block->is_ready())) {
665 // Not all preds have been parsed. We must build phis everywhere.
666 // (Note that dead locals do not get phis built, ever.)
667 ensure_phis_everywhere();
668
669 if (block->is_SEL_head()) {
670 // Add predicate to single entry (not irreducible) loop head.
671 assert(!block->has_merged_backedge(), "only entry paths should be merged for now");
672 // Predicates may have been added after a dominating if
673 if (!block->has_predicates()) {
674 // Need correct bci for predicate.
675 // It is fine to set it here since do_one_block() will set it anyway.
676 set_parse_bci(block->start());
677 add_parse_predicates();
678 }
679 // Add new region for back branches.
680 int edges = block->pred_count() - block->preds_parsed() + 1; // +1 for original region
681 RegionNode *r = new RegionNode(edges+1);
682 _gvn.set_type(r, Type::CONTROL);
683 record_for_igvn(r);
684 r->init_req(edges, control());
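// (Input layout sketch: slot 0 is the region's self-reference, the entry
// control just captured goes in slot 'edges', and the slots in between
// are filled in later, one per back branch, as the remaining
// predecessors are parsed.)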
987 //
988 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
989 // support_IRIW_for_not_multiple_copy_atomic_cpu selects that
990 // MemBarVolatile is used before volatile load instead of after volatile
991 // store, so there's no barrier after the store.
992 // We want to guarantee the same behavior as on platforms with total store
993 // order, although this is not required by the Java memory model.
994 // In this case, we want to enforce visibility of volatile field
995 // initializations which are performed in constructors.
996 // So as with finals, we add a barrier here.
997 //
998 // "All bets are off" unless the first publication occurs after a
999 // normal return from the constructor. We do not attempt to detect
1000 // such unusual early publications. But no barrier is needed on
1001 // exceptional returns, since they cannot publish normally.
1002 //
1003 if (method()->is_initializer() &&
1004 (wrote_final() ||
1005 (AlwaysSafeConstructors && wrote_fields()) ||
1006 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1007 _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
1008
1009 // If a memory barrier has been created for a final field write
1010 // and the allocation node does not escape the initializer method,
1011 // then the barrier introduced by the allocation node can be removed.
1012 if (DoEscapeAnalysis && alloc_with_final()) {
1013 AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_with_final());
1014 alloc->compute_MemBar_redundancy(method());
1015 }
1016 if (PrintOpto && (Verbose || WizardMode)) {
1017 method()->print_name();
1018 tty->print_cr(" writes finals and needs a memory barrier");
1019 }
1020 }
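// Example (sketch): for
//   class Point { final int x; Point(int x0) { x = x0; } }
// the MemBarRelease above keeps the store to 'x' from floating below the
// point where the constructed Point can first be published to another
// thread by a normal constructor return.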
1021
1022 // Any method can write a @Stable field; insert memory barriers
1023 // after those also. Can't bind predecessor allocation node (if any)
1024 // with barrier because allocation doesn't always dominate
1025 // MemBarRelease.
1026 if (wrote_stable()) {
1027 _exits.insert_mem_bar(Op_MemBarRelease);
1028 if (PrintOpto && (Verbose || WizardMode)) {
1029 method()->print_name();
1030 tty->print_cr(" writes @Stable and needs a memory barrier");
1031 }
1032 }
1033
1034 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1035 // transform each slice of the original memphi:
1036 mms.set_memory(_gvn.transform(mms.memory()));
1037 }
1038 // Clean up input MergeMems created by transforming the slices
1039 _gvn.transform(_exits.merged_memory());
1040
1041 if (tf()->range()->cnt() > TypeFunc::Parms) {
1042 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1043 Node* ret_phi = _gvn.transform( _exits.argument(0) );
1044 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1045 // If the type we set for the ret_phi in build_exits() is too optimistic and
1046 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1047 // loading. It could also be due to an error, so mark this method as not compilable because
1048 // otherwise this could lead to an infinite compile loop.
1049 // In any case, this code path is rarely (and never in my testing) reached.
1050 #ifdef ASSERT
1051 tty->print_cr("# Can't determine return type.");
1052 tty->print_cr("# exit control");
1053 _exits.control()->dump(2);
1054 tty->print_cr("# ret phi type");
1055 _gvn.type(ret_phi)->dump();
1056 tty->print_cr("# ret phi");
1057 ret_phi->dump(2);
1058 #endif // ASSERT
1059 assert(false, "Can't determine return type.");
1060 C->record_method_not_compilable("Can't determine return type.");
1061 return;
1062 }
1063 if (ret_type->isa_int()) {
1104 kit.map()->apply_replaced_nodes(_new_idx);
1105 }
1106 // Done with exception-path processing.
1107 ex_map = kit.make_exception_state(ex_oop);
1108 assert(ex_jvms->same_calls_as(ex_map->jvms()), "sanity");
1109 // Pop the last vestige of this method:
1110 caller->clone_shallow(C)->bind_map(ex_map);
1111 _exits.push_exception_state(ex_map);
1112 }
1113 assert(_exits.map() == normal_map, "keep the same return state");
1114 }
1115
1116 {
1117 // Capture very early exceptions (receiver null checks) from caller JVMS
1118 GraphKit caller(_caller);
1119 SafePointNode* ex_map;
1120 while ((ex_map = caller.pop_exception_state()) != nullptr) {
1121 _exits.add_exception_state(ex_map);
1122 }
1123 }
1124 _exits.map()->apply_replaced_nodes(_new_idx);
1125 }
1126
1127 //-----------------------------create_entry_map-------------------------------
1128 // Initialize our parser map to contain the types at method entry.
1129 // For OSR, the map contains a single RawPtr parameter.
1130 // Initial monitor locking for sync. methods is performed by do_method_entry.
1131 SafePointNode* Parse::create_entry_map() {
1132 // Check for really stupid bail-out cases.
1133 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
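// E.g. (sketch): a method with max_locals() == 3 and max_stack() == 2
// gets len == TypeFunc::Parms + 5; the assert on jvms->endoff() below
// checks exactly this layout.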
1134 if (len >= 32760) {
1135 // Bailout expected, this is a very rare edge case.
1136 C->record_method_not_compilable("too many local variables");
1137 return nullptr;
1138 }
1139
1140 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1141 _caller->map()->delete_replaced_nodes();
1142
1143 // If this is an inlined method, we may have to do a receiver null check.
1144 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1145 GraphKit kit(_caller);
1146 kit.null_check_receiver_before_call(method());
1147 _caller = kit.transfer_exceptions_into_jvms();
1148 if (kit.stopped()) {
1149 _exits.add_exception_states_from(_caller);
1150 _exits.set_jvms(_caller);
1151 return nullptr;
1152 }
1153 }
1154
1155 assert(method() != nullptr, "parser must have a method");
1156
1157 // Create an initial safepoint to hold JVM state during parsing
1158 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1159 set_map(new SafePointNode(len, jvms));
1160 jvms->set_map(map());
1161 record_for_igvn(map());
1162 assert(jvms->endoff() == len, "correct jvms sizing");
1163
1164 SafePointNode* inmap = _caller->map();
1165 assert(inmap != nullptr, "must have inmap");
1166 // In case of null check on receiver above
1167 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1168
1169 uint i;
1170
1171 // Pass thru the predefined input parameters.
1172 for (i = 0; i < TypeFunc::Parms; i++) {
1173 map()->init_req(i, inmap->in(i));
1174 }
1175
1176 if (depth() == 1) {
1177 assert(map()->memory()->Opcode() == Op_Parm, "");
1178 // Insert the memory aliasing node
1286 block->init_graph(this);
1287 }
1288 }
1289
1290 //-------------------------------init_node-------------------------------------
1291 Parse::Block::Block(Parse* outer, int rpo) : _live_locals() {
1292 _flow = outer->flow()->rpo_at(rpo);
1293 _pred_count = 0;
1294 _preds_parsed = 0;
1295 _count = 0;
1296 _is_parsed = false;
1297 _is_handler = false;
1298 _has_merged_backedge = false;
1299 _start_map = nullptr;
1300 _has_predicates = false;
1301 _num_successors = 0;
1302 _all_successors = 0;
1303 _successors = nullptr;
1304 assert(pred_count() == 0 && preds_parsed() == 0, "sanity");
1305 assert(!(is_merged() || is_parsed() || is_handler() || has_merged_backedge()), "sanity");
1306 assert(_live_locals.size() == 0, "sanity");
1307
1308 // entry point has additional predecessor
1309 if (flow()->is_start()) _pred_count++;
1310 assert(flow()->is_start() == (this == outer->start_block()), "");
1311 }
1312
1313 //-------------------------------init_graph------------------------------------
1314 void Parse::Block::init_graph(Parse* outer) {
1315 // Create the successor list for this parser block.
1316 GrowableArray<ciTypeFlow::Block*>* tfs = flow()->successors();
1317 GrowableArray<ciTypeFlow::Block*>* tfe = flow()->exceptions();
1318 int ns = tfs->length();
1319 int ne = tfe->length();
1320 _num_successors = ns;
1321 _all_successors = ns+ne;
1322 _successors = (ns+ne == 0) ? nullptr : NEW_RESOURCE_ARRAY(Block*, ns+ne);
1324 for (int i = 0; i < ns+ne; i++) {
1325 ciTypeFlow::Block* tf2 = (i < ns) ? tfs->at(i) : tfe->at(i-ns);
1326 Block* block2 = outer->rpo_at(tf2->rpo());
1327 _successors[i] = block2;
1328
1329 // Accumulate pred info for the other block, too.
1330 // Note: We also need to set _pred_count for exception blocks since they could
1331 // also have normal predecessors (reached without athrow by an explicit jump).
1332 // This also means that next_path_num can be called along exception paths.
1333 block2->_pred_count++;
1334 if (i >= ns) {
1335 block2->_is_handler = true;
1336 }
1337
1338 #ifdef ASSERT
1339 // A block's successors must be distinguishable by BCI.
1340 // That is, no bytecode is allowed to branch to two different
1341 // clones of the same code location.
1342 for (int j = 0; j < i; j++) {
1343 Block* block1 = _successors[j];
1344 if (block1 == block2) continue; // duplicates are OK
1345 assert(block1->start() != block2->start(), "successors have unique bcis");
1346 }
1347 #endif
1348 }
1349 }
1350
1351 //---------------------------successor_for_bci---------------------------------
1352 Parse::Block* Parse::Block::successor_for_bci(int bci) {
1353 for (int i = 0; i < all_successors(); i++) {
1354 Block* block2 = successor_at(i);
1355 if (block2->start() == bci) return block2;
1356 }
1357 // We can actually reach here if ciTypeFlow traps out a block
1358 // due to an unloaded class, and concurrently with compilation the
1359 // class is then loaded, so that a later phase of the parser is
1360 // able to see more of the bytecode CFG. Or, the flow pass and
1361 // the parser can have a minor difference of opinion about executability
1362 // of bytecodes. For example, "obj.field = null" is executable even
1363 // if the field's type is an unloaded class; the flow pass used to
1364 // make a trap for such code.
1365 return nullptr;
1366 }
1367
1368
1369 //-----------------------------stack_type_at-----------------------------------
1370 const Type* Parse::Block::stack_type_at(int i) const {
1371 return get_type(flow()->stack_type_at(i));
1372 }
1373
1374
1375 //-----------------------------local_type_at-----------------------------------
1376 const Type* Parse::Block::local_type_at(int i) const {
1377 // Make dead locals fall to bottom.
1378 if (_live_locals.size() == 0) {
1379 MethodLivenessResult live_locals = flow()->outer()->method()->liveness_at_bci(start());
1380 // This bitmap can be zero length if we saw a breakpoint.
1381 // In such cases, pretend they are all live.
1382 ((Block*)this)->_live_locals = live_locals;
1383 }
1384 if (_live_locals.size() > 0 && !_live_locals.at(i))
1385 return Type::BOTTOM;
1386
1387 return get_type(flow()->local_type_at(i));
1388 }
1389
1390
1391 #ifndef PRODUCT
1392
1393 //----------------------------name_for_bc--------------------------------------
1394 // helper method for BytecodeParseHistogram
1395 static const char* name_for_bc(int i) {
1396 return Bytecodes::is_defined(i) ? Bytecodes::name(Bytecodes::cast(i)) : "xxxunusedxxx";
1397 }
1398
1399 //----------------------------BytecodeParseHistogram------------------------------------
1400 Parse::BytecodeParseHistogram::BytecodeParseHistogram(Parse *p, Compile *c) {
1401 _parser = p;
1402 _compiler = c;
1403 if( ! _initialized ) { _initialized = true; reset(); }
1404 }
1480 }
1481 tty->print_cr("----------------------------------------------------------------------");
1482 float rel_sum = abs_sum * 100.0F / total;
1483 tty->print_cr("%10d %7.2f%% (cutoff = %.2f%%)", abs_sum, rel_sum, cutoff);
1484 tty->print_cr("----------------------------------------------------------------------");
1485 tty->cr();
1486 }
1487 #endif
1488
1489 //----------------------------load_state_from----------------------------------
1490 // Load block/map/sp. But do not touch iter/bci.
1491 void Parse::load_state_from(Block* block) {
1492 set_block(block);
1493 // load the block's JVM state:
1494 set_map(block->start_map());
1495 set_sp( block->start_sp());
1496 }
1497
1498
1499 //-----------------------------record_state------------------------------------
1500 void Parse::Block::record_state(Parse* p) {
1501 assert(!is_merged(), "can only record state once, on 1st inflow");
1502 assert(start_sp() == p->sp(), "stack pointer must agree with ciTypeFlow");
1503 set_start_map(p->stop());
1504 }
1505
1506
1507 //------------------------------do_one_block-----------------------------------
1508 void Parse::do_one_block() {
1509 if (TraceOptoParse) {
1510 Block *b = block();
1511 int ns = b->num_successors();
1512 int nt = b->all_successors();
1513
1514 tty->print("Parsing block #%d at bci [%d,%d), successors:",
1515 block()->rpo(), block()->start(), block()->limit());
1516 for (int i = 0; i < nt; i++) {
1517 tty->print((( i < ns) ? " %d" : " %d(exception block)"), b->successor_at(i)->rpo());
1518 }
1519 if (b->is_loop_head()) {
1520 tty->print(" loop head");
1521 }
1522 if (b->is_irreducible_loop_entry()) {
1523 tty->print(" irreducible");
1524 }
1525 tty->cr();
1526 }
1527
1528 assert(block()->is_merged(), "must be merged before being parsed");
1529 block()->mark_parsed();
1530
1531 // Set iterator to start of block.
1532 iter().reset_to_bci(block()->start());
1533
1534 if (ProfileExceptionHandlers && block()->is_handler()) {
1535 ciMethodData* methodData = method()->method_data();
1536 if (methodData->is_mature()) {
1537 ciBitData data = methodData->exception_handler_bci_to_data(block()->start());
1538 if (!data.exception_handler_entered() || StressPrunedExceptionHandlers) {
1539 // dead catch block
1540 // Emit an uncommon trap instead of processing the block.
1541 set_parse_bci(block()->start());
1542 uncommon_trap(Deoptimization::Reason_unreached,
1543 Deoptimization::Action_reinterpret,
1544 nullptr, "dead catch block");
1545 return;
1546 }
1547 }
1665 if (target == nullptr) { handle_missing_successor(target_bci); return; }
1666 assert(target->is_handler(), "exceptions are handled by special blocks");
1667 int pnum = target->add_new_path();
1668 merge_common(target, pnum);
1669 }
1670
1671 //--------------------handle_missing_successor---------------------------------
1672 void Parse::handle_missing_successor(int target_bci) {
1673 #ifndef PRODUCT
1674 Block* b = block();
1675 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1676 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1677 #endif
1678 ShouldNotReachHere();
1679 }
1680
1681 //--------------------------merge_common---------------------------------------
1682 void Parse::merge_common(Parse::Block* target, int pnum) {
1683 if (TraceOptoParse) {
1684 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1685 }
1686
1687 // Zap extra stack slots to top
1688 assert(sp() == target->start_sp(), "");
1689 clean_stack(sp());
1690
1691 if (!target->is_merged()) { // No prior mapping at this bci
1692 if (TraceOptoParse) { tty->print(" with empty state"); }
1693
1694 // If this path is dead, do not bother capturing it as a merge.
1695 // It is "as if" we had one fewer predecessor from the beginning.
1696 if (stopped()) {
1697 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1698 return;
1699 }
1700
1701 // Make a region if we know there are multiple or unpredictable inputs.
1702 // (Also, if this is a plain fall-through, we might see another region,
1703 // which must not be allowed into this block's map.)
1704 if (pnum > PhiNode::Input // Known multiple inputs.
1705 || target->is_handler() // These have unpredictable inputs.
1706 || target->is_loop_head() // Known multiple inputs
1707 || control()->is_Region()) { // We must hide this guy.
1708
1709 int current_bci = bci();
1710 set_parse_bci(target->start()); // Set target bci
1711 if (target->is_SEL_head()) {
1712 DEBUG_ONLY( target->mark_merged_backedge(block()); )
1713 if (target->start() == 0) {
1714 // Add parse predicates for the special case when
1715 // there are backbranches to the method entry.
1716 add_parse_predicates();
1717 }
1718 }
1719 // Add a Region to start the new basic block. Phis will be added
1720 // later lazily.
1721 int edges = target->pred_count();
1722 if (edges < pnum) edges = pnum; // might be a new path!
1723 RegionNode *r = new RegionNode(edges+1);
1724 gvn().set_type(r, Type::CONTROL);
1725 record_for_igvn(r);
1726 // zap all inputs to null for debugging (done in Node(uint) constructor)
1727 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1728 r->init_req(pnum, control());
1729 set_control(r);
1730 target->copy_irreducible_status_to(r, jvms());
1731 set_parse_bci(current_bci); // Restore bci
1732 }
1733
1734 // Convert the existing Parser mapping into a mapping at this bci.
1735 store_state_to(target);
1736 assert(target->is_merged(), "do not come here twice");
1737
1738 } else { // Prior mapping at this bci
1739 if (TraceOptoParse) { tty->print(" with previous state"); }
1740 #ifdef ASSERT
1741 if (target->is_SEL_head()) {
1742 target->mark_merged_backedge(block());
1743 }
1744 #endif
1745 // We must not manufacture more phis if the target is already parsed.
1746 bool nophi = target->is_parsed();
1747
1748 SafePointNode* newin = map();// Hang on to incoming mapping
1749 Block* save_block = block(); // Hang on to incoming block
1750 load_state_from(target); // Get prior mapping
1751
1752 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1753 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1754 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1755 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1756
1757 // Iterate over my current mapping and the old mapping.
1758 // Where different, insert Phi functions.
1759 // Use any existing Phi functions.
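// Worked example (sketch):
//   int x = a;          // predecessor 1 leaves 'a' in x's slot
//   if (p) { x = b; }   // predecessor 2 leaves 'b' there
//   use(x);             // merge point: the two inflows disagree
// The loop below installs Phi(Region, a, b) in x's JVMS slot, reusing an
// existing phi for this region if one was already built.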
1760 assert(control()->is_Region(), "must be merging to a region");
1761 RegionNode* r = control()->as_Region();
1762
1763 // Compute where to merge into
1764 // Merge incoming control path
1765 r->init_req(pnum, newin->control());
1766
1767 if (pnum == 1) { // Last merge for this Region?
1768 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1769 Node* result = _gvn.transform(r);
1770 if (r != result && TraceOptoParse) {
1771 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1772 }
1773 }
1774 record_for_igvn(r);
1775 }
1776
1777 // Update all the non-control inputs to map:
1778 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1779 bool check_elide_phi = target->is_SEL_backedge(save_block);
1780 for (uint j = 1; j < newin->req(); j++) {
1781 Node* m = map()->in(j); // Current state of target.
1782 Node* n = newin->in(j); // Incoming change to target state.
1783 PhiNode* phi;
1784 if (m->is_Phi() && m->as_Phi()->region() == r)
1785 phi = m->as_Phi();
1786 else
1787 phi = nullptr;
1788 if (m != n) { // Different; must merge
1789 switch (j) {
1790 // Frame pointer and Return Address never change
1791 case TypeFunc::FramePtr:// Drop m, use the original value
1792 case TypeFunc::ReturnAdr:
1793 break;
1794 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1795 assert(phi == nullptr, "the merge contains phis, not vice versa");
1796 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1797 continue;
1798 default: // All normal stuff
1799 if (phi == nullptr) {
1800 const JVMState* jvms = map()->jvms();
1801 if (EliminateNestedLocks &&
1802 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1803 // BoxLock nodes are never commoned.
1804 // Use the old BoxLock node as the merged box.
1805 assert(newin->jvms()->is_monitor_box(j), "sanity");
1806 // This assert also tests that nodes are BoxLock.
1807 assert(BoxLockNode::same_slot(n, m), "sanity");
1808 C->gvn_replace_by(n, m);
1809 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1810 phi = ensure_phi(j, nophi);
1811 }
1812 }
1813 break;
1814 }
1815 }
1816 // At this point, n might be top if:
1817 // - there is no phi (because TypeFlow detected a conflict), or
1818 // - the corresponding control edge is top (a dead incoming path)
1819 // It is a bug if we create a phi which sees a garbage value on a live path.
1820
1821 if (phi != nullptr) {
1822 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1823 assert(phi->region() == r, "");
1824 phi->set_req(pnum, n); // Then add 'n' to the merge
1825 if (pnum == PhiNode::Input) {
1826 // Last merge for this Phi.
1827 // So far, Phis have had a reasonable type from ciTypeFlow.
1828 // Now _gvn will join that with the meet of current inputs.
1829 // BOTTOM is never permissible here, 'cause pessimistically
1830 // Phis of pointers cannot lose the basic pointer type.
1831 debug_only(const Type* bt1 = phi->bottom_type());
1832 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1833 map()->set_req(j, _gvn.transform(phi));
1834 debug_only(const Type* bt2 = phi->bottom_type());
1835 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1836 record_for_igvn(phi);
1837 }
1838 }
1839 } // End of for all values to be merged
1840
1841 if (pnum == PhiNode::Input &&
1842 !r->in(0)) { // The occasional useless Region
1843 assert(control() == r, "");
1844 set_control(r->nonnull_req());
1845 }
1846
1847 map()->merge_replaced_nodes_with(newin);
1848
1849 // newin has been subsumed into the lazy merge, and is now dead.
1850 set_block(save_block);
1851
1852 stop(); // done with this guy, for now
1853 }
1854
1855 if (TraceOptoParse) {
1856 tty->print_cr(" on path %d", pnum);
1857 }
1858
1859 // Done with this parser state.
1860 assert(stopped(), "");
1861 }
1862
1863
1864 //--------------------------merge_memory_edges---------------------------------
1865 void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) {
1866 // (nophi means we must not create phis, because we already parsed here)
1867 assert(n != nullptr, "");
1868 // Merge the inputs to the MergeMems
1869 MergeMemNode* m = merged_memory();
1870
1871 assert(control()->is_Region(), "must be merging to a region");
1872 RegionNode* r = control()->as_Region();
1873
1874 PhiNode* base = nullptr;
1875 MergeMemNode* remerge = nullptr;
1876 for (MergeMemStream mms(m, n); mms.next_non_empty2(); ) {
1877 Node *p = mms.force_memory();
1878 Node *q = mms.memory2();
1956 // a monitor object is the subject of a replace_in_map operation.
1957 // See bugs 4426707 and 5043395.
1958 for (uint m = 0; m < nof_monitors; m++) {
1959 ensure_phi(map()->jvms()->monitor_obj_offset(m));
1960 }
1961 }
1962
1963
1964 //-----------------------------add_new_path------------------------------------
1965 // Add a previously unaccounted predecessor to this block.
1966 int Parse::Block::add_new_path() {
1967 // If there is no map, return the lowest unused path number.
1968 if (!is_merged()) return pred_count()+1; // there will be a map shortly
1969
1970 SafePointNode* map = start_map();
1971 if (!map->control()->is_Region())
1972 return pred_count()+1; // there may be a region some day
1973 RegionNode* r = map->control()->as_Region();
1974
1975 // Add new path to the region.
1976 uint pnum = r->req();
1977 r->add_req(nullptr);
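// Every phi hanging off this region must grow in lock-step so that
// phi->req() stays equal to r->req(); the loop below patches each one.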
1978
1979 for (uint i = 1; i < map->req(); i++) {
1980 Node* n = map->in(i);
1981 if (i == TypeFunc::Memory) {
1982 // Ensure a phi on all currently known memories.
1983 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
1984 Node* phi = mms.memory();
1985 if (phi->is_Phi() && phi->as_Phi()->region() == r) {
1986 assert(phi->req() == pnum, "must be same size as region");
1987 phi->add_req(nullptr);
1988 }
1989 }
1990 } else {
1991 if (n->is_Phi() && n->as_Phi()->region() == r) {
1992 assert(n->req() == pnum, "must be same size as region");
1993 n->add_req(nullptr);
1994 }
1995 }
1996 }
1997
1998 return pnum;
1999 }
2000
2001 //------------------------------ensure_phi-------------------------------------
2002 // Turn the idx'th entry of the current map into a Phi
2003 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2004 SafePointNode* map = this->map();
2005 Node* region = map->control();
2006 assert(region->is_Region(), "");
2007
2008 Node* o = map->in(idx);
2009 assert(o != nullptr, "");
2010
2011 if (o == top()) return nullptr; // TOP always merges into TOP
2012
2013 if (o->is_Phi() && o->as_Phi()->region() == region) {
2014 return o->as_Phi();
2033
2034 // If the type falls to bottom, then this must be a local that
2035 // is mixing ints and oops or some such. Forcing it to top
2036 // makes it go dead.
2037 if (t == Type::BOTTOM) {
2038 map->set_req(idx, top());
2039 return nullptr;
2040 }
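// E.g. (sketch): a slot that holds an int on one inflow and an oop on the
// other meets to BOTTOM; such a local must be dead at the merge (javac
// reuses slots this way), so it is killed here instead of getting a phi.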
2041
2042 // Do not create phis for top either.
2043 // A top on a non-null control flow must be unused, even after the phi.
2044 if (t == Type::TOP || t == Type::HALF) {
2045 map->set_req(idx, top());
2046 return nullptr;
2047 }
2048
2049 PhiNode* phi = PhiNode::make(region, o, t);
2050 gvn().set_type(phi, t);
2051 if (C->do_escape_analysis()) record_for_igvn(phi);
2052 map->set_req(idx, phi);
2053 return phi;
2054 }
2055
2056 //--------------------------ensure_memory_phi----------------------------------
2057 // Turn the idx'th slice of the current memory into a Phi
2058 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2059 MergeMemNode* mem = merged_memory();
2060 Node* region = control();
2061 assert(region->is_Region(), "");
2062
2063 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2064 assert(o != nullptr && o != top(), "");
2065
2066 PhiNode* phi;
2067 if (o->is_Phi() && o->as_Phi()->region() == region) {
2068 phi = o->as_Phi();
2069 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2070 // clone the shared base memory phi to make a new memory split
2071 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2072 const Type* t = phi->bottom_type();
2234 // get a copy of the base memory, and patch just this one input
2235 const TypePtr* adr_type = mms.adr_type(C);
2236 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2237 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2238 gvn().set_type_bottom(phi);
2239 phi->del_req(phi->req()-1); // prepare to re-patch
2240 mms.set_memory(phi);
2241 }
2242 mms.memory()->add_req(mms.memory2());
2243 }
2244
2245 // frame pointer is always same, already captured
2246 if (value != nullptr) {
2247 // If returning oops to an interface-return, there is a silent free
2248 // cast from oop to interface allowed by the Verifier. Make it explicit
2249 // here.
2250 Node* phi = _exits.argument(0);
2251 phi->add_req(value);
2252 }
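// E.g. (sketch): a method declared to return List may 'areturn' an
// ArrayList oop; the verifier allows that assignment without any check,
// which is the silent oop-to-interface cast mentioned above.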
2253
2254 if (_first_return) {
2255 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2256 _first_return = false;
2257 } else {
2258 _exits.map()->merge_replaced_nodes_with(map());
2259 }
2260
2261 stop_and_kill_map(); // This CFG path dies here
2262 }
2263
2264
2265 //------------------------------add_safepoint----------------------------------
2266 void Parse::add_safepoint() {
2267 uint parms = TypeFunc::Parms+1;
2268
2269 // Clear out dead values from the debug info.
2270 kill_dead_locals();
2271
2272 // Clone the JVM State
2273 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
2274
2275 // Capture memory state BEFORE a SafePoint. Since we can block at a
2276 // SafePoint we need our GC state to be safe; i.e. we need all our current
2277 // write barriers (card marks) to not float down after the SafePoint so we
2278 // must read raw memory. Likewise we need all oop stores to match the card
78 tty->print_cr("Blocks parsed: %u Blocks seen: %u", blocks_parsed, blocks_seen);
79
80 if (explicit_null_checks_inserted) {
81 tty->print_cr("%u original null checks - %u elided (%2u%%); optimizer leaves %u,",
82 explicit_null_checks_inserted, explicit_null_checks_elided,
83 (100*explicit_null_checks_elided)/explicit_null_checks_inserted,
84 all_null_checks_found);
85 }
86 if (all_null_checks_found) {
87 tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
88 (100*implicit_null_checks)/all_null_checks_found);
89 }
90 if (SharedRuntime::_implicit_null_throws) {
91 tty->print_cr("%u implicit null exceptions at runtime",
92 SharedRuntime::_implicit_null_throws);
93 }
94
95 if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
96 BytecodeParseHistogram::print();
97 }
98
99 if (DoPartialEscapeAnalysis) {
100 printPeaStatistics();
101 }
102 }
103 #endif
104
105 //------------------------------ON STACK REPLACEMENT---------------------------
106
107 // Construct a node which can be used to get incoming state for
108 // on stack replacement.
109 Node *Parse::fetch_interpreter_state(int index,
110 BasicType bt,
111 Node *local_addrs,
112 Node *local_addrs_base) {
113 Node *mem = memory(Compile::AliasIdxRaw);
114 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
115 Node *ctl = control();
116
117 // Very similar to LoadNode::make, except we handle un-aligned longs and
118 // doubles on Sparc. Intel can handle them just fine directly.
119 Node *l = nullptr;
120 switch (bt) { // Signature is flattened
121 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
375
376 if (bad_type_exit->control()->req() > 1) {
377 // Build an uncommon trap here, if any inputs can be unexpected.
378 bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
379 record_for_igvn(bad_type_exit->control());
380 SafePointNode* types_are_good = map();
381 set_map(bad_type_exit);
382 // The unexpected type happens because a new edge is active
383 // in the CFG, which typeflow had previously ignored.
384 // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
385 // This x will be typed as Integer if notReached is not yet linked.
386 // It could also happen due to a problem in ciTypeFlow analysis.
387 uncommon_trap(Deoptimization::Reason_constraint,
388 Deoptimization::Action_reinterpret);
389 set_map(types_are_good);
390 }
391 }
392
393 //------------------------------Parse------------------------------------------
394 // Main parser constructor.
395 Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, PEAState* caller_state)
396 : _exits(caller)
397 {
398 // Init some variables
399 _caller = caller;
400 _caller_state = caller_state;
401 _method = parse_method;
402 _expected_uses = expected_uses;
403 _depth = 1 + (caller->has_method() ? caller->depth() : 0);
404 _wrote_final = false;
405 _wrote_volatile = false;
406 _wrote_stable = false;
407 _wrote_fields = false;
408 _alloc_with_final = nullptr;
409 _block = nullptr;
410 _first_return = 0;
411 _replaced_nodes_for_exceptions = false;
412 _new_idx = C->unique();
413 DEBUG_ONLY(_entry_bci = UnknownBci);
414 DEBUG_ONLY(_block_count = -1);
415 DEBUG_ONLY(_blocks = (Block*)-1);
416 #ifndef PRODUCT
417 if (PrintCompilation || PrintOpto) {
418 // Make sure I have an inline tree, so I can print messages about it.
419 InlineTree::find_subtree_from_root(C->ilt(), caller, parse_method);
420 }
421 _max_switch_depth = 0;
422 _est_switch_depth = 0;
423
424 if (TraceOptoParse) {
425 tty->print_raw("Parsing method ");
426 parse_method->print_name(tty);
427 tty->print_cr(" {");
428 }
429 #endif
430
431 if (parse_method->has_reserved_stack_access()) {
432 C->set_has_reserved_stack_access(true);
433 }
434
435 if (parse_method->is_synchronized() || parse_method->has_monitor_bytecodes()) {
436 C->set_has_monitors(true);
437 }
438
439 _iter.reset_to_method(method());
440 C->set_has_loops(C->has_loops() || method()->has_loops());
441
442 if (_expected_uses <= 0) {
443 _prof_factor = 1;
444 } else {
445 float prof_total = parse_method->interpreter_invocation_count();
446 if (prof_total <= _expected_uses) {
447 _prof_factor = 1;
448 } else {
524 tty->print_cr("type flow bailout: %s", _flow->failure_reason());
525 }
526 if (Verbose) {
527 method()->print();
528 method()->print_codes();
529 _flow->print();
530 }
531 }
532 #endif
533 }
534
535 #ifdef ASSERT
536 if (depth() == 1) {
537 assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
538 } else {
539 assert(!this->is_osr_parse(), "no recursive OSR");
540 }
541 #endif
542
543 #ifndef PRODUCT
544 // Dump CFG in RPO order before parsing.
545 if (Verbose && !CITraceTypeFlow) {
546 _flow->rpo_print_on(tty);
547 }
548
549 if (_flow->has_irreducible_entry()) {
550 C->set_parsed_irreducible_loop(true);
551 }
552
553 methods_parsed++;
554 // add method size here to guarantee that inlined methods are added too
555 if (CITime)
556 _total_bytes_compiled += method()->code_size();
557
558 show_parse_info();
559 #endif
560
561 if (failing()) {
562 if (log) log->done("parse");
563 return;
564 }
565
566 gvn().transform(top());
567
568 // Import the results of the ciTypeFlow.
604
605 // Add check to deoptimize the nmethod if RTM state was changed
606 rtm_deopt();
607 }
608
609 // Check for bailouts during method entry or RTM state check setup.
610 if (failing()) {
611 if (log) log->done("parse");
612 C->set_default_node_notes(caller_nn);
613 return;
614 }
615
616 entry_map = map(); // capture any changes performed by method setup code
617 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
618
619 // We begin parsing as if we have just encountered a jump to the
620 // method entry.
621 Block* entry_block = start_block();
622 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
623 set_map_clone(entry_map);
624
625 merge_common(entry_block, entry_block->next_path_num());
626 #ifndef PRODUCT
627 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
628 set_parse_histogram( parse_histogram_obj );
629 #endif
630
631 // Parse all the basic blocks.
632 do_all_blocks();
633
634 // Check for bailouts during conversion to graph
635 if (failing()) {
636 if (log) log->done("parse");
637 return;
638 }
639
640 // Fix up all exiting control flow.
641 set_map(entry_map);
642 do_exits();
643
644 // Only reset this now, to make sure that debug information emitted
645 // for exiting control flow still refers to the inlined method.
646 C->set_default_node_notes(caller_nn);
647
648 if (log) log->done("parse nodes='%d' live='%d' memory='" SIZE_FORMAT "'",
649 C->unique(), C->live_nodes(), C->node_arena()->used());
650 }
651
652 #ifndef PRODUCT
653 Parse::~Parse() {
654 if (TraceOptoParse) {
655 tty->print("} // ");
656 method()->print_short_name(tty);
657 tty->cr();
658 }
659
660 if (DoPartialEscapeAnalysis && PEAVerbose) {
661 PEAState& as = _exits.jvms()->alloc_state();
662 auto objs = PEA()->all_objects();
663 for (int i = 0; i < objs.length(); ++i) {
664 ObjID obj = objs.at(i);
665
666 if (as.contains(obj)) {
667 ObjectState* os = as.get_object_state(obj);
668 tty->print("%4d | Obj%d\t", i, obj->_idx);
669
670 if (os->is_virtual()) {
671 tty->print_cr("V");
672 } else {
673 tty->print_cr("M");
674 }
675 }
676 }
677 }
678 }
679 #endif
680 //---------------------------do_all_blocks-------------------------------------
681 void Parse::do_all_blocks() {
682 bool has_irreducible = flow()->has_irreducible_entry();
683
684 // Walk over all blocks in Reverse Post-Order.
685 while (true) {
686 bool progress = false;
687 for (int rpo = 0; rpo < block_count(); rpo++) {
688 Block* block = rpo_at(rpo);
689
690 if (block->is_parsed()) continue;
691
692 if (!block->is_merged()) {
693 // Dead block, no state reaches this block
694 continue;
695 }
696
697 // Prepare to parse this block.
698 load_state_from(block);
699
700 if (stopped()) {
701 // Block is dead.
702 continue;
703 }
704
705 NOT_PRODUCT(blocks_parsed++);
706
707 progress = true;
708 if (block->is_loop_head() || block->is_handler() || (has_irreducible && !block->is_ready())) {
709 // Mark live objects 'Escaped' in the map before building phi nodes.
710 if (DoPartialEscapeAnalysis && block->is_loop_head()) {
711 PEAState& as = jvms()->alloc_state();
712 as.mark_all_live_objects_escaped(PEA(), map());
713 }
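// (Rationale sketch: at a loop head the back edges are still unparsed,
//  so an object that is virtual here might escape on a later iteration;
//  treating every live object as escaped up front keeps the allocation
//  state conservative.)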
714 // Not all preds have been parsed. We must build phis everywhere.
715 // (Note that dead locals do not get phis built, ever.)
716 ensure_phis_everywhere();
717
718 if (block->is_SEL_head()) {
719 // Add predicate to single entry (not irreducible) loop head.
720 assert(!block->has_merged_backedge(), "only entry paths should be merged for now");
721 // Predicates may have been added after a dominating if
722 if (!block->has_predicates()) {
723 // Need correct bci for predicate.
724 // It is fine to set it here since do_one_block() will set it anyway.
725 set_parse_bci(block->start());
726 add_parse_predicates();
727 }
728 // Add new region for back branches.
729 int edges = block->pred_count() - block->preds_parsed() + 1; // +1 for original region
730 RegionNode *r = new RegionNode(edges+1);
731 _gvn.set_type(r, Type::CONTROL);
732 record_for_igvn(r);
733 r->init_req(edges, control());
1036 //
1037 // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1038 // support_IRIW_for_not_multiple_copy_atomic_cpu selects that
1039 // MemBarVolatile is used before volatile load instead of after volatile
1040 // store, so there's no barrier after the store.
1041 // We want to guarantee the same behavior as on platforms with total store
1042 // order, although this is not required by the Java memory model.
1043 // In this case, we want to enforce visibility of volatile field
1044 // initializations which are performed in constructors.
1045 // So as with finals, we add a barrier here.
1046 //
1047 // "All bets are off" unless the first publication occurs after a
1048 // normal return from the constructor. We do not attempt to detect
1049 // such unusual early publications. But no barrier is needed on
1050 // exceptional returns, since they cannot publish normally.
1051 //
1052 if (method()->is_initializer() &&
1053 (wrote_final() ||
1054 (AlwaysSafeConstructors && wrote_fields()) ||
1055 (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1056 if (!DoPartialEscapeAnalysis) {
_exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
1057 // If a memory barrier has been created for a final field write
1058 // and the allocation node does not escape the initializer method,
1059 // then the barrier introduced by the allocation node can be removed.
1060 if (DoEscapeAnalysis && alloc_with_final()) {
1061 AllocateNode *alloc = AllocateNode::Ideal_allocation(alloc_with_final());
1062 alloc->compute_MemBar_redundancy(method());
1063 }
1064 } else {
1065 // In PEA, alloc_with_final() stores an ObjID.
1066 AllocateNode* alloc = (ObjID)alloc_with_final();
1067
1068 if (DoEscapeAnalysis && alloc != nullptr) {
1069 Node* obj = _exits.jvms()->alloc_state().get_java_oop(alloc);
1070 _exits.insert_mem_bar(Op_MemBarRelease, obj);
1071 alloc->compute_MemBar_redundancy(method());
1072 }
1073 }
1074
1075 if (PrintOpto && (Verbose || WizardMode)) {
1076 method()->print_name();
1077 tty->print_cr(" writes finals and needs a memory barrier");
1078 }
1079 }
1080
1081 // Any method can write a @Stable field; insert memory barriers
1082 // after those also. Can't bind predecessor allocation node (if any)
1083 // with barrier because allocation doesn't always dominate
1084 // MemBarRelease.
1085 if (wrote_stable()) {
1086 _exits.insert_mem_bar(Op_MemBarRelease);
1087 if (PrintOpto && (Verbose || WizardMode)) {
1088 method()->print_name();
1089 tty->print_cr(" writes @Stable and needs a memory barrier");
1090 }
1091 }
1092
1093 for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1094 // transform each slice of the original memphi:
1095 mms.set_memory(_gvn.transform(mms.memory()));
1096 }
1097 // Clean up input MergeMems created by transforming the slices
1098 _gvn.transform(_exits.merged_memory());
1099
1100 if (tf()->range()->cnt() > TypeFunc::Parms) {
1101 const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1102 Node* const ret_phi_old = _exits.argument(0);
1103 Node* ret_phi = _gvn.transform(ret_phi_old);
1104 if (DoPartialEscapeAnalysis && ret_phi_old != ret_phi) {
1105 PEAState& as = _exits.jvms()->alloc_state();
1106 EscapedState* es = as.as_escaped(PEA(), ret_phi_old);
1107
1108 if (es != nullptr && es->merged_value() == ret_phi_old) {
1109 es->update(ret_phi);
1110 ObjID obj = PEA()->is_alias(ret_phi_old);
1111 PEA()->add_alias(obj, ret_phi);
1112 }
1113 }
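// (Rationale sketch: if GVN canonicalized the return phi to a new node,
//  the escaped state and alias table still point at the old one, so they
//  are re-targeted here to stay consistent with the transformed graph.)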
1114 if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1115 // If the type we set for the ret_phi in build_exits() is too optimistic and
1116 // the ret_phi is top now, there's an extremely small chance that it may be due to class
1117 // loading. It could also be due to an error, so mark this method as not compilable because
1118 // otherwise this could lead to an infinite compile loop.
1119 // In any case, this code path is rarely (and never in my testing) reached.
1120 #ifdef ASSERT
1121 tty->print_cr("# Can't determine return type.");
1122 tty->print_cr("# exit control");
1123 _exits.control()->dump(2);
1124 tty->print_cr("# ret phi type");
1125 _gvn.type(ret_phi)->dump();
1126 tty->print_cr("# ret phi");
1127 ret_phi->dump(2);
1128 #endif // ASSERT
1129 assert(false, "Can't determine return type.");
1130 C->record_method_not_compilable("Can't determine return type.");
1131 return;
1132 }
1133 if (ret_type->isa_int()) {
1174 kit.map()->apply_replaced_nodes(_new_idx);
1175 }
1176 // Done with exception-path processing.
1177 ex_map = kit.make_exception_state(ex_oop);
1178 assert(ex_jvms->same_calls_as(ex_map->jvms()), "sanity");
1179 // Pop the last vestige of this method:
1180 caller->clone_shallow(C)->bind_map(ex_map);
1181 _exits.push_exception_state(ex_map);
1182 }
1183 assert(_exits.map() == normal_map, "keep the same return state");
1184 }
1185
1186 {
1187 // Capture very early exceptions (receiver null checks) from caller JVMS
1188 GraphKit caller(_caller);
1189 SafePointNode* ex_map;
1190 while ((ex_map = caller.pop_exception_state()) != nullptr) {
1191 _exits.add_exception_state(ex_map);
1192 }
1193 }
1194
1195 _exits.map()->apply_replaced_nodes(_new_idx);
1196 // Don't trust the replaced-nodes list here; return_current() may mess it up.
1197 // Use the AllocationState to update it instead.
1198 if (DoPartialEscapeAnalysis) {
1199 PEAState& as = _exits.jvms()->alloc_state();
1200 SafePointNode* map = _exits.map();
1201 backfill_materialized(map, TypeFunc::Parms, map->req(), as);
1202 }
1203 }
1204
1205 //-----------------------------create_entry_map-------------------------------
1206 // Initialize our parser map to contain the types at method entry.
1207 // For OSR, the map contains a single RawPtr parameter.
1208 // Initial monitor locking for sync. methods is performed by do_method_entry.
1209 SafePointNode* Parse::create_entry_map() {
1210 // Check for really stupid bail-out cases.
1211 uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1212 if (len >= 32760) {
1213 // Bailout expected, this is a very rare edge case.
1214 C->record_method_not_compilable("too many local variables");
1215 return nullptr;
1216 }
1217
1218 // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1219 _caller->map()->delete_replaced_nodes();
1220
1221 // If this is an inlined method, we may have to do a receiver null check.
1222 if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1223 GraphKit kit(_caller);
1224 kit.null_check_receiver_before_call(method());
1225 _caller = kit.transfer_exceptions_into_jvms();
1226 if (kit.stopped()) {
1227 _exits.add_exception_states_from(_caller);
1228 _exits.set_jvms(_caller);
1229 return nullptr;
1230 }
1231 }
1232
1233 assert(method() != nullptr, "parser must have a method");
1234
1235 // Create an initial safepoint to hold JVM state during parsing
1236 JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1237 if (_caller != nullptr && DoPartialEscapeAnalysis) {
1238 jvms->alloc_state() = _caller->alloc_state();
1239 }
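// (Rationale sketch: an inlined callee starts from the caller's
//  allocation state, so an object that is still virtual at the call site
//  remains virtual while the callee is parsed.)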
1240
1241 set_map(new SafePointNode(len, jvms));
1242 jvms->set_map(map());
1243 record_for_igvn(map());
1244 assert(jvms->endoff() == len, "correct jvms sizing");
1245
1246 SafePointNode* inmap = _caller->map();
1247 assert(inmap != nullptr, "must have inmap");
1248 // In case of null check on receiver above
1249 map()->transfer_replaced_nodes_from(inmap, _new_idx);
1250
1251 uint i;
1252
1253 // Pass thru the predefined input parameters.
1254 for (i = 0; i < TypeFunc::Parms; i++) {
1255 map()->init_req(i, inmap->in(i));
1256 }
1257
1258 if (depth() == 1) {
1259 assert(map()->memory()->Opcode() == Op_Parm, "");
1260 // Insert the memory aliasing node
1368 block->init_graph(this);
1369 }
1370 }
1371
1372 //-------------------------------init_node-------------------------------------
1373 Parse::Block::Block(Parse* outer, int rpo) : _live_locals() {
1374 _flow = outer->flow()->rpo_at(rpo);
1375 _pred_count = 0;
1376 _preds_parsed = 0;
1377 _count = 0;
1378 _is_parsed = false;
1379 _is_handler = false;
1380 _has_merged_backedge = false;
1381 _start_map = nullptr;
1382 _has_predicates = false;
1383 _num_successors = 0;
1384 _all_successors = 0;
1385 _successors = nullptr;
1386 assert(pred_count() == 0 && preds_parsed() == 0, "sanity");
1387 assert(!(is_merged() || is_parsed() || is_handler() || has_merged_backedge()), "sanity");
1388 assert(!_live_locals.is_valid(), "sanity");
1389
1390 // entry point has additional predecessor
1391 if (flow()->is_start()) _pred_count++;
1392 assert(flow()->is_start() == (this == outer->start_block()), "");
1393 }
1394
1395 //-------------------------------init_graph------------------------------------
1396 void Parse::Block::init_graph(Parse* outer) {
1397 // Create the successor list for this parser block.
1398 GrowableArray<ciTypeFlow::Block*>* tfs = flow()->successors();
1399 GrowableArray<ciTypeFlow::Block*>* tfe = flow()->exceptions();
1400 int ns = tfs->length();
1401 int ne = tfe->length();
1402 _num_successors = ns;
1403 _all_successors = ns+ne;
1404 _successors = (ns+ne == 0) ? nullptr: NEW_RESOURCE_ARRAY(Block*, ns+ne);
1405 for (int i = 0; i < ns+ne; i++) {
1406 ciTypeFlow::Block* tf2 = (i < ns) ? tfs->at(i) : tfe->at(i-ns);
1407 Block* block2 = outer->rpo_at(tf2->rpo());
1408 _successors[i] = block2;
1409
1410 // Accumulate pred info for the other block, too.
1411 // Note: We also need to set _pred_count for exception blocks since they could
1412 // also have normal predecessors (reached without athrow by an explicit jump).
1413 // This also means that next_path_num can be called along exception paths.
1414 block2->_pred_count++;
1415 if (i >= ns) {
1416 block2->_is_handler = true;
1417 }
1418
1419 #ifdef ASSERT
1420 // A block's successors must be distinguishable by BCI.
1421 // That is, no bytecode is allowed to branch to two different
1422 // clones of the same code location.
1423 for (int j = 0; j < i; j++) {
1424 Block* block1 = _successors[j];
1425 if (block1 == block2) continue; // duplicates are OK
1426 assert(block1->start() != block2->start(), "successors have unique bcis");
1427 }
1428 #endif
1429 }
1430
1431 if (DoPartialEscapeAnalysis) {
1432 GrowableArray<ciTypeFlow::Block*>* tfp = flow()->predecessors();
1433 int np = tfp->length();
1434 _predecessors = np > 0 ? NEW_RESOURCE_ARRAY(Block*, np) : nullptr;
1435 for (int i = 0; i < np; ++i) {
1436 ciTypeFlow::Block* tf2 = tfp->at(i);
1437 Block* block2 = outer->rpo_at(tf2->rpo());
1438 _predecessors[i] = block2;
1439 }
1440 }
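// (Rationale sketch: PEA keeps explicit predecessor links because merging
//  a block's allocation state walks the states flowing in from each
//  predecessor, which the successor-only lists above cannot provide.)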
1441 }
1442
1443 //---------------------------successor_for_bci---------------------------------
1444 Parse::Block* Parse::Block::successor_for_bci(int bci) {
1445 for (int i = 0; i < all_successors(); i++) {
1446 Block* block2 = successor_at(i);
1447 if (block2->start() == bci) return block2;
1448 }
1449 // We can actually reach here if ciTypeFlow traps out a block
1450 // due to an unloaded class, and concurrently with compilation the
1451 // class is then loaded, so that a later phase of the parser is
1452 // able to see more of the bytecode CFG. Or, the flow pass and
1453 // the parser can have a minor difference of opinion about executability
1454 // of bytecodes. For example, "obj.field = null" is executable even
1455 // if the field's type is an unloaded class; the flow pass used to
1456 // make a trap for such code.
1457 return nullptr;
1458 }
1459
1460
1461 //-----------------------------stack_type_at-----------------------------------
1462 const Type* Parse::Block::stack_type_at(int i) const {
1463 return get_type(flow()->stack_type_at(i));
1464 }
1465
1466
1467 //-----------------------------local_type_at-----------------------------------
1468 const Type* Parse::Block::local_type_at(int i) const {
1469 // This bitmap can be zero length if we saw a breakpoint.
1470 // In such cases, pretend they are all live.
1471 auto live_locals = liveness();
1472 if (live_locals.size() > 0 && !live_locals.at(i))
1473 return Type::BOTTOM;
1474
1475 return get_type(flow()->local_type_at(i));
1476 }
1477
1478
1479 #ifndef PRODUCT
1480
1481 //----------------------------name_for_bc--------------------------------------
1482 // helper method for BytecodeParseHistogram
1483 static const char* name_for_bc(int i) {
1484 return Bytecodes::is_defined(i) ? Bytecodes::name(Bytecodes::cast(i)) : "xxxunusedxxx";
1485 }
1486
1487 //----------------------------BytecodeParseHistogram------------------------------------
1488 Parse::BytecodeParseHistogram::BytecodeParseHistogram(Parse *p, Compile *c) {
1489 _parser = p;
1490 _compiler = c;
1491 if (!_initialized) { _initialized = true; reset(); }
1492 }
1568 }
1569 tty->print_cr("----------------------------------------------------------------------");
1570 float rel_sum = abs_sum * 100.0F / total;
1571 tty->print_cr("%10d %7.2f%% (cutoff = %.2f%%)", abs_sum, rel_sum, cutoff);
1572 tty->print_cr("----------------------------------------------------------------------");
1573 tty->cr();
1574 }
1575 #endif
1576
1577 //----------------------------load_state_from----------------------------------
1578 // Load block/map/sp. But do not touch iter/bci.
1579 void Parse::load_state_from(Block* block) {
1580 set_block(block);
1581 // load the block's JVM state:
1582 set_map(block->start_map());
1583 set_sp( block->start_sp());
1584 }
1585
1586
1587 //-----------------------------record_state------------------------------------
1588 void Parse::Block::record_state(Parse* p, int pnum) {
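// (Usage sketch) Reached via store_state_to() from merge_common() on a
// block's first live inflow; afterwards is_merged() is true and later
// inflows take the phi-building path in merge_common() instead.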
1589 assert(!is_merged(), "can only record state once, on 1st inflow");
1590 assert(start_sp() == p->sp(), "stack pointer must agree with ciTypeFlow");
1591 set_start_map(p->stop());
1592
1593 _from_block = p->block();
1594 _init_pnum = pnum;
1595 }
1596
1597
1598 //------------------------------do_one_block-----------------------------------
1599 void Parse::do_one_block() {
1600 if (TraceOptoParse) {
1601 Block *b = block();
1602 int ns = b->num_successors();
1603 int nt = b->all_successors();
1604
1605 tty->print("Parsing block #%d at bci [%d,%d), successors:",
1606 block()->rpo(), block()->start(), block()->limit());
1607 for (int i = 0; i < nt; i++) {
1608 tty->print((( i < ns) ? " %d" : " %d(exception block)"), b->successor_at(i)->rpo());
1609 }
1610 if (b->is_loop_head()) {
1611 tty->print(" loop head");
1612 }
1613 if (b->is_irreducible_loop_entry()) {
1614 tty->print(" irreducible");
1615 }
1616 tty->cr();
1617 }
1618
1619 #ifndef PRODUCT
1620 if (PEAVerbose) {
1621 PEAState& as = jvms()->alloc_state();
1622 as.print_on(tty);
1623 }
1624 #endif
1625 assert(block()->is_merged(), "must be merged before being parsed");
1626 block()->mark_parsed();
1627
1628 // Set iterator to start of block.
1629 iter().reset_to_bci(block()->start());
1630
1631 if (ProfileExceptionHandlers && block()->is_handler()) {
1632 ciMethodData* methodData = method()->method_data();
1633 if (methodData->is_mature()) {
1634 ciBitData data = methodData->exception_handler_bci_to_data(block()->start());
1635 if (!data.exception_handler_entered() || StressPrunedExceptionHandlers) {
1636 // dead catch block
1637 // Emit an uncommon trap instead of processing the block.
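// (Sketch of the effect:) a handler that profiling has never seen entered
// is pruned like never-taken code; should it be entered later, the trap
// deoptimizes and the method can be recompiled with the handler restored.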
1638 set_parse_bci(block()->start());
1639 uncommon_trap(Deoptimization::Reason_unreached,
1640 Deoptimization::Action_reinterpret,
1641 nullptr, "dead catch block");
1642 return;
1643 }
1644 }
1762 if (target == nullptr) { handle_missing_successor(target_bci); return; }
1763 assert(target->is_handler(), "exceptions are handled by special blocks");
1764 int pnum = target->add_new_path();
1765 merge_common(target, pnum);
1766 }
1767
1768 //--------------------handle_missing_successor---------------------------------
1769 void Parse::handle_missing_successor(int target_bci) {
1770 #ifndef PRODUCT
1771 Block* b = block();
1772 int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1773 tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1774 #endif
1775 ShouldNotReachHere();
1776 }
1777
1778 //--------------------------merge_common---------------------------------------
1779 void Parse::merge_common(Parse::Block* target, int pnum) {
1780 if (TraceOptoParse) {
1781 tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1782 if (!target->is_merged()) {
1783 tty->print(" with empty state");
1784 } else {
1785 tty->print(" with previous state");
1786 }
1787 tty->print_cr(" on path %d", pnum);
1788 }
1789
1790 // Zap extra stack slots to top
1791 assert(sp() == target->start_sp(), "");
1792 clean_stack(sp());
1793
1794 if (!target->is_merged()) { // No prior mapping at this bci
1795 // If this path is dead, do not bother capturing it as a merge.
1796 // It is "as if" we had one fewer predecessor from the beginning.
1797 if (stopped()) {
1798 if (TraceOptoParse) tty->print_cr(", but path is dead and doesn't count");
1799 return;
1800 }
1801
1802 // Make a region if we know there are multiple or unpredictable inputs.
1803 // (Also, if this is a plain fall-through, we might see another region,
1804 // which must not be allowed into this block's map.)
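// (Illustrative) For a simple diamond
//   if (cond) { x = a; } else { x = b; }
//   use(x);                             // <- this join block
// path numbers are handed out counting down from pred_count(), so the
// first inflow (pnum == 2) builds the Region below and the last inflow
// (pnum == 1) completes and transforms it.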
1805 if (pnum > PhiNode::Input // Known multiple inputs.
1806 || target->is_handler() // These have unpredictable inputs.
1807 || target->is_loop_head() // Known multiple inputs
1808 || control()->is_Region()) { // We must hide this guy.
1809
1810 int current_bci = bci();
1811 set_parse_bci(target->start()); // Set target bci
1812 if (target->is_SEL_head()) {
1813 DEBUG_ONLY( target->mark_merged_backedge(block()); )
1814 if (target->start() == 0) {
1816 // Add Parse Predicates for the special case when there are backbranches to the method entry.
1817 add_parse_predicates();
1818 }
1819 }
1820 // Add a Region to start the new basic block. Phis will be added
1821 // later lazily.
1822 int edges = target->pred_count();
1823 if (edges < pnum) edges = pnum; // might be a new path!
1824 RegionNode *r = new RegionNode(edges+1);
1825 gvn().set_type(r, Type::CONTROL);
1826 record_for_igvn(r);
1827 // zap all inputs to null for debugging (done in Node(uint) constructor)
1828 // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1829 r->init_req(pnum, control());
1830 set_control(r);
1831 target->copy_irreducible_status_to(r, jvms());
1832 set_parse_bci(current_bci); // Restore bci
1833 }
1834
1835 // Convert the existing Parser mapping into a mapping at this bci.
1836 store_state_to(target, pnum);
1837 assert(target->is_merged(), "do not come here twice");
1838 #ifdef ASSERT
1839 target->state().validate();
1840 #endif
1841 } else { // Prior mapping at this bci
1842
1843 #ifdef ASSERT
1844 if (target->is_SEL_head()) {
1845 target->mark_merged_backedge(block());
1846 }
1847 #endif
1848 // We must not manufacture more phis if the target is already parsed.
1849 bool nophi = target->is_parsed();
1850
1851 SafePointNode* newin = map();// Hang on to incoming mapping
1852 Block* save_block = block(); // Hang on to incoming block;
1853 load_state_from(target); // Get prior mapping
1854
1855 assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1856 assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1857 assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1858 assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1859
1860 // Iterate over my current mapping and the old mapping.
1861 // Where different, insert Phi functions.
1862 // Use any existing Phi functions.
1863 assert(control()->is_Region(), "must be merging to a region");
1864 RegionNode* r = control()->as_Region();
1865
1866 // Compute where to merge into
1867 // Merge incoming control path
1868 r->init_req(pnum, newin->control());
1869
1870 if (pnum == 1) { // Last merge for this Region?
1871 if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1872 Node* result = _gvn.transform(r);
1873 if (r != result && TraceOptoParse) {
1874 tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1875 }
1876 }
1877 record_for_igvn(r);
1878 }
1879
1880 // Update all the non-control inputs to map:
1881 assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1882 bool check_elide_phi = target->is_SEL_backedge(save_block);
1883 PEAState& pred_as = newin->jvms()->alloc_state();
1884 PEAState& as = block()->state();
1885 AllocationStateMerger as_merger(as);
1886
1887 for (uint j = 1; j < newin->req(); ++j) {
1888 Node* m = map()->in(j); // Current state of target.
1889 Node* n = newin->in(j); // Incoming change to target state.
1890 PhiNode* phi;
1891 if (m->is_Phi() && m->as_Phi()->region() == r)
1892 phi = m->as_Phi();
1893 else
1894 phi = nullptr;
1895 if (m != n) { // Different; must merge
1896 switch (j) {
1897 // Frame pointer and Return Address never change
1898 case TypeFunc::FramePtr:// Drop m, use the original value
1899 case TypeFunc::ReturnAdr:
1900 break;
1901 case TypeFunc::Memory: // Merge inputs to the MergeMem node
1902 assert(phi == nullptr, "the merge contains phis, not vice versa");
1903 merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1904 continue;
1905 default: // All normal stuff
1906 if (phi == nullptr) {
1907 const JVMState* jvms = map()->jvms();
1908 if (EliminateNestedLocks &&
1909 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1910 // BoxLock nodes are not commoned.
1911 // Use old BoxLock node as merged box.
1912 assert(newin->jvms()->is_monitor_box(j), "sanity");
1913 // This assert also tests that nodes are BoxLock.
1914 assert(BoxLockNode::same_slot(n, m), "sanity");
1915 C->gvn_replace_by(n, m);
1916 } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1917 phi = ensure_phi(j, nophi);
1918
1919 // We merge allocation states according to 5.2.4 of the dissertation.
1920 // Because C2's parser merges basic blocks, we have to intercept some phi
1921 // creation here, or the PEA MergeProcessor would create duplicate phi nodes.
1922 if (DoPartialEscapeAnalysis && phi != nullptr) {
1923 PartialEscapeAnalysis* pea = PEA();
1924 as_merger.merge_at_phi_creation(pea, pred_as, phi, m, n);
1925 } // DoPartialEscapeAnalysis
1926 }
1927 }
1928 break;
1929 }
1930 }
1931 // At this point, n might be top if:
1932 // - there is no phi (because TypeFlow detected a conflict), or
1933 // - the corresponding control edge is top (a dead incoming path)
1934 // It is a bug if we create a phi which sees a garbage value on a live path.
1935
1936 if (phi != nullptr) {
1937 assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1938 assert(phi->region() == r, "");
1939
1940 phi->set_req(pnum, n); // Then add 'n' to the merge
1941 if (pnum == PhiNode::Input) {
1942 // Last merge for this Phi.
1943 // So far, Phis have had a reasonable type from ciTypeFlow.
1944 // Now _gvn will join that with the meet of current inputs.
1945 // BOTTOM is never permissible here, 'cause pessimistically
1946 // Phis of pointers cannot lose the basic pointer type.
1947 debug_only(const Type* bt1 = phi->bottom_type());
1948 assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1949 map()->set_req(j, _gvn.transform(phi));
1950 debug_only(const Type* bt2 = phi->bottom_type());
1951 assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1952 record_for_igvn(phi);
1953 }
1954 }
1955 } // End of for all values to be merged
1956
1957
1958 if (DoPartialEscapeAnalysis) {
1959 as_merger.merge(pred_as, this, r, pnum);
1960 }
1961
1962 if (pnum == PhiNode::Input &&
1963 !r->in(0)) { // The occasional useless Region
1964 assert(control() == r, "");
1965 set_control(r->nonnull_req());
1966 }
1967
1968 map()->merge_replaced_nodes_with(newin);
1969
1970 #ifdef ASSERT
1971 block()->state().validate();
1972 #endif
1973 // newin has been subsumed into the lazy merge, and is now dead.
1974 set_block(save_block);
1975
1976 stop(); // done with this guy, for now
1977 }
1978
1979 // Done with this parser state.
1980 assert(stopped(), "");
1981 }
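
// (Sketch) Once every inflow of a join block has passed through
// merge_common(), its start map is roughly:
//   control: a Region with one input per predecessor path
//   values:  a Phi on that Region for each slot whose inputs differed
//   memory:  a MergeMem whose differing slices carry their own memory Phis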
1982
1983
1984 //--------------------------merge_memory_edges---------------------------------
1985 void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) {
1986 // (nophi means we must not create phis, because we already parsed here)
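// (Illustrative) Memory is split into alias slices (raw memory, each
// field, each array-element kind, ...). The stream below walks both
// MergeMems in parallel; only slices whose memory states differ get a
// memory Phi, the rest keep sharing a single memory node.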
1987 assert(n != nullptr, "");
1988 // Merge the inputs to the MergeMems
1989 MergeMemNode* m = merged_memory();
1990
1991 assert(control()->is_Region(), "must be merging to a region");
1992 RegionNode* r = control()->as_Region();
1993
1994 PhiNode* base = nullptr;
1995 MergeMemNode* remerge = nullptr;
1996 for (MergeMemStream mms(m, n); mms.next_non_empty2(); ) {
1997 Node *p = mms.force_memory();
1998 Node *q = mms.memory2();
2076 // a monitor object is the subject of a replace_in_map operation.
2077 // See bugs 4426707 and 5043395.
2078 for (uint m = 0; m < nof_monitors; m++) {
2079 ensure_phi(map()->jvms()->monitor_obj_offset(m));
2080 }
2081 }
2082
2083
2084 //-----------------------------add_new_path------------------------------------
2085 // Add a previously unaccounted predecessor to this block.
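// (Usage sketch) This is how exception edges widen a handler's merge:
// the exception-merge code above calls target->add_new_path() and then
// merge_common(target, pnum), growing the Region and all its phis by one
// input.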
2086 int Parse::Block::add_new_path() {
2087 // If there is no map, return the lowest unused path number.
2088 if (!is_merged()) return pred_count()+1; // there will be a map shortly
2089
2090 SafePointNode* map = start_map();
2091 if (!map->control()->is_Region())
2092 return pred_count()+1; // there may be a region some day
2093 RegionNode* r = map->control()->as_Region();
2094
2095 // Add new path to the region.
2096 const uint pnum = r->req();
2097 r->add_req(nullptr);
2098
2099 for (DUIterator_Fast imax, i = r->fast_outs(imax); i < imax; i++) {
2100 Node* n = r->fast_out(i);
2101
2102 if (n->is_MergeMem()) {
2103 // Ensure a phi on all currently known memories.
2104 for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2105 Node* phi = mms.memory();
2106 if (phi->is_Phi() && phi->as_Phi()->region() == r && phi->req() <= pnum) {
2107 assert(phi->req() == pnum, "must be same size as region");
2108 phi->add_req(nullptr);
2109 }
2110 }
2111 } else if (n->is_Phi() && n->as_Phi()->region() == r && n->req() <= pnum) {
2112 assert(n->req() == pnum, "must be same size as region");
2113 n->add_req(nullptr);
2114 }
2115 }
2116
2117 return pnum;
2118 }
2119
2120 //------------------------------ensure_phi-------------------------------------
2121 // Turn the idx'th entry of the current map into a Phi
2122 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2123 SafePointNode* map = this->map();
2124 Node* region = map->control();
2125 assert(region->is_Region(), "");
2126
2127 Node* o = map->in(idx);
2128 assert(o != nullptr, "");
2129
2130 if (o == top()) return nullptr; // TOP always merges into TOP
2131
2132 if (o->is_Phi() && o->as_Phi()->region() == region) {
2133 return o->as_Phi();
2152
2153 // If the type falls to bottom, then this must be a local that
2154 // is mixing ints and oops or some such. Forcing it to top
2155 // makes it go dead.
2156 if (t == Type::BOTTOM) {
2157 map->set_req(idx, top());
2158 return nullptr;
2159 }
2160
2161 // Do not create phis for top either.
2162 // A top value on a live control path must remain unused, even after the phi.
2163 if (t == Type::TOP || t == Type::HALF) {
2164 map->set_req(idx, top());
2165 return nullptr;
2166 }
2167
2168 PhiNode* phi = PhiNode::make(region, o, t);
2169 gvn().set_type(phi, t);
2170 if (C->do_escape_analysis()) record_for_igvn(phi);
2171 map->set_req(idx, phi);
2172
2173 return phi;
2174 }
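
// (Usage sketch) merge_common() calls ensure_phi(j, nophi) for each map
// slot whose incoming value differs from the recorded one; the first such
// call fans the recorded value out into PhiNode::make(region, o, t), and
// later paths then only need phi->set_req(pnum, n).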
2175
2176 //--------------------------ensure_memory_phi----------------------------------
2177 // Turn the idx'th slice of the current memory into a Phi
2178 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2179 MergeMemNode* mem = merged_memory();
2180 Node* region = control();
2181 assert(region->is_Region(), "");
2182
2183 Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2184 assert(o != nullptr && o != top(), "");
2185
2186 PhiNode* phi;
2187 if (o->is_Phi() && o->as_Phi()->region() == region) {
2188 phi = o->as_Phi();
2189 if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2190 // clone the shared base memory phi to make a new memory split
2191 assert(!nocreate, "Cannot build a phi for a block already parsed.");
2192 const Type* t = phi->bottom_type();
2354 // get a copy of the base memory, and patch just this one input
2355 const TypePtr* adr_type = mms.adr_type(C);
2356 Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2357 assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2358 gvn().set_type_bottom(phi);
2359 phi->del_req(phi->req()-1); // prepare to re-patch
2360 mms.set_memory(phi);
2361 }
2362 mms.memory()->add_req(mms.memory2());
2363 }
2364
2365 // frame pointer is always same, already captured
2366 if (value != nullptr) {
2367 // If returning oops to an interface-return, there is a silent free
2368 // cast from oop to interface allowed by the Verifier. Make it explicit
2369 // here.
2370 Node* phi = _exits.argument(0);
2371 phi->add_req(value);
2372 }
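// (Illustrative) _exits.argument(0) serves as the return-value phi of the
// shared exit block: each parsed 'return' appends its value as one more
// input, much as merge_common() grows phis at ordinary joins.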
2373
2374 if (_first_return++ == 0) {
2375 _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2376 // copy assignment
2377 _exits.jvms()->alloc_state() = jvms()->alloc_state();
2378 } else {
2379 _exits.map()->merge_replaced_nodes_with(map());
2380
2381 if (DoPartialEscapeAnalysis) {
2382 PEAState& as = _exits.jvms()->alloc_state();
2383 PEAState& newin = jvms()->alloc_state();
2384 AllocationStateMerger mp(as);
2385 // If value is a tracked object and PEA needs to create a phi node to merge it,
2386 // we need to use _exits.argument(0).
2387 ObjID obj = PEA()->is_alias(value);
2388 if (as.contains(obj) && newin.contains(obj)) {
2389 Node* phi = _exits.argument(0);
2390 mp.merge_at_phi_creation(PEA(), newin, phi->as_Phi(), phi->in(_first_return-1), value);
2391 }
2392 mp.merge(newin, &_exits, _exits.control()->as_Region(), _first_return);
2393 }
2394 }
2395
2396 stop_and_kill_map(); // This CFG path dies here
2397 }
2398
2399
2400 //------------------------------add_safepoint----------------------------------
2401 void Parse::add_safepoint() {
2402 uint parms = TypeFunc::Parms+1;
2403
2404 // Clear out dead values from the debug info.
2405 kill_dead_locals();
2406
2407 // Clone the JVM State
2408 SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
2409
2410 // Capture memory state BEFORE a SafePoint. Since we can block at a
2411 // SafePoint we need our GC state to be safe; i.e. we need all our current
2412 // write barriers (card marks) to not float down after the SafePoint so we
2413 // must read raw memory. Likewise we need all oop stores to match the card