src/hotspot/share/opto/parse1.cpp

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "compiler/compileLog.hpp"
  26 #include "interpreter/linkResolver.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "oops/method.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/c2compiler.hpp"
  31 #include "opto/castnode.hpp"

  32 #include "opto/idealGraphPrinter.hpp"

  33 #include "opto/locknode.hpp"
  34 #include "opto/memnode.hpp"
  35 #include "opto/opaquenode.hpp"
  36 #include "opto/parse.hpp"
  37 #include "opto/rootnode.hpp"
  38 #include "opto/runtime.hpp"
  39 #include "opto/type.hpp"

  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/safepointMechanism.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "utilities/bitMap.inline.hpp"
  44 #include "utilities/copy.hpp"
  45 
  46 // Static array so we can figure out which bytecodes most often stop us from
  47 // compiling. Some of the non-static variables are needed in bytecodeInfo.cpp
  48 // and eventually should be encapsulated in a proper class (gri 8/18/98).
  49 
  50 #ifndef PRODUCT
  51 uint nodes_created             = 0;
  52 uint methods_parsed            = 0;
  53 uint methods_seen              = 0;
  54 uint blocks_parsed             = 0;
  55 uint blocks_seen               = 0;
  56 
  57 uint explicit_null_checks_inserted = 0;
  58 uint explicit_null_checks_elided   = 0;
  59 uint all_null_checks_found         = 0;

  84   }
  85   if (all_null_checks_found) {
  86     tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
  87                   (100*implicit_null_checks)/all_null_checks_found);
  88   }
  89   if (SharedRuntime::_implicit_null_throws) {
  90     tty->print_cr("%u implicit null exceptions at runtime",
  91                   SharedRuntime::_implicit_null_throws);
  92   }
  93 
  94   if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
  95     BytecodeParseHistogram::print();
  96   }
  97 }
  98 #endif
  99 
 100 //------------------------------ON STACK REPLACEMENT---------------------------
 101 
 102 // Construct a node which can be used to get incoming state for
 103 // on stack replacement.
 104 Node *Parse::fetch_interpreter_state(int index,
 105                                      BasicType bt,
 106                                      Node *local_addrs,
 107                                      Node *local_addrs_base) {
 108   Node *mem = memory(Compile::AliasIdxRaw);
 109   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
 110   Node *ctl = control();
 111 
 112   // Very similar to LoadNode::make, except we handle un-aligned longs and
 113   // doubles on Sparc.  Intel can handle them just fine directly.
 114   Node *l = nullptr;
 115   switch (bt) {                // Signature is flattened
 116   case T_INT:     l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        MemNode::unordered); break;
 117   case T_FLOAT:   l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         MemNode::unordered); break;
 118   case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,  MemNode::unordered); break;
 119   case T_OBJECT:  l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
 120   case T_LONG:
 121   case T_DOUBLE: {
 122     // Since arguments are in reverse order, the argument address 'adr'
 123     // refers to the back half of the long/double.  Recompute adr.
 124     adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
 125     if (Matcher::misaligned_doubles_ok) {
 126       l = (bt == T_DOUBLE)
 127         ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
 128         : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
 129     } else {
 130       l = (bt == T_DOUBLE)
 131         ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
 132         : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
 133     }
 134     break;
 135   }
 136   default: ShouldNotReachHere();
 137   }
 138   return _gvn.transform(l);
 139 }
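// Illustrative use (a sketch, not code from this file): load_interpreter_state()
// below fetches, e.g., local i via
//   Node* v = fetch_interpreter_state(i, bt, locals_addr, osr_buf);
// where 'locals_addr' points at local 0 in the temp OSR buffer and successive
// locals sit at descending word offsets (hence the -index*wordSize above).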
 140 
 141 // Helper routine to prevent the interpreter from handing
 142 // unexpected typestate to an OSR method.
 143 // The Node l is a value newly dug out of the interpreter frame.
 144 // The type is the type predicted by ciTypeFlow.  Note that it is
 145 // not a general type, but can only come from Type::get_typeflow_type.
 146 // The safepoint is a map which will feed an uncommon trap.
 147 Node* Parse::check_interpreter_type(Node* l, const Type* type,
 148                                     SafePointNode* &bad_type_exit) {
 149 
 150   const TypeOopPtr* tp = type->isa_oopptr();
 151 
 152   // TypeFlow may assert null-ness if a type appears unloaded.
 153   if (type == TypePtr::NULL_PTR ||
 154       (tp != nullptr && !tp->is_loaded())) {
 155     // Value must be null, not a real oop.
 156     Node* chk = _gvn.transform( new CmpPNode(l, null()) );
 157     Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
 158     IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
 159     set_control(_gvn.transform( new IfTrueNode(iff) ));
 160     Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
 161     bad_type_exit->control()->add_req(bad_type);
 162     l = null();
 163   }
 164 
 165   // Typeflow can also cut off paths from the CFG, based on
 166   // types which appear unloaded, or call sites which appear unlinked.
 167   // When paths are cut off, values at later merge points can rise
 168   // toward more specific classes.  Make sure these specific classes
 169   // are still in effect.
 170   if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
 171     // TypeFlow asserted a specific object type.  Value must have that type.
 172     Node* bad_type_ctrl = nullptr;
 173     l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
 174     bad_type_exit->control()->add_req(bad_type_ctrl);
 175   }
 176 
 177   assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
 178   return l;
 179 }
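// Illustrative effect (a sketch): if ciTypeFlow predicted, say, String for a
// slot but the interpreter frame holds some other oop, the failing control
// projection of the checkcast above is accumulated on bad_type_exit, which
// load_interpreter_state() later turns into a Reason_constraint uncommon trap.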
 180 
 181 // Helper routine which sets up elements of the initial parser map when
 182 // performing a parse for on stack replacement.  Add values into map.
  183 // The only parameter contains the address of the interpreter's arguments.
 184 void Parse::load_interpreter_state(Node* osr_buf) {
 185   int index;
 186   int max_locals = jvms()->loc_size();
 187   int max_stack  = jvms()->stk_size();
 188 
 189 
 190   // Mismatch between method and jvms can occur since map briefly held
 191   // an OSR entry state (which takes up one RawPtr word).
 192   assert(max_locals == method()->max_locals(), "sanity");
 193   assert(max_stack  >= method()->max_stack(),  "sanity");
 194   assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
 195   assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
 196 
 197   // Find the start block.
 198   Block* osr_block = start_block();
 199   assert(osr_block->start() == osr_bci(), "sanity");
 200 
 201   // Set initial BCI.
 202   set_parse_bci(osr_block->start());
 203 
 204   // Set initial stack depth.
 205   set_sp(osr_block->start_sp());
 206 
 207   // Check bailouts.  We currently do not perform on stack replacement
 208   // of loops in catch blocks or loops which branch with a non-empty stack.
 209   if (sp() != 0) {

 224   for (index = 0; index < mcnt; index++) {
 225     // Make a BoxLockNode for the monitor.
 226     BoxLockNode* osr_box = new BoxLockNode(next_monitor());
 227     // Check for bailout after new BoxLockNode
 228     if (failing()) { return; }
 229 
  230     // This OSR locking region is unbalanced because it does not have a Lock node:
  231     // locking was done in the Interpreter.
  232     // This is similar to the Coarsened case when the Lock node is eliminated
  233     // and as a result the region is marked as Unbalanced.
 234 
 235     // Emulate Coarsened state transition from Regular to Unbalanced.
 236     osr_box->set_coarsened();
 237     osr_box->set_unbalanced();
 238 
 239     Node* box = _gvn.transform(osr_box);
 240 
 241     // Displaced headers and locked objects are interleaved in the
 242     // temp OSR buffer.  We only copy the locked objects out here.
 243     // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
 244     Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
  245     // Try to copy the displaced header to the BoxNode
 246     Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
 247 
 248 
 249     store_to_memory(control(), box, displaced_hdr, T_ADDRESS, MemNode::unordered);
 250 
 251     // Build a bogus FastLockNode (no code will be generated) and push the
 252     // monitor into our debug info.
 253     const FastLockNode *flock = _gvn.transform(new FastLockNode( nullptr, lock_object, box ))->as_FastLock();
 254     map()->push_monitor(flock);
 255 
 256     // If the lock is our method synchronization lock, tuck it away in
 257     // _sync_lock for return and rethrow exit paths.
 258     if (index == 0 && method()->is_synchronized()) {
 259       _synch_lock = flock;
 260     }
 261   }
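// Buffer layout implied by the fetches above (a sketch): monitor i's locked
// object is at slot 2*i and its displaced header at slot 2*i+1, with slots
// at descending addresses from 'monitors_addr' (note the -index*wordSize
// scaling inside fetch_interpreter_state).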
 262 
 263   // Use the raw liveness computation to make sure that unexpected
 264   // values don't propagate into the OSR frame.
 265   MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
 266   if (!live_locals.is_valid()) {
 267     // Degenerate or breakpointed method.

 295         if (C->log() != nullptr) {
 296           C->log()->elem("OSR_mismatch local_index='%d'",index);
 297         }
 298         set_local(index, null());
 299         // and ignore it for the loads
 300         continue;
 301       }
 302     }
 303 
 304     // Filter out TOP, HALF, and BOTTOM.  (Cf. ensure_phi.)
 305     if (type == Type::TOP || type == Type::HALF) {
 306       continue;
 307     }
 308     // If the type falls to bottom, then this must be a local that
 309     // is mixing ints and oops or some such.  Forcing it to top
 310     // makes it go dead.
 311     if (type == Type::BOTTOM) {
 312       continue;
 313     }
 314     // Construct code to access the appropriate local.
 315     BasicType bt = type->basic_type();
 316     if (type == TypePtr::NULL_PTR) {
 317       // Ptr types are mixed together with T_ADDRESS but null is
 318       // really for T_OBJECT types so correct it.
 319       bt = T_OBJECT;
 320     }
 321     Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
 322     set_local(index, value);
 323   }
 324 
 325   // Extract the needed stack entries from the interpreter frame.
 326   for (index = 0; index < sp(); index++) {
 327     const Type *type = osr_block->stack_type_at(index);
 328     if (type != Type::TOP) {
  329       // Currently the compiler bails out when attempting on-stack replacement
  330       // at a bci with a non-empty stack.  We should not reach here.
 331       ShouldNotReachHere();
 332     }
 333   }
 334 
 335   // End the OSR migration
 336   make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
 337                     CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
 338                     "OSR_migration_end", TypeRawPtr::BOTTOM,
 339                     osr_buf);
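// The temp buffer was filled by SharedRuntime::OSR_migration_begin in the
// interpreter; this leaf call is its counterpart and (in the usual HotSpot
// pairing) releases the buffer, so all fetches from osr_buf must be emitted
// before this point.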
 340 
 341   // Now that the interpreter state is loaded, make sure it will match

 352     if (type->isa_oopptr() != nullptr) {
 353       if (!live_oops.at(index)) {
 354         // skip type check for dead oops
 355         continue;
 356       }
 357     }
 358     if (osr_block->flow()->local_type_at(index)->is_return_address()) {
 359       // In our current system it's illegal for jsr addresses to be
 360       // live into an OSR entry point because the compiler performs
  361       // inlining of jsrs.  ciTypeFlow has a bailout that detects this
 362       // case and aborts the compile if addresses are live into an OSR
 363       // entry point.  Because of that we can assume that any address
 364       // locals at the OSR entry point are dead.  Method liveness
 365       // isn't precise enough to figure out that they are dead in all
  366       // cases, so simply skip checking address locals
  367       // altogether. Any type check is guaranteed to fail since the
 368       // interpreter type is the result of a load which might have any
 369       // value and the expected type is a constant.
 370       continue;
 371     }
 372     set_local(index, check_interpreter_type(l, type, bad_type_exit));
 373   }
 374 
 375   for (index = 0; index < sp(); index++) {
 376     if (stopped())  break;
 377     Node* l = stack(index);
 378     if (l->is_top())  continue;  // nothing here
 379     const Type *type = osr_block->stack_type_at(index);
 380     set_stack(index, check_interpreter_type(l, type, bad_type_exit));
 381   }
 382 
 383   if (bad_type_exit->control()->req() > 1) {
 384     // Build an uncommon trap here, if any inputs can be unexpected.
 385     bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
 386     record_for_igvn(bad_type_exit->control());
 387     SafePointNode* types_are_good = map();
 388     set_map(bad_type_exit);
 389     // The unexpected type happens because a new edge is active
 390     // in the CFG, which typeflow had previously ignored.
 391     // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
 392     // This x will be typed as Integer if notReached is not yet linked.
 393     // It could also happen due to a problem in ciTypeFlow analysis.
 394     uncommon_trap(Deoptimization::Reason_constraint,
 395                   Deoptimization::Action_reinterpret);
 396     set_map(types_are_good);
 397   }
 398 }
 399 
 400 //------------------------------Parse------------------------------------------

 501   // either breakpoint setting or hotswapping of methods may
 502   // cause deoptimization.
 503   if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
 504     C->dependencies()->assert_evol_method(method());
 505   }
 506 
 507   NOT_PRODUCT(methods_seen++);
 508 
 509   // Do some special top-level things.
 510   if (depth() == 1 && C->is_osr_compilation()) {
 511     _tf = C->tf();     // the OSR entry type is different
 512     _entry_bci = C->entry_bci();
 513     _flow = method()->get_osr_flow_analysis(osr_bci());
 514   } else {
 515     _tf = TypeFunc::make(method());
 516     _entry_bci = InvocationEntryBci;
 517     _flow = method()->get_flow_analysis();
 518   }
 519 
 520   if (_flow->failing()) {
 521     assert(false, "type flow analysis failed during parsing");


 522     C->record_method_not_compilable(_flow->failure_reason());
 523 #ifndef PRODUCT
 524       if (PrintOpto && (Verbose || WizardMode)) {
 525         if (is_osr_parse()) {
 526           tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
 527         } else {
 528           tty->print_cr("type flow bailout: %s", _flow->failure_reason());
 529         }
 530         if (Verbose) {
 531           method()->print();
 532           method()->print_codes();
 533           _flow->print();
 534         }
 535       }
 536 #endif
 537   }
 538 
 539 #ifdef ASSERT
 540   if (depth() == 1) {
 541     assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");

 592     load_interpreter_state(osr_buf);
 593   } else {
 594     set_map(entry_map);
 595     do_method_entry();
 596   }
 597 
 598   if (depth() == 1 && !failing()) {
 599     if (C->clinit_barrier_on_entry()) {
 600       // Add check to deoptimize the nmethod once the holder class is fully initialized
 601       clinit_deopt();
 602     }
 603   }
 604 
 605   // Check for bailouts during method entry.
 606   if (failing()) {
 607     if (log)  log->done("parse");
 608     C->set_default_node_notes(caller_nn);
 609     return;
 610   }
 611 
 612   entry_map = map();  // capture any changes performed by method setup code
 613   assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
 614 
 615   // We begin parsing as if we have just encountered a jump to the
 616   // method entry.
 617   Block* entry_block = start_block();
 618   assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
 619   set_map_clone(entry_map);
 620   merge_common(entry_block, entry_block->next_path_num());
 621 
 622 #ifndef PRODUCT
 623   BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
 624   set_parse_histogram( parse_histogram_obj );
 625 #endif
 626 
 627   // Parse all the basic blocks.
 628   do_all_blocks();
 629 
 630   // Check for bailouts during conversion to graph
 631   if (failing()) {

 777 void Parse::build_exits() {
 778   // make a clone of caller to prevent sharing of side-effects
 779   _exits.set_map(_exits.clone_map());
 780   _exits.clean_stack(_exits.sp());
 781   _exits.sync_jvms();
 782 
 783   RegionNode* region = new RegionNode(1);
 784   record_for_igvn(region);
 785   gvn().set_type_bottom(region);
 786   _exits.set_control(region);
 787 
 788   // Note:  iophi and memphi are not transformed until do_exits.
 789   Node* iophi  = new PhiNode(region, Type::ABIO);
 790   Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
 791   gvn().set_type_bottom(iophi);
 792   gvn().set_type_bottom(memphi);
 793   _exits.set_i_o(iophi);
 794   _exits.set_all_memory(memphi);
 795 
 796   // Add a return value to the exit state.  (Do not push it yet.)
 797   if (tf()->range()->cnt() > TypeFunc::Parms) {
 798     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
 799     if (ret_type->isa_int()) {
 800       BasicType ret_bt = method()->return_type()->basic_type();
 801       if (ret_bt == T_BOOLEAN ||
 802           ret_bt == T_CHAR ||
 803           ret_bt == T_BYTE ||
 804           ret_bt == T_SHORT) {
 805         ret_type = TypeInt::INT;
 806       }
 807     }
 808 
 809     // Don't "bind" an unloaded return klass to the ret_phi. If the klass
 810     // becomes loaded during the subsequent parsing, the loaded and unloaded
 811     // types will not join when we transform and push in do_exits().
 812     const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
 813     if (ret_oop_type && !ret_oop_type->is_loaded()) {
 814       ret_type = TypeOopPtr::BOTTOM;
 815     }
 816     int         ret_size = type2size[ret_type->basic_type()];
 817     Node*       ret_phi  = new PhiNode(region, ret_type);
 818     gvn().set_type_bottom(ret_phi);
 819     _exits.ensure_stack(ret_size);
 820     assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
 821     assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
 822     _exits.set_argument(0, ret_phi);  // here is where the parser finds it
 823     // Note:  ret_phi is not yet pushed, until do_exits.
 824   }
 825 }
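// Shape of the exit state built above (a sketch): each returning path later
// adds one input per node via return_current():
//   region:  Region(1)                              <- exit control
//   iophi:   Phi(region, Type::ABIO)                <- exit i/o
//   memphi:  Phi(region, Type::MEMORY, TypePtr::BOTTOM)
//   ret_phi: Phi(region, ret_type)                  <- only if the method returns a value
// None of these is transformed until do_exits().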
 826 
 827 
 828 //----------------------------build_start_state-------------------------------
 829 // Construct a state which contains only the incoming arguments from an
 830 // unknown caller.  The method & bci will be null & InvocationEntryBci.
 831 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
 832   int        arg_size = tf->domain()->cnt();
 833   int        max_size = MAX2(arg_size, (int)tf->range()->cnt());
 834   JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
 835   SafePointNode* map  = new SafePointNode(max_size, jvms);

 836   record_for_igvn(map);
 837   assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
 838   Node_Notes* old_nn = default_node_notes();
 839   if (old_nn != nullptr && has_method()) {
 840     Node_Notes* entry_nn = old_nn->clone(this);
 841     JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
 842     entry_jvms->set_offsets(0);
 843     entry_jvms->set_bci(entry_bci());
 844     entry_nn->set_jvms(entry_jvms);
 845     set_default_node_notes(entry_nn);
 846   }
 847   uint i;
 848   for (i = 0; i < (uint)arg_size; i++) {
 849     Node* parm = initial_gvn()->transform(new ParmNode(start, i));
 850     map->init_req(i, parm);
 851     // Record all these guys for later GVN.
 852     record_for_igvn(parm);
 853   }
 854   for (; i < map->req(); i++) {
 855     map->init_req(i, top());
 856   }
 857   assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
 858   set_default_node_notes(old_nn);
 859   jvms->set_map(map);
 860   return jvms;
 861 }
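// Resulting map layout (a sketch, using the fixed TypeFunc slots):
//   in(0..TypeFunc::Parms-1) = Control, I_O, Memory, FramePtr, ReturnAdr parms
//   in(TypeFunc::Parms...)   = incoming Java arguments (receiver first, if any;
//                              for an OSR compilation, the single osr_buf arg)
//   remaining inputs up to max_size are filled with top().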
 862 
 863 //-----------------------------make_node_notes---------------------------------
 864 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
 865   if (caller_nn == nullptr)  return nullptr;
 866   Node_Notes* nn = caller_nn->clone(C);
 867   JVMState* caller_jvms = nn->jvms();
 868   JVMState* jvms = new (C) JVMState(method(), caller_jvms);
 869   jvms->set_offsets(0);
 870   jvms->set_bci(_entry_bci);
 871   nn->set_jvms(jvms);
 872   return nn;
 873 }
 874 
 875 
 876 //--------------------------return_values--------------------------------------
 877 void Compile::return_values(JVMState* jvms) {
 878   GraphKit kit(jvms);
 879   Node* ret = new ReturnNode(TypeFunc::Parms,
 880                              kit.control(),
 881                              kit.i_o(),
 882                              kit.reset_memory(),
 883                              kit.frameptr(),
 884                              kit.returnadr());
 885   // Add zero or 1 return values
 886   int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
 887   if (ret_size > 0) {
 888     kit.inc_sp(-ret_size);  // pop the return value(s)
 889     kit.sync_jvms();
 890     ret->add_req(kit.argument(0));
 891     // Note:  The second dummy edge is not needed by a ReturnNode.
 892   }
 893   // bind it to root
 894   root()->add_req(ret);
 895   record_for_igvn(ret);
 896   initial_gvn()->transform(ret);
 897 }
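// The resulting ReturnNode carries {control, i_o, memory, frameptr, returnadr}
// plus at most one value edge; binding it to the Root node above keeps it
// alive through subsequent graph transformations.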
 898 
 899 //------------------------rethrow_exceptions-----------------------------------
 900 // Bind all exception states in the list into a single RethrowNode.
 901 void Compile::rethrow_exceptions(JVMState* jvms) {
 902   GraphKit kit(jvms);
 903   if (!kit.has_exceptions())  return;  // nothing to generate
 904   // Load my combined exception state into the kit, with all phis transformed:
 905   SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
 906   Node* ex_oop = kit.use_exception_state(ex_map);
 907   RethrowNode* exit = new RethrowNode(kit.control(),
 908                                       kit.i_o(), kit.reset_memory(),
 909                                       kit.frameptr(), kit.returnadr(),
 910                                       // like a return but with exception input
 911                                       ex_oop);

 995   //    to complete, we force all writes to complete.
 996   //
 997   // 2. Experimental VM option is used to force the barrier if any field
 998   //    was written out in the constructor.
 999   //
1000   // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1001   //    support_IRIW_for_not_multiple_copy_atomic_cpu selects that
1002   //    MemBarVolatile is used before volatile load instead of after volatile
1003   //    store, so there's no barrier after the store.
1004   //    We want to guarantee the same behavior as on platforms with total store
1005   //    order, although this is not required by the Java memory model.
1006   //    In this case, we want to enforce visibility of volatile field
1007   //    initializations which are performed in constructors.
1008   //    So as with finals, we add a barrier here.
1009   //
1010   // "All bets are off" unless the first publication occurs after a
1011   // normal return from the constructor.  We do not attempt to detect
1012   // such unusual early publications.  But no barrier is needed on
1013   // exceptional returns, since they cannot publish normally.
1014   //
1015   if (method()->is_object_initializer() &&
1016        (wrote_final() || wrote_stable() ||
1017          (AlwaysSafeConstructors && wrote_fields()) ||
1018          (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1019     Node* recorded_alloc = alloc_with_final_or_stable();
1020     _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
1021                           recorded_alloc);
1022 
 1023     // If a memory barrier is created for final field writes
 1024     // and the allocation node does not escape the initializer method,
 1025     // then the barrier introduced by the allocation node can be removed.
1026     if (DoEscapeAnalysis && (recorded_alloc != nullptr)) {
1027       AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc);
1028       alloc->compute_MemBar_redundancy(method());
1029     }
1030     if (PrintOpto && (Verbose || WizardMode)) {
1031       method()->print_name();
1032       tty->print_cr(" writes finals/@Stable and needs a memory barrier");
1033     }
1034   }
1035 
1036   for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1037     // transform each slice of the original memphi:
1038     mms.set_memory(_gvn.transform(mms.memory()));
1039   }
1040   // Clean up input MergeMems created by transforming the slices
1041   _gvn.transform(_exits.merged_memory());
1042 
1043   if (tf()->range()->cnt() > TypeFunc::Parms) {
1044     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1045     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
1046     if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1047       // If the type we set for the ret_phi in build_exits() is too optimistic and
1048       // the ret_phi is top now, there's an extremely small chance that it may be due to class
1049       // loading.  It could also be due to an error, so mark this method as not compilable because
1050       // otherwise this could lead to an infinite compile loop.
1051       // In any case, this code path is rarely (and never in my testing) reached.
1052       C->record_method_not_compilable("Can't determine return type.");
1053       return;
1054     }
1055     if (ret_type->isa_int()) {
1056       BasicType ret_bt = method()->return_type()->basic_type();
1057       ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1058     }
1059     _exits.push_node(ret_type->basic_type(), ret_phi);
1060   }
1061 
1062   // Note:  Logic for creating and optimizing the ReturnNode is in Compile.
1063 
1064   // Unlock along the exceptional paths.

1118 
1119 //-----------------------------create_entry_map-------------------------------
1120 // Initialize our parser map to contain the types at method entry.
1121 // For OSR, the map contains a single RawPtr parameter.
1122 // Initial monitor locking for sync. methods is performed by do_method_entry.
1123 SafePointNode* Parse::create_entry_map() {
1124   // Check for really stupid bail-out cases.
1125   uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1126   if (len >= 32760) {
1127     // Bailout expected, this is a very rare edge case.
1128     C->record_method_not_compilable("too many local variables");
1129     return nullptr;
1130   }
1131 
1132   // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1133   _caller->map()->delete_replaced_nodes();
1134 
1135   // If this is an inlined method, we may have to do a receiver null check.
1136   if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1137     GraphKit kit(_caller);
1138     kit.null_check_receiver_before_call(method());

1139     _caller = kit.transfer_exceptions_into_jvms();

1140     if (kit.stopped()) {
1141       _exits.add_exception_states_from(_caller);
1142       _exits.set_jvms(_caller);
1143       return nullptr;
1144     }
1145   }
1146 
1147   assert(method() != nullptr, "parser must have a method");
1148 
1149   // Create an initial safepoint to hold JVM state during parsing
1150   JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1151   set_map(new SafePointNode(len, jvms));
1152 
1153   // Capture receiver info for compiled lambda forms.
1154   if (method()->is_compiled_lambda_form()) {
1155     ciInstance* recv_info = _caller->compute_receiver_info(method());
1156     jvms->set_receiver_info(recv_info);
1157   }
1158 
1159   jvms->set_map(map());

1163   SafePointNode* inmap = _caller->map();
1164   assert(inmap != nullptr, "must have inmap");
1165   // In case of null check on receiver above
1166   map()->transfer_replaced_nodes_from(inmap, _new_idx);
1167 
1168   uint i;
1169 
1170   // Pass thru the predefined input parameters.
1171   for (i = 0; i < TypeFunc::Parms; i++) {
1172     map()->init_req(i, inmap->in(i));
1173   }
1174 
1175   if (depth() == 1) {
1176     assert(map()->memory()->Opcode() == Op_Parm, "");
1177     // Insert the memory aliasing node
1178     set_all_memory(reset_memory());
1179   }
1180   assert(merged_memory(), "");
1181 
1182   // Now add the locals which are initially bound to arguments:
1183   uint arg_size = tf()->domain()->cnt();
1184   ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
1185   for (i = TypeFunc::Parms; i < arg_size; i++) {
1186     map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1187   }
1188 
1189   // Clear out the rest of the map (locals and stack)
1190   for (i = arg_size; i < len; i++) {
1191     map()->init_req(i, top());
1192   }
1193 
1194   SafePointNode* entry_map = stop();
1195   return entry_map;
1196 }
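// Entry map layout produced above (a sketch):
//   [ TypeFunc::Parms fixed inputs | locals: args then top | stack: all top ]
// For an OSR parse the "argument" section is just the single RawPtr osr_buf
// parameter; load_interpreter_state() then rebuilds locals and monitors from
// the interpreter frame.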
1197 
1198 //-----------------------------do_method_entry--------------------------------
1199 // Emit any code needed in the pseudo-block before BCI zero.
1200 // The main thing to do is lock the receiver of a synchronized method.
1201 void Parse::do_method_entry() {
1202   set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1203   set_sp(0);                         // Java Stack Pointer
1204 
1205   NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
1206 
1207   if (C->env()->dtrace_method_probes()) {
1208     make_dtrace_method_entry(method());
1209   }
1210 
1211 #ifdef ASSERT
1212   // Narrow receiver type when it is too broad for the method being parsed.
1213   if (!method()->is_static()) {
1214     ciInstanceKlass* callee_holder = method()->holder();
1215     const Type* holder_type = TypeInstPtr::make(TypePtr::BotPTR, callee_holder, Type::trust_interfaces);
1216 
1217     Node* receiver_obj = local(0);
1218     const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1219 
1220     if (receiver_type != nullptr && !receiver_type->higher_equal(holder_type)) {
1221       // Receiver should always be a subtype of callee holder.
 1222       // But, since the C2 type system doesn't properly track interfaces,
1223       // the invariant can't be expressed in the type system for default methods.
1224       // Example: for unrelated C <: I and D <: I, (C `meet` D) = Object </: I.
1225       assert(callee_holder->is_interface(), "missing subtype check");
1226 

1237 
1238   // If the method is synchronized, we need to construct a lock node, attach
1239   // it to the Start node, and pin it there.
1240   if (method()->is_synchronized()) {
1241     // Insert a FastLockNode right after the Start which takes as arguments
1242     // the current thread pointer, the "this" pointer & the address of the
1243     // stack slot pair used for the lock.  The "this" pointer is a projection
1244     // off the start node, but the locking spot has to be constructed by
1245     // creating a ConLNode of 0, and boxing it with a BoxLockNode.  The BoxLockNode
1246     // becomes the second argument to the FastLockNode call.  The
1247     // FastLockNode becomes the new control parent to pin it to the start.
1248 
1249     // Setup Object Pointer
1250     Node *lock_obj = nullptr;
1251     if (method()->is_static()) {
1252       ciInstance* mirror = _method->holder()->java_mirror();
1253       const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1254       lock_obj = makecon(t_lock);
1255     } else {                  // Else pass the "this" pointer,
1256       lock_obj = local(0);    // which is Parm0 from StartNode

1257     }
1258     // Clear out dead values from the debug info.
1259     kill_dead_locals();
1260     // Build the FastLockNode
1261     _synch_lock = shared_lock(lock_obj);
1262     // Check for bailout in shared_lock
1263     if (failing()) { return; }
1264   }
1265 
1266   // Feed profiling data for parameters to the type system so it can
1267   // propagate it as speculative types
1268   record_profiled_parameters_for_speculation();
1269 }
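// Entry-lock sketch for a synchronized method (per the block above): the
// locked object is the receiver (local 0) for instance methods or the
// holder's java mirror for static ones; shared_lock() records the
// FastLockNode in _synch_lock so exit paths can emit the matching unlock.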
1270 
1271 //------------------------------init_blocks------------------------------------
1272 // Initialize our parser map to contain the types/monitors at method entry.
1273 void Parse::init_blocks() {
1274   // Create the blocks.
1275   _block_count = flow()->block_count();
1276   _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);

1672 //--------------------handle_missing_successor---------------------------------
1673 void Parse::handle_missing_successor(int target_bci) {
1674 #ifndef PRODUCT
1675   Block* b = block();
1676   int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1677   tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1678 #endif
1679   ShouldNotReachHere();
1680 }
1681 
1682 //--------------------------merge_common---------------------------------------
1683 void Parse::merge_common(Parse::Block* target, int pnum) {
1684   if (TraceOptoParse) {
1685     tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1686   }
1687 
1688   // Zap extra stack slots to top
1689   assert(sp() == target->start_sp(), "");
1690   clean_stack(sp());
1691 
1692   if (!target->is_merged()) {   // No prior mapping at this bci
1693     if (TraceOptoParse) { tty->print(" with empty state");  }
1694 
1695     // If this path is dead, do not bother capturing it as a merge.
1696     // It is "as if" we had 1 fewer predecessors from the beginning.
1697     if (stopped()) {
1698       if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
1699       return;
1700     }
1701 
1702     // Make a region if we know there are multiple or unpredictable inputs.
1703     // (Also, if this is a plain fall-through, we might see another region,
1704     // which must not be allowed into this block's map.)
1705     if (pnum > PhiNode::Input         // Known multiple inputs.
1706         || target->is_handler()       // These have unpredictable inputs.
1707         || target->is_loop_head()     // Known multiple inputs
1708         || control()->is_Region()) {  // We must hide this guy.
1709 
1710       int current_bci = bci();
1711       set_parse_bci(target->start()); // Set target bci

1726       record_for_igvn(r);
1727       // zap all inputs to null for debugging (done in Node(uint) constructor)
1728       // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1729       r->init_req(pnum, control());
1730       set_control(r);
1731       target->copy_irreducible_status_to(r, jvms());
1732       set_parse_bci(current_bci); // Restore bci
1733     }
1734 
1735     // Convert the existing Parser mapping into a mapping at this bci.
1736     store_state_to(target);
1737     assert(target->is_merged(), "do not come here twice");
1738 
1739   } else {                      // Prior mapping at this bci
1740     if (TraceOptoParse) {  tty->print(" with previous state"); }
1741 #ifdef ASSERT
1742     if (target->is_SEL_head()) {
1743       target->mark_merged_backedge(block());
1744     }
1745 #endif

1746     // We must not manufacture more phis if the target is already parsed.
1747     bool nophi = target->is_parsed();
1748 
1749     SafePointNode* newin = map();// Hang on to incoming mapping
1750     Block* save_block = block(); // Hang on to incoming block;
1751     load_state_from(target);    // Get prior mapping
1752 
1753     assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1754     assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1755     assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1756     assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1757 
1758     // Iterate over my current mapping and the old mapping.
1759     // Where different, insert Phi functions.
1760     // Use any existing Phi functions.
1761     assert(control()->is_Region(), "must be merging to a region");
1762     RegionNode* r = control()->as_Region();
1763 
1764     // Compute where to merge into
1765     // Merge incoming control path
1766     r->init_req(pnum, newin->control());
1767 
1768     if (pnum == 1) {            // Last merge for this Region?
1769       if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1770         Node* result = _gvn.transform(r);
1771         if (r != result && TraceOptoParse) {
1772           tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1773         }
1774       }
1775       record_for_igvn(r);
1776     }
1777 
1778     // Update all the non-control inputs to map:
1779     assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1780     bool check_elide_phi = target->is_SEL_backedge(save_block);

1781     for (uint j = 1; j < newin->req(); j++) {
1782       Node* m = map()->in(j);   // Current state of target.
1783       Node* n = newin->in(j);   // Incoming change to target state.
1784       PhiNode* phi;
1785       if (m->is_Phi() && m->as_Phi()->region() == r)
1786         phi = m->as_Phi();
1787       else
1788         phi = nullptr;

1789       if (m != n) {             // Different; must merge
1790         switch (j) {
1791         // Frame pointer and Return Address never changes
1792         case TypeFunc::FramePtr:// Drop m, use the original value
1793         case TypeFunc::ReturnAdr:
1794           break;
1795         case TypeFunc::Memory:  // Merge inputs to the MergeMem node
1796           assert(phi == nullptr, "the merge contains phis, not vice versa");
1797           merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1798           continue;
1799         default:                // All normal stuff
1800           if (phi == nullptr) {
1801             const JVMState* jvms = map()->jvms();
1802             if (EliminateNestedLocks &&
1803                 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
 1804               // BoxLock nodes are not commoned when EliminateNestedLocks is on.
1805               // Use old BoxLock node as merged box.
1806               assert(newin->jvms()->is_monitor_box(j), "sanity");
1807               // This assert also tests that nodes are BoxLock.
1808               assert(BoxLockNode::same_slot(n, m), "sanity");

1815                 // Incremental Inlining before EA and Macro nodes elimination.
1816                 //
1817                 // Incremental Inlining is executed after IGVN optimizations
1818                 // during which BoxLock can be marked as Coarsened.
1819                 old_box->set_coarsened(); // Verifies state
1820                 old_box->set_unbalanced();
1821               }
1822               C->gvn_replace_by(n, m);
1823             } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1824               phi = ensure_phi(j, nophi);
1825             }
1826           }
1827           break;
1828         }
1829       }
1830       // At this point, n might be top if:
1831       //  - there is no phi (because TypeFlow detected a conflict), or
 1832       //  - the corresponding control edge is top (a dead incoming path)
1833       // It is a bug if we create a phi which sees a garbage value on a live path.
1834 
1835       if (phi != nullptr) {
1836         assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1837         assert(phi->region() == r, "");
1838         phi->set_req(pnum, n);  // Then add 'n' to the merge
1839         if (pnum == PhiNode::Input) {
1840           // Last merge for this Phi.
1841           // So far, Phis have had a reasonable type from ciTypeFlow.
1842           // Now _gvn will join that with the meet of current inputs.
1843           // BOTTOM is never permissible here, 'cause pessimistically
1844           // Phis of pointers cannot lose the basic pointer type.
1845           DEBUG_ONLY(const Type* bt1 = phi->bottom_type());
1846           assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1847           map()->set_req(j, _gvn.transform(phi));
1848           DEBUG_ONLY(const Type* bt2 = phi->bottom_type());
1849           assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1850           record_for_igvn(phi);
1851         }
1852       }
1853     } // End of for all values to be merged
1854 
1855     if (pnum == PhiNode::Input &&
1856         !r->in(0)) {         // The occasional useless Region
1857       assert(control() == r, "");
1858       set_control(r->nonnull_req());
1859     }
1860 
1861     map()->merge_replaced_nodes_with(newin);
1862 
1863     // newin has been subsumed into the lazy merge, and is now dead.
1864     set_block(save_block);
1865 
1866     stop();                     // done with this guy, for now
1867   }
1868 
1869   if (TraceOptoParse) {
1870     tty->print_cr(" on path %d", pnum);
1871   }
1872 
1873   // Done with this parser state.
1874   assert(stopped(), "");
1875 }
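// Worked example (illustrative): suppose paths A and B both reach bci T with
// local 0 holding ConI(1) on A and ConI(2) on B.  A's merge stores its map at
// T; B's merge then sees m != n for that slot, obtains a phi via ensure_phi(),
// and the slot becomes Phi(Region(A,B), 1, 2), with a type from ciTypeFlow.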
1876 

1988 
1989   // Add new path to the region.
1990   uint pnum = r->req();
1991   r->add_req(nullptr);
1992 
1993   for (uint i = 1; i < map->req(); i++) {
1994     Node* n = map->in(i);
1995     if (i == TypeFunc::Memory) {
1996       // Ensure a phi on all currently known memories.
1997       for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
1998         Node* phi = mms.memory();
1999         if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2000           assert(phi->req() == pnum, "must be same size as region");
2001           phi->add_req(nullptr);
2002         }
2003       }
2004     } else {
2005       if (n->is_Phi() && n->as_Phi()->region() == r) {
2006         assert(n->req() == pnum, "must be same size as region");
2007         n->add_req(nullptr);
2008       }
2009     }
2010   }
2011 
2012   return pnum;
2013 }
2014 
2015 //------------------------------ensure_phi-------------------------------------
2016 // Turn the idx'th entry of the current map into a Phi
2017 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2018   SafePointNode* map = this->map();
2019   Node* region = map->control();
2020   assert(region->is_Region(), "");
2021 
2022   Node* o = map->in(idx);
2023   assert(o != nullptr, "");
2024 
2025   if (o == top())  return nullptr; // TOP always merges into TOP
2026 
2027   if (o->is_Phi() && o->as_Phi()->region() == region) {
2028     return o->as_Phi();
2029   }
2030 
2031   // Now use a Phi here for merging
2032   assert(!nocreate, "Cannot build a phi for a block already parsed.");
2033   const JVMState* jvms = map->jvms();
2034   const Type* t = nullptr;
2035   if (jvms->is_loc(idx)) {
2036     t = block()->local_type_at(idx - jvms->locoff());
2037   } else if (jvms->is_stk(idx)) {
2038     t = block()->stack_type_at(idx - jvms->stkoff());
2039   } else if (jvms->is_mon(idx)) {
2040     assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2041     t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2042   } else if ((uint)idx < TypeFunc::Parms) {
2043     t = o->bottom_type();  // Type::RETURN_ADDRESS or such-like.
2044   } else {
2045     assert(false, "no type information for this phi");
2046   }
2047 
2048   // If the type falls to bottom, then this must be a local that
2049   // is mixing ints and oops or some such.  Forcing it to top
2050   // makes it go dead.
2051   if (t == Type::BOTTOM) {
2052     map->set_req(idx, top());
2053     return nullptr;
2054   }
2055 
2056   // Do not create phis for top either.
 2057   // A top on a non-null control flow must be unused, even after the phi.
2058   if (t == Type::TOP || t == Type::HALF) {
2059     map->set_req(idx, top());
2060     return nullptr;
2061   }
2062 
2063   PhiNode* phi = PhiNode::make(region, o, t);
2064   gvn().set_type(phi, t);
2065   if (C->do_escape_analysis()) record_for_igvn(phi);
2066   map->set_req(idx, phi);
2067   return phi;
2068 }
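// Usage sketch: merge_common() calls ensure_phi(j, nophi) for each map slot
// whose incoming value differs from the stored one; the phi returned here
// (newly made or pre-existing) then receives the new path's value via
// phi->set_req(pnum, n).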
2069 
2070 //--------------------------ensure_memory_phi----------------------------------
2071 // Turn the idx'th slice of the current memory into a Phi
2072 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2073   MergeMemNode* mem = merged_memory();
2074   Node* region = control();
2075   assert(region->is_Region(), "");
2076 
2077   Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2078   assert(o != nullptr && o != top(), "");
2079 
2080   PhiNode* phi;
2081   if (o->is_Phi() && o->as_Phi()->region() == region) {
2082     phi = o->as_Phi();
2083     if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2084       // clone the shared base memory phi to make a new memory split
2085       assert(!nocreate, "Cannot build a phi for a block already parsed.");
2086       const Type* t = phi->bottom_type();
2087       const TypePtr* adr_type = C->get_adr_type(idx);

2177 // Add check to deoptimize once holder klass is fully initialized.
2178 void Parse::clinit_deopt() {
2179   assert(C->has_method(), "only for normal compilations");
2180   assert(depth() == 1, "only for main compiled method");
2181   assert(is_normal_parse(), "no barrier needed on osr entry");
2182   assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2183 
2184   set_parse_bci(0);
2185 
2186   Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2187   guard_klass_being_initialized(holder);
2188 }
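// Sketch of the effect (per the comments above): the emitted guard makes this
// nmethod deoptimize once the holder class becomes fully initialized, so a
// later recompilation, done after initialization completes, no longer needs
// any entry barrier.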
2189 
2190 //------------------------------return_current---------------------------------
2191 // Append current _map to _exit_return
2192 void Parse::return_current(Node* value) {
2193   if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2194     call_register_finalizer();
2195   }
2196 
2197   // Do not set_parse_bci, so that return goo is credited to the return insn.
2198   set_bci(InvocationEntryBci);
2199   if (method()->is_synchronized()) {
2200     shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2201   }
2202   if (C->env()->dtrace_method_probes()) {
2203     make_dtrace_method_exit(method());
2204   }

2205   SafePointNode* exit_return = _exits.map();
2206   exit_return->in( TypeFunc::Control  )->add_req( control() );
2207   exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
2208   Node *mem = exit_return->in( TypeFunc::Memory   );
2209   for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2210     if (mms.is_empty()) {
2211       // get a copy of the base memory, and patch just this one input
2212       const TypePtr* adr_type = mms.adr_type(C);
2213       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2214       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2215       gvn().set_type_bottom(phi);
2216       phi->del_req(phi->req()-1);  // prepare to re-patch
2217       mms.set_memory(phi);
2218     }
2219     mms.memory()->add_req(mms.memory2());
2220   }
2221 
2222   // frame pointer is always same, already captured
2223   if (value != nullptr) {
2224     // If returning oops to an interface-return, there is a silent free
2225     // cast from oop to interface allowed by the Verifier.  Make it explicit
2226     // here.
2227     Node* phi = _exits.argument(0);
2228     phi->add_req(value);
2229   }
2230 
2231   if (_first_return) {
2232     _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2233     _first_return = false;
2234   } else {
2235     _exits.map()->merge_replaced_nodes_with(map());
2236   }
2237 
2238   stop_and_kill_map();          // This CFG path dies here
2239 }
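// Sketch of the memory merge above: for each alias slice, the exit MergeMem
// either already has a Phi over the exit Region (the current slice is simply
// appended as one more input) or still shares the base memory (the base Phi
// is first sliced into a fresh per-slice Phi, then patched for this path).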
2240 
2241 
2242 //------------------------------add_safepoint----------------------------------
2243 void Parse::add_safepoint() {
2244   uint parms = TypeFunc::Parms+1;
2245 
2246   // Clear out dead values from the debug info.
2247   kill_dead_locals();
2248 
2249   // Clone the JVM State
2250   SafePointNode *sfpnt = new SafePointNode(parms, nullptr);

src/hotspot/share/opto/parse1.cpp (new version)

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "compiler/compileLog.hpp"
  26 #include "interpreter/linkResolver.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "oops/method.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/c2compiler.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/convertnode.hpp"
  33 #include "opto/idealGraphPrinter.hpp"
  34 #include "opto/inlinetypenode.hpp"
  35 #include "opto/locknode.hpp"
  36 #include "opto/memnode.hpp"
  37 #include "opto/opaquenode.hpp"
  38 #include "opto/parse.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/runtime.hpp"
  41 #include "opto/type.hpp"
  42 #include "runtime/arguments.hpp"
  43 #include "runtime/handles.inline.hpp"
  44 #include "runtime/safepointMechanism.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "utilities/bitMap.inline.hpp"
  47 #include "utilities/copy.hpp"
  48 
  49 // Static array so we can figure out which bytecodes most often stop us from
  50 // compiling. Some of the non-static variables are needed in bytecodeInfo.cpp
  51 // and eventually should be encapsulated in a proper class (gri 8/18/98).
  52 
  53 #ifndef PRODUCT
  54 uint nodes_created             = 0;
  55 uint methods_parsed            = 0;
  56 uint methods_seen              = 0;
  57 uint blocks_parsed             = 0;
  58 uint blocks_seen               = 0;
  59 
  60 uint explicit_null_checks_inserted = 0;
  61 uint explicit_null_checks_elided   = 0;
  62 uint all_null_checks_found         = 0;

  87   }
  88   if (all_null_checks_found) {
  89     tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
  90                   (100*implicit_null_checks)/all_null_checks_found);
  91   }
  92   if (SharedRuntime::_implicit_null_throws) {
  93     tty->print_cr("%u implicit null exceptions at runtime",
  94                   SharedRuntime::_implicit_null_throws);
  95   }
  96 
  97   if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
  98     BytecodeParseHistogram::print();
  99   }
 100 }
 101 #endif
 102 
 103 //------------------------------ON STACK REPLACEMENT---------------------------
 104 
 105 // Construct a node which can be used to get incoming state for
 106 // on stack replacement.
 107 Node* Parse::fetch_interpreter_state(int index,
 108                                      const Type* type,
 109                                      Node* local_addrs,
 110                                      Node* local_addrs_base) {
 111   BasicType bt = type->basic_type();
 112   if (type == TypePtr::NULL_PTR) {
 113     // Ptr types are mixed together with T_ADDRESS but nullptr is
 114     // really for T_OBJECT types so correct it.
 115     bt = T_OBJECT;
 116   }
 117   Node *mem = memory(Compile::AliasIdxRaw);
 118   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
 119   Node *ctl = control();
 120 
 121   // Very similar to LoadNode::make, except we handle un-aligned longs and
 122   // doubles on Sparc.  Intel can handle them just fine directly.
 123   Node *l = nullptr;
 124   switch (bt) {                // Signature is flattened
 125   case T_INT:     l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        MemNode::unordered); break;
 126   case T_FLOAT:   l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         MemNode::unordered); break;
 127   case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,  MemNode::unordered); break;
 128   case T_OBJECT:  l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
 129   case T_LONG:
 130   case T_DOUBLE: {
 131     // Since arguments are in reverse order, the argument address 'adr'
 132     // refers to the back half of the long/double.  Recompute adr.
 133     adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
 134     if (Matcher::misaligned_doubles_ok) {
 135       l = (bt == T_DOUBLE)
 136         ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
 137         : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
 138     } else {
 139       l = (bt == T_DOUBLE)
 140         ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
 141         : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
 142     }
 143     break;
 144   }
 145   default: ShouldNotReachHere();
 146   }
 147   return _gvn.transform(l);
 148 }
 149 
 150 // Helper routine to prevent the interpreter from handing
 151 // unexpected typestate to an OSR method.
 152 // The Node l is a value newly dug out of the interpreter frame.
 153 // The type is the type predicted by ciTypeFlow.  Note that it is
 154 // not a general type, but can only come from Type::get_typeflow_type.
 155 // The safepoint is a map which will feed an uncommon trap.
 156 Node* Parse::check_interpreter_type(Node* l, const Type* type, const TypeKlassPtr* klass_type,
 157                                     SafePointNode* &bad_type_exit, bool is_early_larval) {

 158   const TypeOopPtr* tp = type->isa_oopptr();
 159 
 160   // TypeFlow may assert null-ness if a type appears unloaded.
 161   if (type == TypePtr::NULL_PTR ||
 162       (tp != nullptr && !tp->is_loaded())) {
 163     // Value must be null, not a real oop.
 164     Node* chk = _gvn.transform( new CmpPNode(l, null()) );
 165     Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
 166     IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
 167     set_control(_gvn.transform( new IfTrueNode(iff) ));
 168     Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
 169     bad_type_exit->control()->add_req(bad_type);
 170     l = null();
 171   }
 172 
 173   // Typeflow can also cut off paths from the CFG, based on
 174   // types which appear unloaded, or call sites which appear unlinked.
 175   // When paths are cut off, values at later merge points can rise
 176   // toward more specific classes.  Make sure these specific classes
 177   // are still in effect.
 178   if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
 179     // TypeFlow asserted a specific object type.  Value must have that type.
 180     Node* bad_type_ctrl = nullptr;
 181     if (tp->is_inlinetypeptr() && !tp->maybe_null()) {
 182       // Check inline types for null here to prevent checkcast from adding an
 183       // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
 184       l = null_check_oop(l, &bad_type_ctrl);
 185       bad_type_exit->control()->add_req(bad_type_ctrl);
 186     }
 187 
 188     l = gen_checkcast(l, makecon(klass_type), &bad_type_ctrl, false, is_early_larval);
 189     bad_type_exit->control()->add_req(bad_type_ctrl);
 190   }
 191 
 192   assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
 193   return l;
 194 }
 195 
 196 // Helper routine which sets up elements of the initial parser map when
 197 // performing a parse for on stack replacement.  Add values into map.
 198 // The only parameter contains the address of the interpreter arguments.
 199 void Parse::load_interpreter_state(Node* osr_buf) {
 200   int index;
 201   int max_locals = jvms()->loc_size();
 202   int max_stack  = jvms()->stk_size();
 203 
 204   // Mismatch between method and jvms can occur since map briefly held
 205   // an OSR entry state (which takes up one RawPtr word).
 206   assert(max_locals == method()->max_locals(), "sanity");
 207   assert(max_stack  >= method()->max_stack(),  "sanity");
 208   assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
 209   assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
 210 
 211   // Find the start block.
 212   Block* osr_block = start_block();
 213   assert(osr_block->start() == osr_bci(), "sanity");
 214 
 215   // Set initial BCI.
 216   set_parse_bci(osr_block->start());
 217 
 218   // Set initial stack depth.
 219   set_sp(osr_block->start_sp());
 220 
 221   // Check bailouts.  We currently do not perform on stack replacement
 222   // of loops in catch blocks or loops which branch with a non-empty stack.
 223   if (sp() != 0) {

 238   for (index = 0; index < mcnt; index++) {
 239     // Make a BoxLockNode for the monitor.
 240     BoxLockNode* osr_box = new BoxLockNode(next_monitor());
 241     // Check for bailout after new BoxLockNode
 242     if (failing()) { return; }
 243 
 244     // This OSR locking region is unbalanced because it does not have a Lock node:
 245     // locking was done in the interpreter.
 246     // This is similar to the Coarsened case, when a Lock node is eliminated
 247     // and, as a result, the region is marked as Unbalanced.
 248 
 249     // Emulate Coarsened state transition from Regular to Unbalanced.
 250     osr_box->set_coarsened();
 251     osr_box->set_unbalanced();
 252 
 253     Node* box = _gvn.transform(osr_box);
 254 
 255     // Displaced headers and locked objects are interleaved in the
 256     // temp OSR buffer.  We only copy the locked objects out here.
 257     // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
 258     Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
 259     // Try to copy the displaced header to the BoxNode
 260     Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
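         // Buffer layout implied by the indexing above (an inference, not an original
         // comment): for monitor 'index', slot 2*index holds the locked object and
         // slot 2*index+1 its displaced header, both addressed from monitors_addr
         // with negative word offsets by fetch_interpreter_state.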
 261 
 262     store_to_memory(control(), box, displaced_hdr, T_ADDRESS, MemNode::unordered);
 263 
 264     // Build a bogus FastLockNode (no code will be generated) and push the
 265     // monitor into our debug info.
 266     const FastLockNode *flock = _gvn.transform(new FastLockNode( nullptr, lock_object, box ))->as_FastLock();
 267     map()->push_monitor(flock);
 268 
 269     // If the lock is our method synchronization lock, tuck it away in
 270     // _synch_lock for return and rethrow exit paths.
 271     if (index == 0 && method()->is_synchronized()) {
 272       _synch_lock = flock;
 273     }
 274   }
 275 
 276   // Use the raw liveness computation to make sure that unexpected
 277   // values don't propagate into the OSR frame.
 278   MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
 279   if (!live_locals.is_valid()) {
 280     // Degenerate or breakpointed method.

 308         if (C->log() != nullptr) {
 309           C->log()->elem("OSR_mismatch local_index='%d'",index);
 310         }
 311         set_local(index, null());
 312         // and ignore it for the loads
 313         continue;
 314       }
 315     }
 316 
 317     // Filter out TOP, HALF, and BOTTOM.  (Cf. ensure_phi.)
 318     if (type == Type::TOP || type == Type::HALF) {
 319       continue;
 320     }
 321     // If the type falls to bottom, then this must be a local that
 322     // is mixing ints and oops or some such.  Forcing it to top
 323     // makes it go dead.
 324     if (type == Type::BOTTOM) {
 325       continue;
 326     }
 327     // Construct code to access the appropriate local.
 328     Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);
 329     set_local(index, value);
 330   }
 331 
 332   // Extract the needed stack entries from the interpreter frame.
 333   for (index = 0; index < sp(); index++) {
 334     const Type *type = osr_block->stack_type_at(index);
 335     if (type != Type::TOP) {
 336       // Currently the compiler bails out when attempting to on stack replace
 337       // at a bci with a non-empty stack.  We should not reach here.
 338       ShouldNotReachHere();
 339     }
 340   }
 341 
 342   // End the OSR migration
 343   make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
 344                     CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
 345                     "OSR_migration_end", TypeRawPtr::BOTTOM,
 346                     osr_buf);
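       // Note (added): OSR_migration_end is a leaf call (RC_LEAF: no safepoint, no
       // JVM state); it releases the temporary migration buffer now that locals,
       // stack and monitors have been copied into the compiled frame's state.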
 347 
 348   // Now that the interpreter state is loaded, make sure it will match

 359     if (type->isa_oopptr() != nullptr) {
 360       if (!live_oops.at(index)) {
 361         // skip type check for dead oops
 362         continue;
 363       }
 364     }
 365     if (osr_block->flow()->local_type_at(index)->is_return_address()) {
 366       // In our current system it's illegal for jsr addresses to be
 367       // live into an OSR entry point because the compiler performs
 368       // inlining of jsrs.  ciTypeFlow has a bailout that detects this
 369       // case and aborts the compile if addresses are live into an OSR
 370       // entry point.  Because of that we can assume that any address
 371       // locals at the OSR entry point are dead.  Method liveness
 372       // isn't precise enough to figure out that they are dead in all
 373       // cases so simply skip checking address locals all
 374       // cases, so simply skip checking address locals altogether.
 375       // Any type check is guaranteed to fail since the
 376       // value and the expected type is a constant.
 377       continue;
 378     }
 379     const TypeKlassPtr* klass_type = nullptr;
 380     if (type->isa_oopptr()) {
 381       klass_type = TypeKlassPtr::make(osr_block->flow()->local_type_at(index)->unwrap()->as_klass(), Type::ignore_interfaces);
 382       klass_type = klass_type->try_improve();
 383     }
 384     bool is_early_larval = osr_block->flow()->local_type_at(index)->is_early_larval();
 385     set_local(index, check_interpreter_type(l, type, klass_type, bad_type_exit, is_early_larval));
 386   }
 387 
 388   for (index = 0; index < sp(); index++) {
 389     if (stopped())  break;
 390     Node* l = stack(index);
 391     if (l->is_top())  continue;  // nothing here
 392     const Type* type = osr_block->stack_type_at(index);
 393     const TypeKlassPtr* klass_type = nullptr;
 394     if (type->isa_oopptr()) {
 395       klass_type = TypeKlassPtr::make(osr_block->flow()->stack_type_at(index)->unwrap()->as_klass(), Type::ignore_interfaces);
 396       klass_type = klass_type->try_improve();
 397     }
 398     bool is_early_larval = osr_block->flow()->stack_type_at(index)->is_early_larval();
 399     set_stack(index, check_interpreter_type(l, type, klass_type, bad_type_exit, is_early_larval));
 400   }
 401 
 402   if (bad_type_exit->control()->req() > 1) {
 403     // Build an uncommon trap here, if any inputs can be unexpected.
 404     bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
 405     record_for_igvn(bad_type_exit->control());
 406     SafePointNode* types_are_good = map();
 407     set_map(bad_type_exit);
 408     // The unexpected type happens because a new edge is active
 409     // in the CFG, which typeflow had previously ignored.
 410     // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
 411     // This x will be typed as Integer if notReached is not yet linked.
 412     // It could also happen due to a problem in ciTypeFlow analysis.
 413     uncommon_trap(Deoptimization::Reason_constraint,
 414                   Deoptimization::Action_reinterpret);
 415     set_map(types_are_good);
 416   }
 417 }
 418 
 419 //------------------------------Parse------------------------------------------

 520   // either breakpoint setting or hotswapping of methods may
 521   // cause deoptimization.
 522   if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
 523     C->dependencies()->assert_evol_method(method());
 524   }
 525 
 526   NOT_PRODUCT(methods_seen++);
 527 
 528   // Do some special top-level things.
 529   if (depth() == 1 && C->is_osr_compilation()) {
 530     _tf = C->tf();     // the OSR entry type is different
 531     _entry_bci = C->entry_bci();
 532     _flow = method()->get_osr_flow_analysis(osr_bci());
 533   } else {
 534     _tf = TypeFunc::make(method());
 535     _entry_bci = InvocationEntryBci;
 536     _flow = method()->get_flow_analysis();
 537   }
 538 
 539   if (_flow->failing()) {
 540     // TODO Adding a trap due to an unloaded return type in ciTypeFlow::StateVector::do_invoke
 541     // can lead to this. Re-enable once 8284443 is fixed.
 542     //assert(false, "type flow analysis failed during parsing");
 543     C->record_method_not_compilable(_flow->failure_reason());
 544 #ifndef PRODUCT
 545       if (PrintOpto && (Verbose || WizardMode)) {
 546         if (is_osr_parse()) {
 547           tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
 548         } else {
 549           tty->print_cr("type flow bailout: %s", _flow->failure_reason());
 550         }
 551         if (Verbose) {
 552           method()->print();
 553           method()->print_codes();
 554           _flow->print();
 555         }
 556       }
 557 #endif
 558   }
 559 
 560 #ifdef ASSERT
 561   if (depth() == 1) {
 562     assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");

 613     load_interpreter_state(osr_buf);
 614   } else {
 615     set_map(entry_map);
 616     do_method_entry();
 617   }
 618 
 619   if (depth() == 1 && !failing()) {
 620     if (C->clinit_barrier_on_entry()) {
 621       // Add check to deoptimize the nmethod once the holder class is fully initialized
 622       clinit_deopt();
 623     }
 624   }
 625 
 626   // Check for bailouts during method entry.
 627   if (failing()) {
 628     if (log)  log->done("parse");
 629     C->set_default_node_notes(caller_nn);
 630     return;
 631   }
 632 
 633   // Handle inline type arguments
 634   int arg_size = method()->arg_size();
 635   for (int i = 0; i < arg_size; i++) {
 636     Node* parm = local(i);
 637     const Type* t = _gvn.type(parm);
 638     if (t->is_inlinetypeptr()) {
 639       // If the parameter is a value object, try to scalarize it if we know that it is unrestricted (not early larval).
 640       // Parameters are non-larval except the receiver of a constructor, which must be an early larval object.
 641       if (!(method()->is_object_constructor() && i == 0)) {
 642         // Create InlineTypeNode from the oop and replace the parameter
 643         Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass());
 644         replace_in_map(parm, vt);
 645       }
 646     } else if (UseTypeSpeculation && (i == (arg_size - 1)) && !is_osr_parse() && method()->has_vararg() &&
 647                t->isa_aryptr() != nullptr && !t->is_aryptr()->is_null_free() && !t->is_aryptr()->is_flat() &&
 648                (!t->is_aryptr()->is_not_null_free() || !t->is_aryptr()->is_not_flat())) {
 649       // Speculate on varargs Object array being not null-free and not flat
 650       const TypePtr* spec_type = t->speculative();
 651       spec_type = (spec_type != nullptr && spec_type->isa_aryptr() != nullptr) ? spec_type : t->is_aryptr();
 652       spec_type = spec_type->remove_speculative()->is_aryptr()->cast_to_not_null_free()->cast_to_not_flat();
 653       spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, spec_type);
 654       Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, t->join_speculative(spec_type)));
 655       replace_in_map(parm, cast);
 656     }
 657   }
 658 
 659   entry_map = map();  // capture any changes performed by method setup code
 660   assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
 661 
 662   // We begin parsing as if we have just encountered a jump to the
 663   // method entry.
 664   Block* entry_block = start_block();
 665   assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
 666   set_map_clone(entry_map);
 667   merge_common(entry_block, entry_block->next_path_num());
 668 
 669 #ifndef PRODUCT
 670   BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
 671   set_parse_histogram( parse_histogram_obj );
 672 #endif
 673 
 674   // Parse all the basic blocks.
 675   do_all_blocks();
 676 
 677   // Check for bailouts during conversion to graph
 678   if (failing()) {

 824 void Parse::build_exits() {
 825   // make a clone of caller to prevent sharing of side-effects
 826   _exits.set_map(_exits.clone_map());
 827   _exits.clean_stack(_exits.sp());
 828   _exits.sync_jvms();
 829 
 830   RegionNode* region = new RegionNode(1);
 831   record_for_igvn(region);
 832   gvn().set_type_bottom(region);
 833   _exits.set_control(region);
 834 
 835   // Note:  iophi and memphi are not transformed until do_exits.
 836   Node* iophi  = new PhiNode(region, Type::ABIO);
 837   Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
 838   gvn().set_type_bottom(iophi);
 839   gvn().set_type_bottom(memphi);
 840   _exits.set_i_o(iophi);
 841   _exits.set_all_memory(memphi);
 842 
 843   // Add a return value to the exit state.  (Do not push it yet.)
 844   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
 845     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
 846     if (ret_type->isa_int()) {
 847       BasicType ret_bt = method()->return_type()->basic_type();
 848       if (ret_bt == T_BOOLEAN ||
 849           ret_bt == T_CHAR ||
 850           ret_bt == T_BYTE ||
 851           ret_bt == T_SHORT) {
 852         ret_type = TypeInt::INT;
 853       }
 854     }
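         // Note (added): sub-int returns are widened to TypeInt::INT in the ret_phi
         // here and narrowed back via mask_int_value() in do_exits() before being
         // pushed, so callers always see a properly masked boolean/byte/char/short.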
 855 
 856     // Don't "bind" an unloaded return klass to the ret_phi. If the klass
 857     // becomes loaded during the subsequent parsing, the loaded and unloaded
 858     // types will not join when we transform and push in do_exits().
 859     const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
 860     if (ret_oop_type && !ret_oop_type->is_loaded()) {
 861       ret_type = TypeOopPtr::BOTTOM;
 862     }
 863     int         ret_size = type2size[ret_type->basic_type()];
 864     Node*       ret_phi  = new PhiNode(region, ret_type);
 865     gvn().set_type_bottom(ret_phi);
 866     _exits.ensure_stack(ret_size);
 867     assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
 868     assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
 869     _exits.set_argument(0, ret_phi);  // here is where the parser finds it
 870     // Note:  ret_phi is not yet pushed, until do_exits.
 871   }
 872 }
 873 
 874 //----------------------------build_start_state-------------------------------
 875 // Construct a state which contains only the incoming arguments from an
 876 // unknown caller.  The method & bci will be null & InvocationEntryBci.
 877 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
 878   int        arg_size = tf->domain_sig()->cnt();
 879   int        max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
 880   JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
 881   SafePointNode* map  = new SafePointNode(max_size, jvms);
 882   jvms->set_map(map);
 883   record_for_igvn(map);
 884   assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
 885   Node_Notes* old_nn = default_node_notes();
 886   if (old_nn != nullptr && has_method()) {
 887     Node_Notes* entry_nn = old_nn->clone(this);
 888     JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
 889     entry_jvms->set_offsets(0);
 890     entry_jvms->set_bci(entry_bci());
 891     entry_nn->set_jvms(entry_jvms);
 892     set_default_node_notes(entry_nn);
 893   }
 894   PhaseGVN& gvn = *initial_gvn();
 895   uint i = 0;
 896   int arg_num = 0;
 897   for (uint j = 0; i < (uint)arg_size; i++) {
 898     const Type* t = tf->domain_sig()->field_at(i);
 899     Node* parm = nullptr;
 900     if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
 901       // Inline type arguments are not passed by reference: we get an argument per
 902       // field of the inline type. Build InlineTypeNodes from the inline type arguments.
 903       GraphKit kit(jvms, &gvn);
 904       kit.set_control(map->control());
 905       Node* old_mem = map->memory();
 906       // Use immutable memory for inline type loads and restore it below
 907       kit.set_all_memory(C->immutable_memory());
 908       parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
 909       map->set_control(kit.control());
 910       map->set_memory(old_mem);
 911     } else {
 912       parm = gvn.transform(new ParmNode(start, j++));
 913     }
 914     map->init_req(i, parm);
 915     // Record all these guys for later GVN.
 916     record_for_igvn(parm);
 917     if (i >= TypeFunc::Parms && t != Type::HALF) {
 918       arg_num++;
 919     }
 920   }
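       // Index bookkeeping in the loop above (explanatory note): 'i' walks signature
       // slots including T_HALF placeholders for long/double, 'j' tracks incoming
       // parameter positions (a scalarized inline type argument consumes several of
       // them through make_from_multi), and 'arg_num' counts declared Java arguments
       // so is_scalarized_arg() is queried per argument rather than per slot.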
 921   for (; i < map->req(); i++) {
 922     map->init_req(i, top());
 923   }
 924   assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
 925   set_default_node_notes(old_nn);
 926   return jvms;
 927 }
 928 
 929 //-----------------------------make_node_notes---------------------------------
 930 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
 931   if (caller_nn == nullptr)  return nullptr;
 932   Node_Notes* nn = caller_nn->clone(C);
 933   JVMState* caller_jvms = nn->jvms();
 934   JVMState* jvms = new (C) JVMState(method(), caller_jvms);
 935   jvms->set_offsets(0);
 936   jvms->set_bci(_entry_bci);
 937   nn->set_jvms(jvms);
 938   return nn;
 939 }
 940 
 941 
 942 //--------------------------return_values--------------------------------------
 943 void Compile::return_values(JVMState* jvms) {
 944   GraphKit kit(jvms);
 945   Node* ret = new ReturnNode(TypeFunc::Parms,
 946                              kit.control(),
 947                              kit.i_o(),
 948                              kit.reset_memory(),
 949                              kit.frameptr(),
 950                              kit.returnadr());
 951   // Add zero or one return value
 952   int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
 953   if (ret_size > 0) {
 954     kit.inc_sp(-ret_size);  // pop the return value(s)
 955     kit.sync_jvms();
 956     Node* res = kit.argument(0);
 957     if (tf()->returns_inline_type_as_fields()) {
 958       // Multiple return values (inline type fields): add as many edges
 959       // to the Return node as returned values.
 960       InlineTypeNode* vt = res->as_InlineType();
 961       ret->add_req_batch(nullptr, tf()->range_cc()->cnt() - TypeFunc::Parms);
 962       if (vt->is_allocated(&kit.gvn()) && !StressCallingConvention) {
 963         ret->init_req(TypeFunc::Parms, vt);
 964       } else {
 965         // Return the tagged klass pointer to signal scalarization to the caller
 966         Node* tagged_klass = vt->tagged_klass(kit.gvn());
 967         // Return null if the inline type is null (null marker field is not set)
 968         Node* conv   = kit.gvn().transform(new ConvI2LNode(vt->get_null_marker()));
 969         Node* shl    = kit.gvn().transform(new LShiftLNode(conv, kit.intcon(63)));
 970         Node* shr    = kit.gvn().transform(new RShiftLNode(shl, kit.intcon(63)));
 971         tagged_klass = kit.gvn().transform(new AndLNode(tagged_klass, shr));
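             // Worked example (illustrative): the null marker m is 0 or 1, so
             // ((jlong)m << 63) >> 63 is 0 when m == 0 and -1 (all ones) when m == 1.
             // The AND therefore yields the tagged klass for a non-null value and
             // null (0) for a null one, with no branch.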
 972         ret->init_req(TypeFunc::Parms, tagged_klass);
 973       }
 974       uint idx = TypeFunc::Parms + 1;
 975       vt->pass_fields(&kit, ret, idx, false, false);
 976     } else {
 977       ret->add_req(res);
 978       // Note:  The second dummy edge is not needed by a ReturnNode.
 979     }
 980   }
 981   // bind it to root
 982   root()->add_req(ret);
 983   record_for_igvn(ret);
 984   initial_gvn()->transform(ret);
 985 }
 986 
 987 //------------------------rethrow_exceptions-----------------------------------
 988 // Bind all exception states in the list into a single RethrowNode.
 989 void Compile::rethrow_exceptions(JVMState* jvms) {
 990   GraphKit kit(jvms);
 991   if (!kit.has_exceptions())  return;  // nothing to generate
 992   // Load my combined exception state into the kit, with all phis transformed:
 993   SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
 994   Node* ex_oop = kit.use_exception_state(ex_map);
 995   RethrowNode* exit = new RethrowNode(kit.control(),
 996                                       kit.i_o(), kit.reset_memory(),
 997                                       kit.frameptr(), kit.returnadr(),
 998                                       // like a return but with exception input
 999                                       ex_oop);

1083   //    to complete, we force all writes to complete.
1084   //
1085   // 2. Experimental VM option is used to force the barrier if any field
1086   //    was written out in the constructor.
1087   //
1088   // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1089   //    support_IRIW_for_not_multiple_copy_atomic_cpu means that
1090   //    MemBarVolatile is used before a volatile load instead of after a volatile
1091   //    store, so there's no barrier after the store.
1092   //    We want to guarantee the same behavior as on platforms with total store
1093   //    order, although this is not required by the Java memory model.
1094   //    In this case, we want to enforce visibility of volatile field
1095   //    initializations which are performed in constructors.
1096   //    So as with finals, we add a barrier here.
1097   //
1098   // "All bets are off" unless the first publication occurs after a
1099   // normal return from the constructor.  We do not attempt to detect
1100   // such unusual early publications.  But no barrier is needed on
1101   // exceptional returns, since they cannot publish normally.
1102   //
1103   if ((method()->is_object_constructor() || method()->is_class_initializer()) &&
1104        (wrote_final() || wrote_stable() ||
1105          (AlwaysSafeConstructors && wrote_fields()) ||
1106          (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1107     Node* recorded_alloc = alloc_with_final_or_stable();
1108     _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
1109                           recorded_alloc);
1110 
1111     // If a memory barrier is created for final field writes
1112     // and the allocation node does not escape the initializer method,
1113     // then the barrier introduced by the allocation node can be removed.
1114     if (DoEscapeAnalysis && (recorded_alloc != nullptr)) {
1115       AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc);
1116       alloc->compute_MemBar_redundancy(method());
1117     }
1118     if (PrintOpto && (Verbose || WizardMode)) {
1119       method()->print_name();
1120       tty->print_cr(" writes finals/@Stable and needs a memory barrier");
1121     }
1122   }
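       // For intuition (added note): without this barrier, publishing the newly
       // constructed object through a plain store to a shared field could let another
       // thread observe the reference before the constructor's final/@Stable field
       // stores become visible; the StoreStore/Release barrier at the exits orders
       // those stores before any later publishing store.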
1123 
1124   for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1125     // transform each slice of the original memphi:
1126     mms.set_memory(_gvn.transform(mms.memory()));
1127   }
1128   // Clean up input MergeMems created by transforming the slices
1129   _gvn.transform(_exits.merged_memory());
1130 
1131   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
1132     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
1133     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
1134     if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1135       // If the type we set for the ret_phi in build_exits() is too optimistic and
1136       // the ret_phi is top now, there's an extremely small chance that it may be due to class
1137       // loading.  It could also be due to an error, so mark this method as not compilable because
1138       // otherwise this could lead to an infinite compile loop.
1139       // In any case, this code path is rarely (and never in my testing) reached.
1140       C->record_method_not_compilable("Can't determine return type.");
1141       return;
1142     }
1143     if (ret_type->isa_int()) {
1144       BasicType ret_bt = method()->return_type()->basic_type();
1145       ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1146     }
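         // mask_int_value() re-narrows the widened phi, e.g. (to the best of this
         // reading) masking a T_BOOLEAN result to 0/1 and zero-extending a T_CHAR
         // result to 16 bits, undoing the widening to TypeInt::INT in build_exits().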
1147     _exits.push_node(ret_type->basic_type(), ret_phi);
1148   }
1149 
1150   // Note:  Logic for creating and optimizing the ReturnNode is in Compile.
1151 
1152   // Unlock along the exceptional paths.

1206 
1207 //-----------------------------create_entry_map-------------------------------
1208 // Initialize our parser map to contain the types at method entry.
1209 // For OSR, the map contains a single RawPtr parameter.
1210 // Initial monitor locking for sync. methods is performed by do_method_entry.
1211 SafePointNode* Parse::create_entry_map() {
1212   // Check for really stupid bail-out cases.
1213   uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1214   if (len >= 32760) {
1215     // Bailout expected, this is a very rare edge case.
1216     C->record_method_not_compilable("too many local variables");
1217     return nullptr;
1218   }
1219 
1220   // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1221   _caller->map()->delete_replaced_nodes();
1222 
1223   // If this is an inlined method, we may have to do a receiver null check.
1224   if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1225     GraphKit kit(_caller);
1226     Node* receiver = kit.argument(0);
1227     Node* null_free = kit.null_check_receiver_before_call(method());
1228     _caller = kit.transfer_exceptions_into_jvms();
1229 
1230     if (kit.stopped()) {
1231       _exits.add_exception_states_from(_caller);
1232       _exits.set_jvms(_caller);
1233       return nullptr;
1234     }
1235   }
1236 
1237   assert(method() != nullptr, "parser must have a method");
1238 
1239   // Create an initial safepoint to hold JVM state during parsing
1240   JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1241   set_map(new SafePointNode(len, jvms));
1242 
1243   // Capture receiver info for compiled lambda forms.
1244   if (method()->is_compiled_lambda_form()) {
1245     ciInstance* recv_info = _caller->compute_receiver_info(method());
1246     jvms->set_receiver_info(recv_info);
1247   }
1248 
1249   jvms->set_map(map());

1253   SafePointNode* inmap = _caller->map();
1254   assert(inmap != nullptr, "must have inmap");
1255   // In case of null check on receiver above
1256   map()->transfer_replaced_nodes_from(inmap, _new_idx);
1257 
1258   uint i;
1259 
1260   // Pass thru the predefined input parameters.
1261   for (i = 0; i < TypeFunc::Parms; i++) {
1262     map()->init_req(i, inmap->in(i));
1263   }
1264 
1265   if (depth() == 1) {
1266     assert(map()->memory()->Opcode() == Op_Parm, "");
1267     // Insert the memory aliasing node
1268     set_all_memory(reset_memory());
1269   }
1270   assert(merged_memory(), "");
1271 
1272   // Now add the locals which are initially bound to arguments:
1273   uint arg_size = tf()->domain_sig()->cnt();
1274   ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
1275   for (i = TypeFunc::Parms; i < arg_size; i++) {
1276     map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1277   }
1278 
1279   // Clear out the rest of the map (locals and stack)
1280   for (i = arg_size; i < len; i++) {
1281     map()->init_req(i, top());
1282   }
1283 
1284   SafePointNode* entry_map = stop();
1285   return entry_map;
1286 }
1287 
1288 //-----------------------------do_method_entry--------------------------------
1289 // Emit any code needed in the pseudo-block before BCI zero.
1290 // The main thing to do is lock the receiver of a synchronized method.
1291 void Parse::do_method_entry() {
1292   set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1293   set_sp(0);                         // Java Stack Pointer
1294 
1295   NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
1296 
1297   // Check if we need a membar at the beginning of the java.lang.Object
1298   // constructor to satisfy the memory model for strict fields.
1299   if (Arguments::is_valhalla_enabled() && method()->intrinsic_id() == vmIntrinsics::_Object_init) {
1300     Node* receiver_obj = local(0);
1301     const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1302     // If there's no exact type, check if the declared type has no implementors and add a dependency
1303     const TypeKlassPtr* klass_ptr = receiver_type->as_klass_type(/* try_for_exact= */ true);
1304     ciType* klass = klass_ptr->klass_is_exact() ? klass_ptr->exact_klass() : nullptr;
1305     if (klass != nullptr && klass->is_instance_klass()) {
1306       // Exact receiver type, check if there is a strict field
1307       ciInstanceKlass* holder = klass->as_instance_klass();
1308       for (int i = 0; i < holder->nof_nonstatic_fields(); i++) {
1309         ciField* field = holder->nonstatic_field_at(i);
1310         if (field->is_strict()) {
1311           // Found a strict field, a membar is needed
1312           AllocateNode* alloc = AllocateNode::Ideal_allocation(receiver_obj);
1313           insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease, receiver_obj);
1314           if (DoEscapeAnalysis && (alloc != nullptr)) {
1315             alloc->compute_MemBar_redundancy(method());
1316           }
1317           break;
1318         }
1319       }
1320     } else if (klass == nullptr) {
1321       // We can't statically determine the type of the receiver and therefore need
1322       // to put a membar here because it could have a strict field.
1323       insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease);
1324     }
1325   }
1326 
1327   if (C->env()->dtrace_method_probes()) {
1328     make_dtrace_method_entry(method());
1329   }
1330 
1331 #ifdef ASSERT
1332   // Narrow receiver type when it is too broad for the method being parsed.
1333   if (!method()->is_static()) {
1334     ciInstanceKlass* callee_holder = method()->holder();
1335     const Type* holder_type = TypeInstPtr::make(TypePtr::BotPTR, callee_holder, Type::trust_interfaces);
1336 
1337     Node* receiver_obj = local(0);
1338     const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1339 
1340     if (receiver_type != nullptr && !receiver_type->higher_equal(holder_type)) {
1341       // Receiver should always be a subtype of callee holder.
1342       // But, since C2 type system doesn't properly track interfaces,
1343       // the invariant can't be expressed in the type system for default methods.
1344       // Example: for unrelated C <: I and D <: I, (C `meet` D) = Object </: I.
1345       assert(callee_holder->is_interface(), "missing subtype check");
1346 

1357 
1358   // If the method is synchronized, we need to construct a lock node, attach
1359   // it to the Start node, and pin it there.
1360   if (method()->is_synchronized()) {
1361     // Insert a FastLockNode right after the Start which takes as arguments
1362     // the current thread pointer, the "this" pointer & the address of the
1363     // stack slot pair used for the lock.  The "this" pointer is a projection
1364     // off the start node, but the locking spot has to be constructed by
1365     // creating a ConLNode of 0, and boxing it with a BoxLockNode.  The BoxLockNode
1366     // becomes the second argument to the FastLockNode call.  The
1367     // FastLockNode becomes the new control parent to pin it to the start.
1368 
1369     // Setup Object Pointer
1370     Node *lock_obj = nullptr;
1371     if (method()->is_static()) {
1372       ciInstance* mirror = _method->holder()->java_mirror();
1373       const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1374       lock_obj = makecon(t_lock);
1375     } else {                  // Else pass the "this" pointer,
1376       lock_obj = local(0);    // which is Parm0 from StartNode
1377       assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
1378     }
1379     // Clear out dead values from the debug info.
1380     kill_dead_locals();
1381     // Build the FastLockNode
1382     _synch_lock = shared_lock(lock_obj);
1383     // Check for bailout in shared_lock
1384     if (failing()) { return; }
1385   }
1386 
1387   // Feed profiling data for parameters to the type system so it can
1388   // propagate it as speculative types
1389   record_profiled_parameters_for_speculation();
1390 }
1391 
1392 //------------------------------init_blocks------------------------------------
1393 // Initialize our parser map to contain the types/monitors at method entry.
1394 void Parse::init_blocks() {
1395   // Create the blocks.
1396   _block_count = flow()->block_count();
1397   _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);

1793 //--------------------handle_missing_successor---------------------------------
1794 void Parse::handle_missing_successor(int target_bci) {
1795 #ifndef PRODUCT
1796   Block* b = block();
1797   int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1798   tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1799 #endif
1800   ShouldNotReachHere();
1801 }
1802 
1803 //--------------------------merge_common---------------------------------------
1804 void Parse::merge_common(Parse::Block* target, int pnum) {
1805   if (TraceOptoParse) {
1806     tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1807   }
1808 
1809   // Zap extra stack slots to top
1810   assert(sp() == target->start_sp(), "");
1811   clean_stack(sp());
1812 
1813   // Check for merge conflicts involving inline types
1814   JVMState* old_jvms = map()->jvms();
1815   int old_bci = bci();
1816   JVMState* tmp_jvms = old_jvms->clone_shallow(C);
1817   tmp_jvms->set_should_reexecute(true);
1818   tmp_jvms->bind_map(map());
1819   // Execution needs to restart at the next bytecode (entry of the next
1820   // block).
1821   if (target->is_merged() ||
1822       pnum > PhiNode::Input ||
1823       target->is_handler() ||
1824       target->is_loop_head()) {
1825     set_parse_bci(target->start());
1826     for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1827       Node* n = map()->in(j);                 // Incoming change to target state.
1828       const Type* t = nullptr;
1829       if (tmp_jvms->is_loc(j)) {
1830         t = target->local_type_at(j - tmp_jvms->locoff());
1831       } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1832         t = target->stack_type_at(j - tmp_jvms->stkoff());
1833       }
1834       if (t != nullptr && t != Type::BOTTOM) {
1835         // An object can appear in the JVMS as either an oop or an InlineTypeNode. If the merge is
1836         // an InlineTypeNode, we need all the merge inputs to be InlineTypeNodes. Else, if the
 1837         // merge is an oop, each merge input needs to be either an oop or a buffered
1838         // InlineTypeNode.
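             // Illustrative example (hypothetical names): for 'v = cond ? a : b' where
             // v is an inline type, if the first processed input is an InlineTypeNode
             // the merge is scalarized into per-field phis, and an oop arriving on a
             // later path is wrapped via make_from_oop.  Conversely, if the merge is
             // an oop phi, an InlineTypeNode input is first buffered (heap-allocated)
             // so a plain oop can flow into the phi.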
1839         if (!t->is_inlinetypeptr()) {
1840           // The merge cannot be an InlineTypeNode, ensure the input is buffered if it is an
1841           // InlineTypeNode
1842           if (n->is_InlineType()) {
1843             map()->set_req(j, n->as_InlineType()->buffer(this));
1844           }
1845         } else {
1846           // Since the merge is a value object, it can either be an oop or an InlineTypeNode
1847           if (!target->is_merged()) {
1848             // This is the first processed input of the merge. If it is an InlineTypeNode, the
1849             // merge will be an InlineTypeNode. Else, try to scalarize so the merge can be
1850             // scalarized as well. However, we cannot blindly scalarize an inline type oop here
1851             // since it may be larval
1852             if (!n->is_InlineType() && gvn().type(n)->is_zero_type()) {
1853               // Null constant implies that this is not a larval object
1854               map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass()));
1855             }
1856           } else {
1857             Node* phi = target->start_map()->in(j);
1858             if (phi->is_InlineType()) {
1859               // Larval oops cannot be merged with non-larval ones, and since the merge point is
1860               // non-larval, n must be non-larval as well. As a result, we can scalarize n to merge
1861               // into phi
1862               if (!n->is_InlineType()) {
1863                 map()->set_req(j, InlineTypeNode::make_from_oop(this, n, t->inline_klass()));
1864               }
1865             } else {
1866               // The merge is an oop phi, ensure the input is buffered if it is an InlineTypeNode
1867               if (n->is_InlineType()) {
1868                 map()->set_req(j, n->as_InlineType()->buffer(this));
1869               }
1870             }
1871           }
1872         }
1873       }
1874     }
1875   }
1876   old_jvms->bind_map(map());
1877   set_parse_bci(old_bci);
1878 
1879   if (!target->is_merged()) {   // No prior mapping at this bci
1880     if (TraceOptoParse) { tty->print(" with empty state");  }
1881 
1882     // If this path is dead, do not bother capturing it as a merge.
 1883     // It is "as if" we had one fewer predecessor from the beginning.
1884     if (stopped()) {
1885       if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
1886       return;
1887     }
1888 
1889     // Make a region if we know there are multiple or unpredictable inputs.
1890     // (Also, if this is a plain fall-through, we might see another region,
1891     // which must not be allowed into this block's map.)
1892     if (pnum > PhiNode::Input         // Known multiple inputs.
1893         || target->is_handler()       // These have unpredictable inputs.
1894         || target->is_loop_head()     // Known multiple inputs
1895         || control()->is_Region()) {  // We must hide this guy.
1896 
1897       int current_bci = bci();
1898       set_parse_bci(target->start()); // Set target bci

1913       record_for_igvn(r);
1914       // zap all inputs to null for debugging (done in Node(uint) constructor)
1915       // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1916       r->init_req(pnum, control());
1917       set_control(r);
1918       target->copy_irreducible_status_to(r, jvms());
1919       set_parse_bci(current_bci); // Restore bci
1920     }
1921 
1922     // Convert the existing Parser mapping into a mapping at this bci.
1923     store_state_to(target);
1924     assert(target->is_merged(), "do not come here twice");
1925 
1926   } else {                      // Prior mapping at this bci
1927     if (TraceOptoParse) {  tty->print(" with previous state"); }
1928 #ifdef ASSERT
1929     if (target->is_SEL_head()) {
1930       target->mark_merged_backedge(block());
1931     }
1932 #endif
1933 
1934     // We must not manufacture more phis if the target is already parsed.
1935     bool nophi = target->is_parsed();
1936 
1937     SafePointNode* newin = map();// Hang on to incoming mapping
1938     Block* save_block = block(); // Hang on to incoming block;
1939     load_state_from(target);    // Get prior mapping
1940 
1941     assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1942     assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1943     assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1944     assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1945 
1946     // Iterate over my current mapping and the old mapping.
1947     // Where different, insert Phi functions.
1948     // Use any existing Phi functions.
1949     assert(control()->is_Region(), "must be merging to a region");
1950     RegionNode* r = control()->as_Region();
1951 
1952     // Compute where to merge into
1953     // Merge incoming control path
1954     r->init_req(pnum, newin->control());
1955 
1956     if (pnum == 1) {            // Last merge for this Region?
1957       if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1958         Node* result = _gvn.transform(r);
1959         if (r != result && TraceOptoParse) {
1960           tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1961         }
1962       }
1963       record_for_igvn(r);
1964     }
1965 
1966     // Update all the non-control inputs to map:
1967     assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1968     bool check_elide_phi = target->is_SEL_backedge(save_block);
1969     bool last_merge = (pnum == PhiNode::Input);
1970     for (uint j = 1; j < newin->req(); j++) {
1971       Node* m = map()->in(j);   // Current state of target.
1972       Node* n = newin->in(j);   // Incoming change to target state.
1973       Node* phi;
1974       if (m->is_Phi() && m->as_Phi()->region() == r) {
1975         phi = m;
1976       } else if (m->is_InlineType() && m->as_InlineType()->has_phi_inputs(r)) {
1977         phi = m;
1978       } else {
1979         phi = nullptr;
1980       }
1981       if (m != n) {             // Different; must merge
1982         switch (j) {
 1983         // Frame pointer and Return Address never change
1984         case TypeFunc::FramePtr:// Drop m, use the original value
1985         case TypeFunc::ReturnAdr:
1986           break;
1987         case TypeFunc::Memory:  // Merge inputs to the MergeMem node
1988           assert(phi == nullptr, "the merge contains phis, not vice versa");
1989           merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1990           continue;
1991         default:                // All normal stuff
1992           if (phi == nullptr) {
1993             const JVMState* jvms = map()->jvms();
1994             if (EliminateNestedLocks &&
1995                 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
 1996               // BoxLock nodes are not commoned when EliminateNestedLocks is on.
1997               // Use old BoxLock node as merged box.
1998               assert(newin->jvms()->is_monitor_box(j), "sanity");
1999               // This assert also tests that nodes are BoxLock.
2000               assert(BoxLockNode::same_slot(n, m), "sanity");

2007                 // Incremental Inlining before EA and Macro nodes elimination.
2008                 //
2009                 // Incremental Inlining is executed after IGVN optimizations
2010                 // during which BoxLock can be marked as Coarsened.
2011                 old_box->set_coarsened(); // Verifies state
2012                 old_box->set_unbalanced();
2013               }
2014               C->gvn_replace_by(n, m);
2015             } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
2016               phi = ensure_phi(j, nophi);
2017             }
2018           }
2019           break;
2020         }
2021       }
2022       // At this point, n might be top if:
2023       //  - there is no phi (because TypeFlow detected a conflict), or
 2024       //  - the corresponding control edge is top (a dead incoming path)
2025       // It is a bug if we create a phi which sees a garbage value on a live path.
2026 
2027       // Merging two inline types?
2028       if (phi != nullptr && phi->is_InlineType()) {
2029         // Reload current state because it may have been updated by ensure_phi
2030         assert(phi == map()->in(j), "unexpected value in map");
2031         assert(phi->as_InlineType()->has_phi_inputs(r), "");
2032         InlineTypeNode* vtm = phi->as_InlineType(); // Current inline type
2033         InlineTypeNode* vtn = n->as_InlineType(); // Incoming inline type
2034         assert(vtm == phi, "Inline type should have Phi input");
2035 
2036 #ifdef ASSERT
2037         if (TraceOptoParse) {
2038           tty->print_cr("\nMerging inline types");
2039           tty->print_cr("Current:");
2040           vtm->dump(2);
2041           tty->print_cr("Incoming:");
2042           vtn->dump(2);
2043           tty->cr();
2044         }
2045 #endif
2046         // Do the merge
2047         vtm->merge_with(&_gvn, vtn, pnum, last_merge);
2048         if (last_merge) {
2049           map()->set_req(j, _gvn.transform(vtm));
2050           record_for_igvn(vtm);
2051         }
2052       } else if (phi != nullptr) {
2053         assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
2054         assert(phi->as_Phi()->region() == r, "");
2055         phi->set_req(pnum, n);  // Then add 'n' to the merge
2056         if (last_merge) {
2057           // Last merge for this Phi.
2058           // So far, Phis have had a reasonable type from ciTypeFlow.
2059           // Now _gvn will join that with the meet of current inputs.
2060           // BOTTOM is never permissible here, 'cause pessimistically
2061           // Phis of pointers cannot lose the basic pointer type.
2062           DEBUG_ONLY(const Type* bt1 = phi->bottom_type());
2063           assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
2064           map()->set_req(j, _gvn.transform(phi));
2065           DEBUG_ONLY(const Type* bt2 = phi->bottom_type());
2066           assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
2067           record_for_igvn(phi);
2068         }
2069       }
2070     } // End of for all values to be merged
2071 
2072     if (last_merge && !r->in(0)) {         // The occasional useless Region
2073       assert(control() == r, "");
2074       set_control(r->nonnull_req());
2075     }
2076 
2077     map()->merge_replaced_nodes_with(newin);
2078 
2079     // newin has been subsumed into the lazy merge, and is now dead.
2080     set_block(save_block);
2081 
2082     stop();                     // done with this guy, for now
2083   }
2084 
2085   if (TraceOptoParse) {
2086     tty->print_cr(" on path %d", pnum);
2087   }
2088 
2089   // Done with this parser state.
2090   assert(stopped(), "");
2091 }
2092 

2204 
2205   // Add new path to the region.
2206   uint pnum = r->req();
2207   r->add_req(nullptr);
2208 
2209   for (uint i = 1; i < map->req(); i++) {
2210     Node* n = map->in(i);
2211     if (i == TypeFunc::Memory) {
2212       // Ensure a phi on all currently known memories.
2213       for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2214         Node* phi = mms.memory();
2215         if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2216           assert(phi->req() == pnum, "must be same size as region");
2217           phi->add_req(nullptr);
2218         }
2219       }
2220     } else {
2221       if (n->is_Phi() && n->as_Phi()->region() == r) {
2222         assert(n->req() == pnum, "must be same size as region");
2223         n->add_req(nullptr);
2224       } else if (n->is_InlineType() && n->as_InlineType()->has_phi_inputs(r)) {
2225         n->as_InlineType()->add_new_path(r);
2226       }
2227     }
2228   }
2229 
2230   return pnum;
2231 }
2232 
2233 //------------------------------ensure_phi-------------------------------------
2234 // Turn the idx'th entry of the current map into a Phi
2235 Node* Parse::ensure_phi(int idx, bool nocreate) {
2236   SafePointNode* map = this->map();
2237   Node* region = map->control();
2238   assert(region->is_Region(), "");
2239 
2240   Node* o = map->in(idx);
2241   assert(o != nullptr, "");
2242 
2243   if (o == top())  return nullptr; // TOP always merges into TOP
2244 
2245   if (o->is_Phi() && o->as_Phi()->region() == region) {
2246     return o->as_Phi();
2247   }
2248   InlineTypeNode* vt = o->isa_InlineType();
2249   if (vt != nullptr && vt->has_phi_inputs(region)) {
2250     return vt;
2251   }
2252 
2253   // Now use a Phi here for merging
2254   assert(!nocreate, "Cannot build a phi for a block already parsed.");
2255   const JVMState* jvms = map->jvms();
2256   const Type* t = nullptr;
2257   if (jvms->is_loc(idx)) {
2258     t = block()->local_type_at(idx - jvms->locoff());
2259   } else if (jvms->is_stk(idx)) {
2260     t = block()->stack_type_at(idx - jvms->stkoff());
2261   } else if (jvms->is_mon(idx)) {
2262     assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2263     t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2264   } else if ((uint)idx < TypeFunc::Parms) {
2265     t = o->bottom_type();  // Type::RETURN_ADDRESS or such-like.
2266   } else {
2267     assert(false, "no type information for this phi");
2268   }
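       // Slot layout assumed by the dispatch above (explanatory note): locals occupy
       // [locoff, stkoff), the expression stack [stkoff, monoff), and monitors follow,
       // so idx alone selects which ciTypeFlow type table to consult.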
2269 
2270   // If the type falls to bottom, then this must be a local that
2271   // is already dead or is mixing ints and oops or some such.
2272   // Forcing it to top makes it go dead.
2273   if (t == Type::BOTTOM) {
2274     map->set_req(idx, top());
2275     return nullptr;
2276   }
2277 
2278   // Do not create phis for top either.
 2279   // A top on a non-null control flow must be unused even after the phi.
2280   if (t == Type::TOP || t == Type::HALF) {
2281     map->set_req(idx, top());
2282     return nullptr;
2283   }
2284 
2285   if (vt != nullptr && t->is_inlinetypeptr()) {
2286     // Inline types are merged by merging their field values.
2287     // Create a cloned InlineTypeNode with phi inputs that
2288     // represents the merged inline type and update the map.
2289     vt = vt->clone_with_phis(&_gvn, region);
2290     map->set_req(idx, vt);
2291     return vt;
2292   } else {
2293     PhiNode* phi = PhiNode::make(region, o, t);
2294     gvn().set_type(phi, t);
2295     if (C->do_escape_analysis()) record_for_igvn(phi);
2296     map->set_req(idx, phi);
2297     return phi;
2298   }
2299 }
2300 
2301 //--------------------------ensure_memory_phi----------------------------------
2302 // Turn the idx'th slice of the current memory into a Phi
2303 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2304   MergeMemNode* mem = merged_memory();
2305   Node* region = control();
2306   assert(region->is_Region(), "");
2307 
2308   Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2309   assert(o != nullptr && o != top(), "");
2310 
2311   PhiNode* phi;
2312   if (o->is_Phi() && o->as_Phi()->region() == region) {
2313     phi = o->as_Phi();
2314     if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2315       // clone the shared base memory phi to make a new memory split
2316       assert(!nocreate, "Cannot build a phi for a block already parsed.");
2317       const Type* t = phi->bottom_type();
2318       const TypePtr* adr_type = C->get_adr_type(idx);

2408 // Add check to deoptimize once holder klass is fully initialized.
2409 void Parse::clinit_deopt() {
2410   assert(C->has_method(), "only for normal compilations");
2411   assert(depth() == 1, "only for main compiled method");
2412   assert(is_normal_parse(), "no barrier needed on osr entry");
2413   assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2414 
2415   set_parse_bci(0);
2416 
2417   Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2418   guard_klass_being_initialized(holder);
2419 }
2420 
2421 //------------------------------return_current---------------------------------
2422 // Append current _map to _exit_return
2423 void Parse::return_current(Node* value) {
2424   if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2425     call_register_finalizer();
2426   }
2427 
 2428   // The frame pointer is always the same, already captured.
2429   if (value != nullptr) {
2430     Node* phi = _exits.argument(0);
2431     const Type* return_type = phi->bottom_type();
2432     const TypeInstPtr* tr = return_type->isa_instptr();
2433     if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
2434         return_type->is_inlinetypeptr()) {
2435       // Inline type is returned as fields, make sure it is scalarized
2436       if (!value->is_InlineType()) {
2437         value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass());
2438       }
2439       if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
2440         // Returning from root or an incrementally inlined method. Make sure all non-flat
2441         // fields are buffered and re-execute if allocation triggers deoptimization.
2442         PreserveReexecuteState preexecs(this);
2443         assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
2444         jvms()->set_should_reexecute(true);
2445         inc_sp(1);
2446         value = value->as_InlineType()->allocate_fields(this);
2447       }
2448     } else if (value->is_InlineType()) {
2449       // Inline type is returned as oop, make sure it is buffered and re-execute
2450       // if allocation triggers deoptimization.
2451       PreserveReexecuteState preexecs(this);
2452       jvms()->set_should_reexecute(true);
2453       inc_sp(1);
2454       value = value->as_InlineType()->buffer(this);
2455     }
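         // In both re-execution cases above, inc_sp(1) plus set_should_reexecute(true)
         // arrange that, if the buffering allocation deoptimizes, the interpreter
         // re-runs the return bytecode with the value back on the stack rather than
         // resuming after a half-completed return.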
2456     // ...else
2457     // If returning oops to an interface-return, there is a silent free
2458     // cast from oop to interface allowed by the Verifier. Make it explicit here.
2459     phi->add_req(value);
2460   }
2461 
2462   // Do not set_parse_bci, so that return goo is credited to the return insn.
2463   set_bci(InvocationEntryBci);
2464   if (method()->is_synchronized()) {
2465     shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2466   }
2467   if (C->env()->dtrace_method_probes()) {
2468     make_dtrace_method_exit(method());
2469   }
2470 
2471   SafePointNode* exit_return = _exits.map();
2472   exit_return->in( TypeFunc::Control  )->add_req( control() );
2473   exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
2474   Node *mem = exit_return->in( TypeFunc::Memory   );
2475   for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2476     if (mms.is_empty()) {
2477       // get a copy of the base memory, and patch just this one input
2478       const TypePtr* adr_type = mms.adr_type(C);
2479       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2480       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2481       gvn().set_type_bottom(phi);
2482       phi->del_req(phi->req()-1);  // prepare to re-patch
2483       mms.set_memory(phi);
2484     }
2485     mms.memory()->add_req(mms.memory2());
2486   }
2487 
2488   if (_first_return) {
2489     _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2490     _first_return = false;
2491   } else {
2492     _exits.map()->merge_replaced_nodes_with(map());
2493   }
2494 
2495   stop_and_kill_map();          // This CFG path dies here
2496 }
2497 
2498 
2499 //------------------------------add_safepoint----------------------------------
2500 void Parse::add_safepoint() {
2501   uint parms = TypeFunc::Parms+1;
2502 
2503   // Clear out dead values from the debug info.
2504   kill_dead_locals();
2505 
2506   // Clone the JVM State
2507   SafePointNode *sfpnt = new SafePointNode(parms, nullptr);