src/hotspot/share/opto/parse1.cpp

   1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 


  25 #include "compiler/compileLog.hpp"
  26 #include "interpreter/linkResolver.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "oops/method.hpp"
  29 #include "opto/addnode.hpp"
  30 #include "opto/c2compiler.hpp"
  31 #include "opto/castnode.hpp"

  32 #include "opto/idealGraphPrinter.hpp"

  33 #include "opto/locknode.hpp"
  34 #include "opto/memnode.hpp"
  35 #include "opto/opaquenode.hpp"
  36 #include "opto/parse.hpp"
  37 #include "opto/rootnode.hpp"
  38 #include "opto/runtime.hpp"
  39 #include "opto/type.hpp"

  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/safepointMechanism.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "utilities/bitMap.inline.hpp"
  44 #include "utilities/copy.hpp"
  45 
  46 // Static counters so we can figure out which bytecodes most often stop us
  47 // from compiling. Some of the non-static variables are needed in bytecodeInfo.cpp
  48 // and eventually should be encapsulated in a proper class (gri 8/18/98).
  49 
  50 #ifndef PRODUCT
  51 uint nodes_created             = 0;
  52 uint methods_parsed            = 0;
  53 uint methods_seen              = 0;
  54 uint blocks_parsed             = 0;
  55 uint blocks_seen               = 0;
  56 
  57 uint explicit_null_checks_inserted = 0;
  58 uint explicit_null_checks_elided   = 0;
  59 uint all_null_checks_found         = 0;

  84   }
  85   if (all_null_checks_found) {
  86     tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
  87                   (100*implicit_null_checks)/all_null_checks_found);
  88   }
  89   if (SharedRuntime::_implicit_null_throws) {
  90     tty->print_cr("%u implicit null exceptions at runtime",
  91                   SharedRuntime::_implicit_null_throws);
  92   }
  93 
  94   if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
  95     BytecodeParseHistogram::print();
  96   }
  97 }
  98 #endif
  99 
 100 //------------------------------ON STACK REPLACEMENT---------------------------
 101 
 102 // Construct a node which can be used to get incoming state for
 103 // on stack replacement.
 104 Node *Parse::fetch_interpreter_state(int index,
 105                                      BasicType bt,
 106                                      Node* local_addrs) {






 107   Node *mem = memory(Compile::AliasIdxRaw);
 108   Node *adr = basic_plus_adr(top(), local_addrs, -index*wordSize);
 109   Node *ctl = control();
 110 
 111   // Very similar to LoadNode::make, except we handle un-aligned longs and
 112   // doubles on Sparc.  Intel can handle them just fine directly.
 113   Node *l = nullptr;
 114   switch (bt) {                // Signature is flattened
 115   case T_INT:     l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        MemNode::unordered); break;
 116   case T_FLOAT:   l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         MemNode::unordered); break;
 117   case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,  MemNode::unordered); break;
 118   case T_OBJECT:  l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
 119   case T_LONG:
 120   case T_DOUBLE: {
 121     // Since arguments are in reverse order, the argument address 'adr'
 122     // refers to the back half of the long/double.  Recompute adr.
 123     adr = basic_plus_adr(top(), local_addrs, -(index+1)*wordSize);
 124     if (Matcher::misaligned_doubles_ok) {
 125       l = (bt == T_DOUBLE)
 126         ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
 127         : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
 128     } else {
 129       l = (bt == T_DOUBLE)
 130         ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
 131         : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
 132     }
 133     break;
 134   }
 135   default: ShouldNotReachHere();
 136   }
 137   return _gvn.transform(l);
 138 }
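
     // A minimal usage sketch (illustrative only, not part of the source):
     // fetching local #0 as an int during OSR state loading, given a
     // 'locals_addr' node pointing into the OSR buffer:
     //
     //   Node* value = fetch_interpreter_state(0, T_INT, locals_addr);
     //   set_local(0, value);
     //
     // This mirrors the locals loop in load_interpreter_state() below.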
 139 
 140 // Helper routine to prevent the interpreter from handing
 141 // unexpected typestate to an OSR method.
 142 // The Node l is a value newly dug out of the interpreter frame.
 143 // The type is the type predicted by ciTypeFlow.  Note that it is
 144 // not a general type, but can only come from Type::get_typeflow_type.
 145 // The safepoint is a map which will feed an uncommon trap.
 146 Node* Parse::check_interpreter_type(Node* l, const Type* type,
 147                                     SafePointNode* &bad_type_exit) {
 148 
 149   const TypeOopPtr* tp = type->isa_oopptr();
 150 
 151   // TypeFlow may assert null-ness if a type appears unloaded.
 152   if (type == TypePtr::NULL_PTR ||
 153       (tp != nullptr && !tp->is_loaded())) {
 154     // Value must be null, not a real oop.
 155     Node* chk = _gvn.transform( new CmpPNode(l, null()) );
 156     Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
 157     IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
 158     set_control(_gvn.transform( new IfTrueNode(iff) ));
 159     Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
 160     bad_type_exit->control()->add_req(bad_type);
 161     l = null();
 162   }
 163 
 164   // Typeflow can also cut off paths from the CFG, based on
 165   // types which appear unloaded, or call sites which appear unlinked.
 166   // When paths are cut off, values at later merge points can rise
 167   // toward more specific classes.  Make sure these specific classes
 168   // are still in effect.
 169   if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
 170     // TypeFlow asserted a specific object type.  Value must have that type.
 171     Node* bad_type_ctrl = nullptr;
 172     l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);







 173     bad_type_exit->control()->add_req(bad_type_ctrl);
 174   }
 175 
 176   assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
 177   return l;
 178 }
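
     // In short (a summary of the code above): values predicted null or
     // unloaded are null-checked, and values with an asserted specific type
     // are checkcast to it; either failure path is wired into
     // bad_type_exit's control for a later uncommon trap.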
 179 
 180 // Helper routine which sets up elements of the initial parser map when
 181 // performing a parse for on stack replacement.  Add values into map.
 182 // The only parameter contains the address of the interpreter's arguments.
 183 void Parse::load_interpreter_state(Node* osr_buf) {
 184   int index;
 185   int max_locals = jvms()->loc_size();
 186   int max_stack  = jvms()->stk_size();
 187 
 188 
 189   // Mismatch between method and jvms can occur since map briefly held
 190   // an OSR entry state (which takes up one RawPtr word).
 191   assert(max_locals == method()->max_locals(), "sanity");
 192   assert(max_stack  >= method()->max_stack(),  "sanity");
 193   assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
 194   assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
 195 
 196   // Find the start block.
 197   Block* osr_block = start_block();
 198   assert(osr_block->start() == osr_bci(), "sanity");
 199 
 200   // Set initial BCI.
 201   set_parse_bci(osr_block->start());
 202 
 203   // Set initial stack depth.
 204   set_sp(osr_block->start_sp());
 205 
 206   // Check bailouts.  We currently do not perform on stack replacement
 207   // of loops in catch blocks or loops which branch with a non-empty stack.
 208   if (sp() != 0) {

 223   for (index = 0; index < mcnt; index++) {
 224     // Make a BoxLockNode for the monitor.
 225     BoxLockNode* osr_box = new BoxLockNode(next_monitor());
 226     // Check for bailout after new BoxLockNode
 227     if (failing()) { return; }
 228 
 229     // This OSR locking region is unbalanced because it does not have a Lock node:
 230     // locking was done in the Interpreter.
 231     // This is similar to the Coarsened case when the Lock node is eliminated
 232     // and, as a result, the region is marked as Unbalanced.
 233 
 234     // Emulate Coarsened state transition from Regular to Unbalanced.
 235     osr_box->set_coarsened();
 236     osr_box->set_unbalanced();
 237 
 238     Node* box = _gvn.transform(osr_box);
 239 
 240     // Displaced headers and locked objects are interleaved in the
 241     // temp OSR buffer.  We only copy the locked objects out here.
 242     // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
 243     Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr);
 244     // Try and copy the displaced header to the BoxNode
 245     Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr);
 246 
 247 
 248     store_to_memory(control(), box, displaced_hdr, T_ADDRESS, MemNode::unordered);
 249 
 250     // Build a bogus FastLockNode (no code will be generated) and push the
 251     // monitor into our debug info.
 252     const FastLockNode *flock = _gvn.transform(new FastLockNode( nullptr, lock_object, box ))->as_FastLock();
 253     map()->push_monitor(flock);
 254 
 255     // If the lock is our method synchronization lock, tuck it away in
 256     // _sync_lock for return and rethrow exit paths.
 257     if (index == 0 && method()->is_synchronized()) {
 258       _synch_lock = flock;
 259     }
 260   }
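
       // Buffer layout assumed by the two fetches above (a sketch): monitor
       // entry i of the temp OSR buffer holds its locked object at word offset
       // -(2*i) from monitors_addr and its displaced header at -(2*i + 1),
       // matching the negative-offset addressing in fetch_interpreter_state().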
 261 
 262   // Use the raw liveness computation to make sure that unexpected
 263   // values don't propagate into the OSR frame.
 264   MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
 265   if (!live_locals.is_valid()) {
 266     // Degenerate or breakpointed method.

 294         if (C->log() != nullptr) {
 295           C->log()->elem("OSR_mismatch local_index='%d'",index);
 296         }
 297         set_local(index, null());
 298         // and ignore it for the loads
 299         continue;
 300       }
 301     }
 302 
 303     // Filter out TOP, HALF, and BOTTOM.  (Cf. ensure_phi.)
 304     if (type == Type::TOP || type == Type::HALF) {
 305       continue;
 306     }
 307     // If the type falls to bottom, then this must be a local that
 308     // is mixing ints and oops or some such.  Forcing it to top
 309     // makes it go dead.
 310     if (type == Type::BOTTOM) {
 311       continue;
 312     }
 313     // Construct code to access the appropriate local.
 314     BasicType bt = type->basic_type();
 315     if (type == TypePtr::NULL_PTR) {
 316       // Ptr types are mixed together with T_ADDRESS but null is
 317       // really for T_OBJECT types so correct it.
 318       bt = T_OBJECT;
 319     }
 320     Node *value = fetch_interpreter_state(index, bt, locals_addr);
 321     set_local(index, value);
 322   }
 323 
 324   // Extract the needed stack entries from the interpreter frame.
 325   for (index = 0; index < sp(); index++) {
 326     const Type *type = osr_block->stack_type_at(index);
 327     if (type != Type::TOP) {
 328       // Currently the compiler bails out when attempting on stack replacement
 329       // at a bci with a non-empty stack.  We should not reach here.
 330       ShouldNotReachHere();
 331     }
 332   }
 333 
 334   // End the OSR migration
 335   make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
 336                     CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
 337                     "OSR_migration_end", TypeRawPtr::BOTTOM,
 338                     osr_buf);
 339 
 340   // Now that the interpreter state is loaded, make sure it will match

 351     if (type->isa_oopptr() != nullptr) {
 352       if (!live_oops.at(index)) {
 353         // skip type check for dead oops
 354         continue;
 355       }
 356     }
 357     if (osr_block->flow()->local_type_at(index)->is_return_address()) {
 358       // In our current system it's illegal for jsr addresses to be
 359       // live into an OSR entry point because the compiler performs
 360     // inlining of jsrs.  ciTypeFlow has a bailout that detects this
 361       // case and aborts the compile if addresses are live into an OSR
 362       // entry point.  Because of that we can assume that any address
 363       // locals at the OSR entry point are dead.  Method liveness
 364       // isn't precise enough to figure out that they are dead in all
 365     // cases, so simply skip checking address locals
 366     // altogether. Any type check is guaranteed to fail since the
 367       // interpreter type is the result of a load which might have any
 368       // value and the expected type is a constant.
 369       continue;
 370     }
 371     set_local(index, check_interpreter_type(l, type, bad_type_exit));






 372   }
 373 
 374   for (index = 0; index < sp(); index++) {
 375     if (stopped())  break;
 376     Node* l = stack(index);
 377     if (l->is_top())  continue;  // nothing here
 378     const Type *type = osr_block->stack_type_at(index);
 379     set_stack(index, check_interpreter_type(l, type, bad_type_exit));






 380   }
 381 
 382   if (bad_type_exit->control()->req() > 1) {
 383     // Build an uncommon trap here, if any inputs can be unexpected.
 384     bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
 385     record_for_igvn(bad_type_exit->control());
 386     SafePointNode* types_are_good = map();
 387     set_map(bad_type_exit);
 388     // The unexpected type happens because a new edge is active
 389     // in the CFG, which typeflow had previously ignored.
 390     // E.g., Object x = coldAtFirst() && notReached()? "str": new Integer(123).
 391     // This x will be typed as Integer if notReached is not yet linked.
 392     // It could also happen due to a problem in ciTypeFlow analysis.
 393     uncommon_trap(Deoptimization::Reason_constraint,
 394                   Deoptimization::Action_reinterpret);
 395     set_map(types_are_good);
 396   }
 397 }
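
     // In outline, load_interpreter_state() above proceeds as follows (a
     // summary of the code, not additional behavior):
     //   1. position the parser at osr_bci() with an empty expression stack;
     //   2. rebuild each interpreter monitor as a BoxLockNode/FastLockNode pair;
     //   3. copy live locals out of the OSR buffer, nulling dead or mismatched ones;
     //   4. call SharedRuntime::OSR_migration_end to release the OSR buffer;
     //   5. type-check the loaded values, routing any mismatch to an uncommon trap.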
 398 
 399 //------------------------------Parse------------------------------------------

 500   // either breakpoint setting or hotswapping of methods may
 501   // cause deoptimization.
 502   if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
 503     C->dependencies()->assert_evol_method(method());
 504   }
 505 
 506   NOT_PRODUCT(methods_seen++);
 507 
 508   // Do some special top-level things.
 509   if (depth() == 1 && C->is_osr_compilation()) {
 510     _tf = C->tf();     // the OSR entry type is different
 511     _entry_bci = C->entry_bci();
 512     _flow = method()->get_osr_flow_analysis(osr_bci());
 513   } else {
 514     _tf = TypeFunc::make(method());
 515     _entry_bci = InvocationEntryBci;
 516     _flow = method()->get_flow_analysis();
 517   }
 518 
 519   if (_flow->failing()) {
 520     assert(false, "type flow analysis failed during parsing");


 521     C->record_method_not_compilable(_flow->failure_reason());
 522 #ifndef PRODUCT
 523       if (PrintOpto && (Verbose || WizardMode)) {
 524         if (is_osr_parse()) {
 525           tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
 526         } else {
 527           tty->print_cr("type flow bailout: %s", _flow->failure_reason());
 528         }
 529         if (Verbose) {
 530           method()->print();
 531           method()->print_codes();
 532           _flow->print();
 533         }
 534       }
 535 #endif
 536   }
 537 
 538 #ifdef ASSERT
 539   if (depth() == 1) {
 540     assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");

 776 void Parse::build_exits() {
 777   // make a clone of caller to prevent sharing of side-effects
 778   _exits.set_map(_exits.clone_map());
 779   _exits.clean_stack(_exits.sp());
 780   _exits.sync_jvms();
 781 
 782   RegionNode* region = new RegionNode(1);
 783   record_for_igvn(region);
 784   gvn().set_type_bottom(region);
 785   _exits.set_control(region);
 786 
 787   // Note:  iophi and memphi are not transformed until do_exits.
 788   Node* iophi  = new PhiNode(region, Type::ABIO);
 789   Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
 790   gvn().set_type_bottom(iophi);
 791   gvn().set_type_bottom(memphi);
 792   _exits.set_i_o(iophi);
 793   _exits.set_all_memory(memphi);
 794 
 795   // Add a return value to the exit state.  (Do not push it yet.)
 796   if (tf()->range()->cnt() > TypeFunc::Parms) {
 797     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
 798     if (ret_type->isa_int()) {
 799       BasicType ret_bt = method()->return_type()->basic_type();
 800       if (ret_bt == T_BOOLEAN ||
 801           ret_bt == T_CHAR ||
 802           ret_bt == T_BYTE ||
 803           ret_bt == T_SHORT) {
 804         ret_type = TypeInt::INT;
 805       }
 806     }
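
         // Widening note (a summary): sub-int returns are kept as a full INT
         // in the exit phi; do_exits() later re-narrows the value with
         // mask_int_value() before pushing it.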
 807 
 808     // Don't "bind" an unloaded return klass to the ret_phi. If the klass
 809     // becomes loaded during the subsequent parsing, the loaded and unloaded
 810     // types will not join when we transform and push in do_exits().
 811     const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
 812     if (ret_oop_type && !ret_oop_type->is_loaded()) {
 813       ret_type = TypeOopPtr::BOTTOM;
 814     }
 815     int         ret_size = type2size[ret_type->basic_type()];
 816     Node*       ret_phi  = new PhiNode(region, ret_type);
 817     gvn().set_type_bottom(ret_phi);
 818     _exits.ensure_stack(ret_size);
 819     assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
 820     assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
 821     _exits.set_argument(0, ret_phi);  // here is where the parser finds it
 822     // Note:  ret_phi is not yet pushed, until do_exits.
 823   }
 824 }
 825 
 826 
 827 //----------------------------build_start_state-------------------------------
 828 // Construct a state which contains only the incoming arguments from an
 829 // unknown caller.  The method & bci will be null & InvocationEntryBci.
 830 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
 831   int        arg_size = tf->domain()->cnt();
 832   int        max_size = MAX2(arg_size, (int)tf->range()->cnt());
 833   JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
 834   SafePointNode* map  = new SafePointNode(max_size, jvms);

 835   record_for_igvn(map);
 836   assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
 837   Node_Notes* old_nn = default_node_notes();
 838   if (old_nn != nullptr && has_method()) {
 839     Node_Notes* entry_nn = old_nn->clone(this);
 840     JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
 841     entry_jvms->set_offsets(0);
 842     entry_jvms->set_bci(entry_bci());
 843     entry_nn->set_jvms(entry_jvms);
 844     set_default_node_notes(entry_nn);
 845   }
 846   uint i;
 847   for (i = 0; i < (uint)arg_size; i++) {
 848     Node* parm = initial_gvn()->transform(new ParmNode(start, i));

















 849     map->init_req(i, parm);
 850     // Record all these guys for later GVN.
 851     record_for_igvn(parm);



 852   }
 853   for (; i < map->req(); i++) {
 854     map->init_req(i, top());
 855   }
 856   assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
 857   set_default_node_notes(old_nn);
 858   jvms->set_map(map);
 859   return jvms;
 860 }
 861 
 862 //-----------------------------make_node_notes---------------------------------
 863 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
 864   if (caller_nn == nullptr)  return nullptr;
 865   Node_Notes* nn = caller_nn->clone(C);
 866   JVMState* caller_jvms = nn->jvms();
 867   JVMState* jvms = new (C) JVMState(method(), caller_jvms);
 868   jvms->set_offsets(0);
 869   jvms->set_bci(_entry_bci);
 870   nn->set_jvms(jvms);
 871   return nn;
 872 }
 873 
 874 
 875 //--------------------------return_values--------------------------------------
 876 void Compile::return_values(JVMState* jvms) {
 877   GraphKit kit(jvms);
 878   Node* ret = new ReturnNode(TypeFunc::Parms,
 879                              kit.control(),
 880                              kit.i_o(),
 881                              kit.reset_memory(),
 882                              kit.frameptr(),
 883                              kit.returnadr());
 884   // Add zero or one return value
 885   int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
 886   if (ret_size > 0) {
 887     kit.inc_sp(-ret_size);  // pop the return value(s)
 888     kit.sync_jvms();
 889     ret->add_req(kit.argument(0));
 890     // Note:  The second dummy edge is not needed by a ReturnNode.






















 891   }
 892   // bind it to root
 893   root()->add_req(ret);
 894   record_for_igvn(ret);
 895   initial_gvn()->transform(ret);
 896 }
 897 
 898 //------------------------rethrow_exceptions-----------------------------------
 899 // Bind all exception states in the list into a single RethrowNode.
 900 void Compile::rethrow_exceptions(JVMState* jvms) {
 901   GraphKit kit(jvms);
 902   if (!kit.has_exceptions())  return;  // nothing to generate
 903   // Load my combined exception state into the kit, with all phis transformed:
 904   SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
 905   Node* ex_oop = kit.use_exception_state(ex_map);
 906   RethrowNode* exit = new RethrowNode(kit.control(),
 907                                       kit.i_o(), kit.reset_memory(),
 908                                       kit.frameptr(), kit.returnadr(),
 909                                       // like a return but with exception input
 910                                       ex_oop);

 994   //    to complete, we force all writes to complete.
 995   //
 996   // 2. Experimental VM option is used to force the barrier if any field
 997   //    was written out in the constructor.
 998   //
 999   // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1000   //    support_IRIW_for_not_multiple_copy_atomic_cpu selects that
1001   //    MemBarVolatile is used before volatile load instead of after volatile
1002   //    store, so there's no barrier after the store.
1003   //    We want to guarantee the same behavior as on platforms with total store
1004   //    order, although this is not required by the Java memory model.
1005   //    In this case, we want to enforce visibility of volatile field
1006   //    initializations which are performed in constructors.
1007   //    So as with finals, we add a barrier here.
1008   //
1009   // "All bets are off" unless the first publication occurs after a
1010   // normal return from the constructor.  We do not attempt to detect
1011   // such unusual early publications.  But no barrier is needed on
1012   // exceptional returns, since they cannot publish normally.
1013   //
1014   if (method()->is_object_initializer() &&
1015        (wrote_final() || wrote_stable() ||
1016          (AlwaysSafeConstructors && wrote_fields()) ||
1017          (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1018     Node* recorded_alloc = alloc_with_final_or_stable();
1019     _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
1020                           recorded_alloc);
1021 
1022     // If Memory barrier is created for final fields write
1023     // and allocation node does not escape the initialize method,
1024     // then barrier introduced by allocation node can be removed.
1025     if (DoEscapeAnalysis && (recorded_alloc != nullptr)) {
1026       AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc);
1027       alloc->compute_MemBar_redundancy(method());
1028     }
1029     if (PrintOpto && (Verbose || WizardMode)) {
1030       method()->print_name();
1031       tty->print_cr(" writes finals/@Stable and needs a memory barrier");
1032     }
1033   }
1034 
1035   for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1036     // transform each slice of the original memphi:
1037     mms.set_memory(_gvn.transform(mms.memory()));
1038   }
1039   // Clean up input MergeMems created by transforming the slices
1040   _gvn.transform(_exits.merged_memory());
1041 
1042   if (tf()->range()->cnt() > TypeFunc::Parms) {
1043     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1044     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
1045     if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1046       // If the type we set for the ret_phi in build_exits() is too optimistic and
1047       // the ret_phi is top now, there's an extremely small chance that it may be due to class
1048       // loading.  It could also be due to an error, so mark this method as not compilable because
1049       // otherwise this could lead to an infinite compile loop.
1050       // In any case, this code path is rarely (and never in my testing) reached.
1051       C->record_method_not_compilable("Can't determine return type.");
1052       return;
1053     }
1054     if (ret_type->isa_int()) {
1055       BasicType ret_bt = method()->return_type()->basic_type();
1056       ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1057     }
1058     _exits.push_node(ret_type->basic_type(), ret_phi);
1059   }
1060 
1061   // Note:  Logic for creating and optimizing the ReturnNode is in Compile.
1062 
1063   // Unlock along the exceptional paths.

1117 
1118 //-----------------------------create_entry_map-------------------------------
1119 // Initialize our parser map to contain the types at method entry.
1120 // For OSR, the map contains a single RawPtr parameter.
1121 // Initial monitor locking for sync. methods is performed by do_method_entry.
1122 SafePointNode* Parse::create_entry_map() {
1123   // Check for really stupid bail-out cases.
1124   uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1125   if (len >= 32760) {
1126     // Bailout expected; this is a very rare edge case.
1127     C->record_method_not_compilable("too many local variables");
1128     return nullptr;
1129   }
1130 
1131   // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1132   _caller->map()->delete_replaced_nodes();
1133 
1134   // If this is an inlined method, we may have to do a receiver null check.
1135   if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1136     GraphKit kit(_caller);
1137     kit.null_check_receiver_before_call(method());

1138     _caller = kit.transfer_exceptions_into_jvms();

1139     if (kit.stopped()) {
1140       _exits.add_exception_states_from(_caller);
1141       _exits.set_jvms(_caller);
1142       return nullptr;
1143     }
1144   }
1145 
1146   assert(method() != nullptr, "parser must have a method");
1147 
1148   // Create an initial safepoint to hold JVM state during parsing
1149   JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1150   set_map(new SafePointNode(len, jvms));
1151 
1152   // Capture receiver info for compiled lambda forms.
1153   if (method()->is_compiled_lambda_form()) {
1154     ciInstance* recv_info = _caller->compute_receiver_info(method());
1155     jvms->set_receiver_info(recv_info);
1156   }
1157 
1158   jvms->set_map(map());

1162   SafePointNode* inmap = _caller->map();
1163   assert(inmap != nullptr, "must have inmap");
1164   // In case of null check on receiver above
1165   map()->transfer_replaced_nodes_from(inmap, _new_idx);
1166 
1167   uint i;
1168 
1169   // Pass thru the predefined input parameters.
1170   for (i = 0; i < TypeFunc::Parms; i++) {
1171     map()->init_req(i, inmap->in(i));
1172   }
1173 
1174   if (depth() == 1) {
1175     assert(map()->memory()->Opcode() == Op_Parm, "");
1176     // Insert the memory aliasing node
1177     set_all_memory(reset_memory());
1178   }
1179   assert(merged_memory(), "");
1180 
1181   // Now add the locals which are initially bound to arguments:
1182   uint arg_size = tf()->domain()->cnt();
1183   ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
1184   for (i = TypeFunc::Parms; i < arg_size; i++) {
1185     map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1186   }
1187 
1188   // Clear out the rest of the map (locals and stack)
1189   for (i = arg_size; i < len; i++) {
1190     map()->init_req(i, top());
1191   }
1192 
1193   SafePointNode* entry_map = stop();
1194   return entry_map;
1195 }
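
     // Resulting map layout (a summary): req() slots [0, TypeFunc::Parms) hold
     // the predefined inputs, followed by max_locals() locals (argument slots
     // initialized from the caller, the rest top) and max_stack() stack slots.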
1196 
1197 //-----------------------------do_method_entry--------------------------------
1198 // Emit any code needed in the pseudo-block before BCI zero.
1199 // The main thing to do is lock the receiver of a synchronized method.
1200 void Parse::do_method_entry() {
1201   set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1202   set_sp(0);                         // Java Stack Pointer
1203 
1204   NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
1205 






























1206   if (C->env()->dtrace_method_probes()) {
1207     make_dtrace_method_entry(method());
1208   }
1209 
1210 #ifdef ASSERT
1211   // Narrow receiver type when it is too broad for the method being parsed.
1212   if (!method()->is_static()) {
1213     ciInstanceKlass* callee_holder = method()->holder();
1214     const Type* holder_type = TypeInstPtr::make(TypePtr::BotPTR, callee_holder, Type::trust_interfaces);
1215 
1216     Node* receiver_obj = local(0);
1217     const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1218 
1219     if (receiver_type != nullptr && !receiver_type->higher_equal(holder_type)) {
1220       // Receiver should always be a subtype of callee holder.
1221       // But, since C2 type system doesn't properly track interfaces,
1222       // the invariant can't be expressed in the type system for default methods.
1223       // Example: for unrelated C <: I and D <: I, (C `meet` D) = Object </: I.
1224       assert(callee_holder->is_interface(), "missing subtype check");
1225 

1235 
1236   // If the method is synchronized, we need to construct a lock node, attach
1237   // it to the Start node, and pin it there.
1238   if (method()->is_synchronized()) {
1239     // Insert a FastLockNode right after the Start which takes as arguments
1240     // the current thread pointer, the "this" pointer & the address of the
1241     // stack slot pair used for the lock.  The "this" pointer is a projection
1242     // off the start node, but the locking spot has to be constructed by
1243     // creating a ConLNode of 0, and boxing it with a BoxLockNode.  The BoxLockNode
1244     // becomes the second argument to the FastLockNode call.  The
1245     // FastLockNode becomes the new control parent to pin it to the start.
1246 
1247     // Setup Object Pointer
1248     Node *lock_obj = nullptr;
1249     if (method()->is_static()) {
1250       ciInstance* mirror = _method->holder()->java_mirror();
1251       const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1252       lock_obj = makecon(t_lock);
1253     } else {                  // Else pass the "this" pointer,
1254       lock_obj = local(0);    // which is Parm0 from StartNode

1255     }
1256     // Clear out dead values from the debug info.
1257     kill_dead_locals();
1258     // Build the FastLockNode
1259     _synch_lock = shared_lock(lock_obj);
1260     // Check for bailout in shared_lock
1261     if (failing()) { return; }
1262   }
1263 
1264   // Feed profiling data for parameters to the type system so it can
1265   // propagate it as speculative types
1266   record_profiled_parameters_for_speculation();












































1267 }
1268 
1269 //------------------------------init_blocks------------------------------------
1270 // Initialize our parser map to contain the types/monitors at method entry.
1271 void Parse::init_blocks() {
1272   // Create the blocks.
1273   _block_count = flow()->block_count();
1274   _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1275 
1276   // Initialize the structs.
1277   for (int rpo = 0; rpo < block_count(); rpo++) {
1278     Block* block = rpo_at(rpo);
1279     new(block) Block(this, rpo);
1280   }
1281 
1282   // Collect predecessor and successor information.
1283   for (int rpo = 0; rpo < block_count(); rpo++) {
1284     Block* block = rpo_at(rpo);
1285     block->init_graph(this);
1286   }

1670 //--------------------handle_missing_successor---------------------------------
1671 void Parse::handle_missing_successor(int target_bci) {
1672 #ifndef PRODUCT
1673   Block* b = block();
1674   int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1675   tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1676 #endif
1677   ShouldNotReachHere();
1678 }
1679 
1680 //--------------------------merge_common---------------------------------------
1681 void Parse::merge_common(Parse::Block* target, int pnum) {
1682   if (TraceOptoParse) {
1683     tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1684   }
1685 
1686   // Zap extra stack slots to top
1687   assert(sp() == target->start_sp(), "");
1688   clean_stack(sp());
1689 


















































1690   if (!target->is_merged()) {   // No prior mapping at this bci
1691     if (TraceOptoParse) { tty->print(" with empty state");  }
1692 
1693     // If this path is dead, do not bother capturing it as a merge.
 1694     // It is "as if" we had one fewer predecessor from the beginning.
1695     if (stopped()) {
1696       if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
1697       return;
1698     }
1699 
1700     // Make a region if we know there are multiple or unpredictable inputs.
1701     // (Also, if this is a plain fall-through, we might see another region,
1702     // which must not be allowed into this block's map.)
1703     if (pnum > PhiNode::Input         // Known multiple inputs.
1704         || target->is_handler()       // These have unpredictable inputs.
1705         || target->is_loop_head()     // Known multiple inputs
1706         || control()->is_Region()) {  // We must hide this guy.
1707 
1708       int current_bci = bci();
1709       set_parse_bci(target->start()); // Set target bci

1724       record_for_igvn(r);
1725       // zap all inputs to null for debugging (done in Node(uint) constructor)
1726       // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1727       r->init_req(pnum, control());
1728       set_control(r);
1729       target->copy_irreducible_status_to(r, jvms());
1730       set_parse_bci(current_bci); // Restore bci
1731     }
1732 
1733     // Convert the existing Parser mapping into a mapping at this bci.
1734     store_state_to(target);
1735     assert(target->is_merged(), "do not come here twice");
1736 
1737   } else {                      // Prior mapping at this bci
1738     if (TraceOptoParse) {  tty->print(" with previous state"); }
1739 #ifdef ASSERT
1740     if (target->is_SEL_head()) {
1741       target->mark_merged_backedge(block());
1742     }
1743 #endif

1744     // We must not manufacture more phis if the target is already parsed.
1745     bool nophi = target->is_parsed();
1746 
1747     SafePointNode* newin = map();// Hang on to incoming mapping
1748     Block* save_block = block(); // Hang on to incoming block;
1749     load_state_from(target);    // Get prior mapping
1750 
1751     assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1752     assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1753     assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1754     assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1755 
1756     // Iterate over my current mapping and the old mapping.
1757     // Where different, insert Phi functions.
1758     // Use any existing Phi functions.
1759     assert(control()->is_Region(), "must be merging to a region");
1760     RegionNode* r = control()->as_Region();
1761 
1762     // Compute where to merge into
1763     // Merge incoming control path
1764     r->init_req(pnum, newin->control());
1765 
1766     if (pnum == 1) {            // Last merge for this Region?
1767       if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1768         Node* result = _gvn.transform(r);
1769         if (r != result && TraceOptoParse) {
1770           tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1771         }
1772       }
1773       record_for_igvn(r);
1774     }
1775 
1776     // Update all the non-control inputs to map:
1777     assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1778     bool check_elide_phi = target->is_SEL_backedge(save_block);

1779     for (uint j = 1; j < newin->req(); j++) {
1780       Node* m = map()->in(j);   // Current state of target.
1781       Node* n = newin->in(j);   // Incoming change to target state.
1782       PhiNode* phi;
1783       if (m->is_Phi() && m->as_Phi()->region() == r)
1784         phi = m->as_Phi();
1785       else


1786         phi = nullptr;

1787       if (m != n) {             // Different; must merge
1788         switch (j) {
1789         // Frame pointer and Return Address never change
1790         case TypeFunc::FramePtr:// Drop m, use the original value
1791         case TypeFunc::ReturnAdr:
1792           break;
1793         case TypeFunc::Memory:  // Merge inputs to the MergeMem node
1794           assert(phi == nullptr, "the merge contains phis, not vice versa");
1795           merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1796           continue;
1797         default:                // All normal stuff
1798           if (phi == nullptr) {
1799             const JVMState* jvms = map()->jvms();
1800             if (EliminateNestedLocks &&
1801                 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1802               // BoxLock nodes are not commoned when EliminateNestedLocks is on.
1803               // Use old BoxLock node as merged box.
1804               assert(newin->jvms()->is_monitor_box(j), "sanity");
1805               // This assert also tests that nodes are BoxLock.
1806               assert(BoxLockNode::same_slot(n, m), "sanity");

1813                 // Incremental Inlining before EA and Macro nodes elimination.
1814                 //
1815                 // Incremental Inlining is executed after IGVN optimizations
1816                 // during which BoxLock can be marked as Coarsened.
1817                 old_box->set_coarsened(); // Verifies state
1818                 old_box->set_unbalanced();
1819               }
1820               C->gvn_replace_by(n, m);
1821             } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1822               phi = ensure_phi(j, nophi);
1823             }
1824           }
1825           break;
1826         }
1827       }
1828       // At this point, n might be top if:
1829       //  - there is no phi (because TypeFlow detected a conflict), or
1830       //  - the corresponding control edge is top (a dead incoming path)
1831       // It is a bug if we create a phi which sees a garbage value on a live path.
1832 
1833       if (phi != nullptr) {

























1834         assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1835         assert(phi->region() == r, "");
1836         phi->set_req(pnum, n);  // Then add 'n' to the merge
1837         if (pnum == PhiNode::Input) {
1838           // Last merge for this Phi.
1839           // So far, Phis have had a reasonable type from ciTypeFlow.
1840           // Now _gvn will join that with the meet of current inputs.
1841           // BOTTOM is never permissible here, because pessimistically
1842           // Phis of pointers cannot lose the basic pointer type.
1843           DEBUG_ONLY(const Type* bt1 = phi->bottom_type());
1844           assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1845           map()->set_req(j, _gvn.transform(phi));
1846           DEBUG_ONLY(const Type* bt2 = phi->bottom_type());
1847           assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1848           record_for_igvn(phi);
1849         }
1850       }
1851     } // End of for all values to be merged
1852 
1853     if (pnum == PhiNode::Input &&
1854         !r->in(0)) {         // The occasional useless Region
1855       assert(control() == r, "");
1856       set_control(r->nonnull_req());
1857     }
1858 
1859     map()->merge_replaced_nodes_with(newin);
1860 
1861     // newin has been subsumed into the lazy merge, and is now dead.
1862     set_block(save_block);
1863 
1864     stop();                     // done with this guy, for now
1865   }
1866 
1867   if (TraceOptoParse) {
1868     tty->print_cr(" on path %d", pnum);
1869   }
1870 
1871   // Done with this parser state.
1872   assert(stopped(), "");
1873 }
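
     // Merge example (a sketch, not taken from this file): when two
     // predecessors reach the same bci with local #0 holding different values
     // v1 and v2, the merged map ends up with
     //
     //   Region(ctl1, ctl2)      // merged control
     //   Phi(Region, v1, v2)     // created via ensure_phi(), wired by set_req()
     //
     // in the control slot and the local #0 slot, respectively.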
1874 

1986 
1987   // Add new path to the region.
1988   uint pnum = r->req();
1989   r->add_req(nullptr);
1990 
1991   for (uint i = 1; i < map->req(); i++) {
1992     Node* n = map->in(i);
1993     if (i == TypeFunc::Memory) {
1994       // Ensure a phi on all currently known memories.
1995       for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
1996         Node* phi = mms.memory();
1997         if (phi->is_Phi() && phi->as_Phi()->region() == r) {
1998           assert(phi->req() == pnum, "must be same size as region");
1999           phi->add_req(nullptr);
2000         }
2001       }
2002     } else {
2003       if (n->is_Phi() && n->as_Phi()->region() == r) {
2004         assert(n->req() == pnum, "must be same size as region");
2005         n->add_req(nullptr);


2006       }
2007     }
2008   }
2009 
2010   return pnum;
2011 }
2012 
2013 //------------------------------ensure_phi-------------------------------------
2014 // Turn the idx'th entry of the current map into a Phi
2015 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2016   SafePointNode* map = this->map();
2017   Node* region = map->control();
2018   assert(region->is_Region(), "");
2019 
2020   Node* o = map->in(idx);
2021   assert(o != nullptr, "");
2022 
2023   if (o == top())  return nullptr; // TOP always merges into TOP
2024 
2025   if (o->is_Phi() && o->as_Phi()->region() == region) {
2026     return o->as_Phi();
2027   }




2028 
2029   // Now use a Phi here for merging
2030   assert(!nocreate, "Cannot build a phi for a block already parsed.");
2031   const JVMState* jvms = map->jvms();
2032   const Type* t = nullptr;
2033   if (jvms->is_loc(idx)) {
2034     t = block()->local_type_at(idx - jvms->locoff());
2035   } else if (jvms->is_stk(idx)) {
2036     t = block()->stack_type_at(idx - jvms->stkoff());
2037   } else if (jvms->is_mon(idx)) {
2038     assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2039     t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2040   } else if ((uint)idx < TypeFunc::Parms) {
2041     t = o->bottom_type();  // Type::RETURN_ADDRESS or such-like.
2042   } else {
2043     assert(false, "no type information for this phi");
2044   }
2045 
2046   // If the type falls to bottom, then this must be a local that
2047   // is mixing ints and oops or some such.  Forcing it to top
2048   // makes it go dead.
2049   if (t == Type::BOTTOM) {
2050     map->set_req(idx, top());
2051     return nullptr;
2052   }
2053 
2054   // Do not create phis for top either.
2055   // A top on a non-null control flow must be an unused value even after the phi.
2056   if (t == Type::TOP || t == Type::HALF) {
2057     map->set_req(idx, top());
2058     return nullptr;
2059   }
2060 
2061   PhiNode* phi = PhiNode::make(region, o, t);
2062   gvn().set_type(phi, t);
2063   if (C->do_escape_analysis()) record_for_igvn(phi);
2064   map->set_req(idx, phi);
2065   return phi;









2066 }
2067 
2068 //--------------------------ensure_memory_phi----------------------------------
2069 // Turn the idx'th slice of the current memory into a Phi
2070 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2071   MergeMemNode* mem = merged_memory();
2072   Node* region = control();
2073   assert(region->is_Region(), "");
2074 
2075   Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2076   assert(o != nullptr && o != top(), "");
2077 
2078   PhiNode* phi;
2079   if (o->is_Phi() && o->as_Phi()->region() == region) {
2080     phi = o->as_Phi();
2081     if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2082       // clone the shared base memory phi to make a new memory split
2083       assert(!nocreate, "Cannot build a phi for a block already parsed.");
2084       const Type* t = phi->bottom_type();
2085       const TypePtr* adr_type = C->get_adr_type(idx);

2175 // Add check to deoptimize once holder klass is fully initialized.
2176 void Parse::clinit_deopt() {
2177   assert(C->has_method(), "only for normal compilations");
2178   assert(depth() == 1, "only for main compiled method");
2179   assert(is_normal_parse(), "no barrier needed on osr entry");
2180   assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2181 
2182   set_parse_bci(0);
2183 
2184   Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2185   guard_klass_being_initialized(holder);
2186 }
2187 
2188 //------------------------------return_current---------------------------------
2189 // Append current _map to _exit_return
2190 void Parse::return_current(Node* value) {
2191   if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2192     call_register_finalizer();
2193   }
2194 


































2195   // Do not set_parse_bci, so that return goo is credited to the return insn.
2196   set_bci(InvocationEntryBci);
2197   if (method()->is_synchronized()) {
2198     shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2199   }
2200   if (C->env()->dtrace_method_probes()) {
2201     make_dtrace_method_exit(method());
2202   }

2203   SafePointNode* exit_return = _exits.map();
2204   exit_return->in( TypeFunc::Control  )->add_req( control() );
2205   exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
2206   Node *mem = exit_return->in( TypeFunc::Memory   );
2207   for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2208     if (mms.is_empty()) {
2209       // get a copy of the base memory, and patch just this one input
2210       const TypePtr* adr_type = mms.adr_type(C);
2211       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2212       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2213       gvn().set_type_bottom(phi);
2214       phi->del_req(phi->req()-1);  // prepare to re-patch
2215       mms.set_memory(phi);
2216     }
2217     mms.memory()->add_req(mms.memory2());
2218   }
2219 
2220   // frame pointer is always the same, already captured
2221   if (value != nullptr) {
2222     // If returning oops to an interface-return, there is a silent free
2223     // cast from oop to interface allowed by the Verifier.  Make it explicit
2224     // here.
2225     Node* phi = _exits.argument(0);
2226     phi->add_req(value);
2227   }
2228 
2229   if (_first_return) {
2230     _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2231     _first_return = false;
2232   } else {
2233     _exits.map()->merge_replaced_nodes_with(map());
2234   }
2235 
2236   stop_and_kill_map();          // This CFG path dies here
2237 }
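
     // Note (a summary): each return site processed here adds one more input
     // to the exit Region and its phis; as noted in build_exits(), they are
     // not transformed until do_exits().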
2238 
2239 
2240 //------------------------------add_safepoint----------------------------------
2241 void Parse::add_safepoint() {
2242   uint parms = TypeFunc::Parms+1;
2243 
2244   // Clear out dead values from the debug info.
2245   kill_dead_locals();
2246 
2247   // Clone the JVM State
2248   SafePointNode *sfpnt = new SafePointNode(parms, nullptr);

   1 /*
   2  * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/ciObjArrayKlass.hpp"
  26 #include "ci/ciSignature.hpp"
  27 #include "compiler/compileLog.hpp"
  28 #include "interpreter/linkResolver.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/method.hpp"
  31 #include "opto/addnode.hpp"
  32 #include "opto/c2compiler.hpp"
  33 #include "opto/castnode.hpp"
  34 #include "opto/convertnode.hpp"
  35 #include "opto/idealGraphPrinter.hpp"
  36 #include "opto/inlinetypenode.hpp"
  37 #include "opto/locknode.hpp"
  38 #include "opto/memnode.hpp"
  39 #include "opto/opaquenode.hpp"
  40 #include "opto/parse.hpp"
  41 #include "opto/rootnode.hpp"
  42 #include "opto/runtime.hpp"
  43 #include "opto/type.hpp"
  44 #include "runtime/arguments.hpp"
  45 #include "runtime/handles.inline.hpp"
  46 #include "runtime/safepointMechanism.hpp"
  47 #include "runtime/sharedRuntime.hpp"
  48 #include "utilities/bitMap.inline.hpp"
  49 #include "utilities/copy.hpp"
  50 
  51 // Static counters so we can figure out which bytecodes most often stop us
  52 // from compiling. Some of the non-static variables are needed in bytecodeInfo.cpp
  53 // and eventually should be encapsulated in a proper class (gri 8/18/98).
  54 
  55 #ifndef PRODUCT
  56 uint nodes_created             = 0;
  57 uint methods_parsed            = 0;
  58 uint methods_seen              = 0;
  59 uint blocks_parsed             = 0;
  60 uint blocks_seen               = 0;
  61 
  62 uint explicit_null_checks_inserted = 0;
  63 uint explicit_null_checks_elided   = 0;
  64 uint all_null_checks_found         = 0;

  89   }
  90   if (all_null_checks_found) {
  91     tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
  92                   (100*implicit_null_checks)/all_null_checks_found);
  93   }
  94   if (SharedRuntime::_implicit_null_throws) {
  95     tty->print_cr("%u implicit null exceptions at runtime",
  96                   SharedRuntime::_implicit_null_throws);
  97   }
  98 
  99   if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
 100     BytecodeParseHistogram::print();
 101   }
 102 }
 103 #endif
 104 
 105 //------------------------------ON STACK REPLACEMENT---------------------------
 106 
 107 // Construct a node which can be used to get incoming state for
 108 // on stack replacement.
 109 Node* Parse::fetch_interpreter_state(int index,
 110                                      const Type* type,
 111                                      Node* local_addrs) {
 112   BasicType bt = type->basic_type();
 113   if (type == TypePtr::NULL_PTR) {
 114     // Ptr types are mixed together with T_ADDRESS but nullptr is
 115     // really for T_OBJECT types so correct it.
 116     bt = T_OBJECT;
 117   }
 118   Node *mem = memory(Compile::AliasIdxRaw);
 119   Node *adr = basic_plus_adr(top(), local_addrs, -index*wordSize);
 120   Node *ctl = control();
 121 
 122   // Very similar to LoadNode::make, except we handle un-aligned longs and
 123   // doubles on Sparc.  Intel can handle them just fine directly.
 124   Node *l = nullptr;
 125   switch (bt) {                // Signature is flattened
 126   case T_INT:     l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        MemNode::unordered); break;
 127   case T_FLOAT:   l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         MemNode::unordered); break;
 128   case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,  MemNode::unordered); break;
 129   case T_OBJECT:  l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
 130   case T_LONG:
 131   case T_DOUBLE: {
 132     // Since arguments are in reverse order, the argument address 'adr'
 133     // refers to the back half of the long/double.  Recompute adr.
 134     adr = basic_plus_adr(top(), local_addrs, -(index+1)*wordSize);
 135     if (Matcher::misaligned_doubles_ok) {
 136       l = (bt == T_DOUBLE)
 137         ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
 138         : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
 139     } else {
 140       l = (bt == T_DOUBLE)
 141         ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
 142         : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
 143     }
 144     break;
 145   }
 146   default: ShouldNotReachHere();
 147   }
 148   return _gvn.transform(l);
 149 }
 150 
 151 // Helper routine to prevent the interpreter from handing
 152 // unexpected typestate to an OSR method.
 153 // The Node l is a value newly dug out of the interpreter frame.
 154 // The type is the type predicted by ciTypeFlow.  Note that it is
 155 // not a general type, but can only come from Type::get_typeflow_type.
 156 // The safepoint is a map which will feed an uncommon trap.
 157 Node* Parse::check_interpreter_type(Node* l, const Type* type, const TypeKlassPtr* klass_type,
 158                                     SafePointNode* &bad_type_exit, bool is_early_larval) {

 159   const TypeOopPtr* tp = type->isa_oopptr();
 160 
 161   // TypeFlow may assert null-ness if a type appears unloaded.
 162   if (type == TypePtr::NULL_PTR ||
 163       (tp != nullptr && !tp->is_loaded())) {
 164     // Value must be null, not a real oop.
 165     Node* chk = _gvn.transform( new CmpPNode(l, null()) );
 166     Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
 167     IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
 168     set_control(_gvn.transform( new IfTrueNode(iff) ));
 169     Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
 170     bad_type_exit->control()->add_req(bad_type);
 171     l = null();
 172   }
 173 
 174   // Typeflow can also cut off paths from the CFG, based on
 175   // types which appear unloaded, or call sites which appear unlinked.
 176   // When paths are cut off, values at later merge points can rise
 177   // toward more specific classes.  Make sure these specific classes
 178   // are still in effect.
 179   if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
 180     // TypeFlow asserted a specific object type.  Value must have that type.
 181     Node* bad_type_ctrl = nullptr;
 182     if (tp->is_inlinetypeptr() && !tp->maybe_null()) {
 183       // Check inline types for null here to prevent checkcast from adding an
 184       // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
 185       l = null_check_oop(l, &bad_type_ctrl);
 186       bad_type_exit->control()->add_req(bad_type_ctrl);
 187     }
 188 
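         // Note: gen_checkcast diverts the control of a failing type check
         // into bad_type_ctrl instead of trapping here; that path joins
         // bad_type_exit just below.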
 189     l = gen_checkcast(l, makecon(klass_type), &bad_type_ctrl, false, is_early_larval);
 190     bad_type_exit->control()->add_req(bad_type_ctrl);
 191   }
 192 
 193   assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
 194   return l;
 195 }
 196 
 197 // Helper routine which sets up elements of the initial parser map when
 198 // performing a parse for on stack replacement.  Add values into map.
 199 // The only parameter contains the address of the interpreter arguments area.
 200 void Parse::load_interpreter_state(Node* osr_buf) {
 201   int index;
 202   int max_locals = jvms()->loc_size();
 203   int max_stack  = jvms()->stk_size();
 204 

 205   // Mismatch between method and jvms can occur since map briefly held
 206   // an OSR entry state (which takes up one RawPtr word).
 207   assert(max_locals == method()->max_locals(), "sanity");
 208   assert(max_stack  >= method()->max_stack(),  "sanity");
 209   assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
 210   assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
 211 
 212   // Find the start block.
 213   Block* osr_block = start_block();
 214   assert(osr_block->start() == osr_bci(), "sanity");
 215 
 216   // Set initial BCI.
 217   set_parse_bci(osr_block->start());
 218 
 219   // Set initial stack depth.
 220   set_sp(osr_block->start_sp());
 221 
 222   // Check bailouts.  We currently do not perform on stack replacement
 223   // of loops in catch blocks or loops which branch with a non-empty stack.
 224   if (sp() != 0) {

 239   for (index = 0; index < mcnt; index++) {
 240     // Make a BoxLockNode for the monitor.
 241     BoxLockNode* osr_box = new BoxLockNode(next_monitor());
 242     // Check for bailout after new BoxLockNode
 243     if (failing()) { return; }
 244 
 245     // This OSR locking region is unbalanced because it has no Lock node:
 246     // the locking was done in the interpreter.
 247     // This is similar to the Coarsened case, where the Lock node is eliminated
 248     // and, as a result, the region is marked as Unbalanced.
 249 
 250     // Emulate Coarsened state transition from Regular to Unbalanced.
 251     osr_box->set_coarsened();
 252     osr_box->set_unbalanced();
 253 
 254     Node* box = _gvn.transform(osr_box);
 255 
 256     // Displaced headers and locked objects are interleaved in the
 257     // temp OSR buffer.  We only copy the locked objects out here.
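         // A sketch of that interleaving (assuming entries run toward lower
         // addresses, as the negative index scaling in fetch_interpreter_state
         // suggests):
         //   monitors_addr - (2*i)*wordSize     : locked object i
         //   monitors_addr - (2*i + 1)*wordSize : its displaced header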
 258     // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
 259     Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr);
 260     // Try to copy the displaced header to the BoxNode
 261     Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr);

 262 
 263     store_to_memory(control(), box, displaced_hdr, T_ADDRESS, MemNode::unordered);
 264 
 265     // Build a bogus FastLockNode (no code will be generated) and push the
 266     // monitor into our debug info.
 267     const FastLockNode *flock = _gvn.transform(new FastLockNode( nullptr, lock_object, box ))->as_FastLock();
 268     map()->push_monitor(flock);
 269 
 270     // If the lock is our method synchronization lock, tuck it away in
 271     // _sync_lock for return and rethrow exit paths.
 272     if (index == 0 && method()->is_synchronized()) {
 273       _synch_lock = flock;
 274     }
 275   }
 276 
 277   // Use the raw liveness computation to make sure that unexpected
 278   // values don't propagate into the OSR frame.
 279   MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
 280   if (!live_locals.is_valid()) {
 281     // Degenerate or breakpointed method.

 309         if (C->log() != nullptr) {
 310           C->log()->elem("OSR_mismatch local_index='%d'",index);
 311         }
 312         set_local(index, null());
 313         // and ignore it for the loads
 314         continue;
 315       }
 316     }
 317 
 318     // Filter out TOP, HALF, and BOTTOM.  (Cf. ensure_phi.)
 319     if (type == Type::TOP || type == Type::HALF) {
 320       continue;
 321     }
 322     // If the type falls to bottom, then this must be a local that
 323     // is mixing ints and oops or some such.  Forcing it to top
 324     // makes it go dead.
 325     if (type == Type::BOTTOM) {
 326       continue;
 327     }
 328     // Construct code to access the appropriate local.
 329     Node* value = fetch_interpreter_state(index, type, locals_addr);






 330     set_local(index, value);
 331   }
 332 
 333   // Extract the needed stack entries from the interpreter frame.
 334   for (index = 0; index < sp(); index++) {
 335     const Type *type = osr_block->stack_type_at(index);
 336     if (type != Type::TOP) {
 337       // Currently the compiler bails out when attempting to on stack replace
 338       // at a bci with a non-empty stack.  We should not reach here.
 339       ShouldNotReachHere();
 340     }
 341   }
 342 
 343   // End the OSR migration
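       // This is a leaf call into the runtime; it releases the temporary
       // OSR buffer now that its contents have been copied into the
       // compiled frame's state.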
 344   make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
 345                     CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
 346                     "OSR_migration_end", TypeRawPtr::BOTTOM,
 347                     osr_buf);
 348 
 349   // Now that the interpreter state is loaded, make sure it will match

 360     if (type->isa_oopptr() != nullptr) {
 361       if (!live_oops.at(index)) {
 362         // skip type check for dead oops
 363         continue;
 364       }
 365     }
 366     if (osr_block->flow()->local_type_at(index)->is_return_address()) {
 367       // In our current system it's illegal for jsr addresses to be
 368       // live into an OSR entry point because the compiler performs
 369       // inlining of jsrs.  ciTypeFlow has a bailout that detects this
 370       // case and aborts the compile if addresses are live into an OSR
 371       // entry point.  Because of that we can assume that any address
 372       // locals at the OSR entry point are dead.  Method liveness
 373       // isn't precise enough to figure out that they are dead in all
 374       // cases, so simply skip checking address locals
 375       // altogether. Any type check is guaranteed to fail since the
 376       // interpreter type is the result of a load which might have any
 377       // value and the expected type is a constant.
 378       continue;
 379     }
 380     const TypeKlassPtr* klass_type = nullptr;
 381     if (type->isa_oopptr()) {
 382       klass_type = TypeKlassPtr::make(osr_block->flow()->local_type_at(index)->unwrap()->as_klass(), Type::ignore_interfaces);
 383       klass_type = klass_type->try_improve();
 384     }
 385     bool is_early_larval = osr_block->flow()->local_type_at(index)->is_early_larval();
 386     set_local(index, check_interpreter_type(l, type, klass_type, bad_type_exit, is_early_larval));
 387   }
 388 
 389   for (index = 0; index < sp(); index++) {
 390     if (stopped())  break;
 391     Node* l = stack(index);
 392     if (l->is_top())  continue;  // nothing here
 393     const Type* type = osr_block->stack_type_at(index);
 394     const TypeKlassPtr* klass_type = nullptr;
 395     if (type->isa_oopptr()) {
 396       klass_type = TypeKlassPtr::make(osr_block->flow()->stack_type_at(index)->unwrap()->as_klass(), Type::ignore_interfaces);
 397       klass_type = klass_type->try_improve();
 398     }
 399     bool is_early_larval = osr_block->flow()->stack_type_at(index)->is_early_larval();
 400     set_stack(index, check_interpreter_type(l, type, klass_type, bad_type_exit, is_early_larval));
 401   }
 402 
 403   if (bad_type_exit->control()->req() > 1) {
 404     // Build an uncommon trap here, if any inputs can be unexpected.
 405     bad_type_exit->set_control(_gvn.transform( bad_type_exit->control() ));
 406     record_for_igvn(bad_type_exit->control());
 407     SafePointNode* types_are_good = map();
 408     set_map(bad_type_exit);
 409     // The unexpected type happens because a new edge is active
 410     // in the CFG, which typeflow had previously ignored.
 411     // E.g., Object x = coldAtFirst() && notReached() ? "str" : new Integer(123).
 412     // This x will be typed as Integer if notReached is not yet linked.
 413     // It could also happen due to a problem in ciTypeFlow analysis.
 414     uncommon_trap(Deoptimization::Reason_constraint,
 415                   Deoptimization::Action_reinterpret);
 416     set_map(types_are_good);
 417   }
 418 }
 419 
 420 //------------------------------Parse------------------------------------------

 521   // either breakpoint setting or hotswapping of methods may
 522   // cause deoptimization.
 523   if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
 524     C->dependencies()->assert_evol_method(method());
 525   }
 526 
 527   NOT_PRODUCT(methods_seen++);
 528 
 529   // Do some special top-level things.
 530   if (depth() == 1 && C->is_osr_compilation()) {
 531     _tf = C->tf();     // the OSR entry type is different
 532     _entry_bci = C->entry_bci();
 533     _flow = method()->get_osr_flow_analysis(osr_bci());
 534   } else {
 535     _tf = TypeFunc::make(method());
 536     _entry_bci = InvocationEntryBci;
 537     _flow = method()->get_flow_analysis();
 538   }
 539 
 540   if (_flow->failing()) {
 541     // TODO Adding a trap due to an unloaded return type in ciTypeFlow::StateVector::do_invoke
 542     // can lead to this. Re-enable once 8284443 is fixed.
 543     //assert(false, "type flow analysis failed during parsing");
 544     C->record_method_not_compilable(_flow->failure_reason());
 545 #ifndef PRODUCT
 546       if (PrintOpto && (Verbose || WizardMode)) {
 547         if (is_osr_parse()) {
 548           tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
 549         } else {
 550           tty->print_cr("type flow bailout: %s", _flow->failure_reason());
 551         }
 552         if (Verbose) {
 553           method()->print();
 554           method()->print_codes();
 555           _flow->print();
 556         }
 557       }
 558 #endif
 559   }
 560 
 561 #ifdef ASSERT
 562   if (depth() == 1) {
 563     assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");

 799 void Parse::build_exits() {
 800   // make a clone of caller to prevent sharing of side-effects
 801   _exits.set_map(_exits.clone_map());
 802   _exits.clean_stack(_exits.sp());
 803   _exits.sync_jvms();
 804 
 805   RegionNode* region = new RegionNode(1);
 806   record_for_igvn(region);
 807   gvn().set_type_bottom(region);
 808   _exits.set_control(region);
 809 
 810   // Note:  iophi and memphi are not transformed until do_exits.
 811   Node* iophi  = new PhiNode(region, Type::ABIO);
 812   Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
 813   gvn().set_type_bottom(iophi);
 814   gvn().set_type_bottom(memphi);
 815   _exits.set_i_o(iophi);
 816   _exits.set_all_memory(memphi);
 817 
 818   // Add a return value to the exit state.  (Do not push it yet.)
 819   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
 820     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
 821     if (ret_type->isa_int()) {
 822       BasicType ret_bt = method()->return_type()->basic_type();
 823       if (ret_bt == T_BOOLEAN ||
 824           ret_bt == T_CHAR ||
 825           ret_bt == T_BYTE ||
 826           ret_bt == T_SHORT) {
 827         ret_type = TypeInt::INT;
 828       }
 829     }
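         // (Subword return values travel through the ret_phi widened to a
         // full int; they are masked back to their declared width on exit,
         // via mask_int_value in do_exits.)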
 830 
 831     // Don't "bind" an unloaded return klass to the ret_phi. If the klass
 832     // becomes loaded during the subsequent parsing, the loaded and unloaded
 833     // types will not join when we transform and push in do_exits().
 834     const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
 835     if (ret_oop_type && !ret_oop_type->is_loaded()) {
 836       ret_type = TypeOopPtr::BOTTOM;
 837     }
 838     int         ret_size = type2size[ret_type->basic_type()];
 839     Node*       ret_phi  = new PhiNode(region, ret_type);
 840     gvn().set_type_bottom(ret_phi);
 841     _exits.ensure_stack(ret_size);
 842     assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
 843     assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
 844     _exits.set_argument(0, ret_phi);  // here is where the parser finds it
 845     // Note:  ret_phi is not yet pushed, until do_exits.
 846   }
 847 }
 848 

 849 //----------------------------build_start_state-------------------------------
 850 // Construct a state which contains only the incoming arguments from an
 851 // unknown caller.  The method & bci will be null & InvocationEntryBci.
 852 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
 853   int        arg_size = tf->domain_sig()->cnt();
 854   int        max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
 855   JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
 856   SafePointNode* map  = new SafePointNode(max_size, jvms);
 857   jvms->set_map(map);
 858   record_for_igvn(map);
 859   assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
 860   Node_Notes* old_nn = default_node_notes();
 861   if (old_nn != nullptr && has_method()) {
 862     Node_Notes* entry_nn = old_nn->clone(this);
 863     JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
 864     entry_jvms->set_offsets(0);
 865     entry_jvms->set_bci(entry_bci());
 866     entry_nn->set_jvms(entry_jvms);
 867     set_default_node_notes(entry_nn);
 868   }
 869   PhaseGVN& gvn = *initial_gvn();
 870   uint i = 0;
 871   int arg_num = 0;
 872   for (uint j = 0; i < (uint)arg_size; i++) {
 873     const Type* t = tf->domain_sig()->field_at(i);
 874     Node* parm = nullptr;
 875     if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
 876       // Inline type arguments are not passed by reference: we get an argument per
 877       // field of the inline type. Build InlineTypeNodes from the inline type arguments.
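           // Roughly: a scalarized value-class parameter with fields (x, y)
           // arrives as one incoming argument per field (plus, presumably, a
           // null marker when it is nullable) and is reassembled here.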
 878       GraphKit kit(jvms, &gvn);
 879       kit.set_control(map->control());
 880       Node* old_mem = map->memory();
 881       // Use immutable memory for inline type loads and restore it below
 882       kit.set_all_memory(C->immutable_memory());
 883       parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
 884       map->set_control(kit.control());
 885       map->set_memory(old_mem);
 886     } else {
 887       parm = gvn.transform(new ParmNode(start, j++));
 888     }
 889     map->init_req(i, parm);
 890     // Record all these guys for later GVN.
 891     record_for_igvn(parm);
 892     if (i >= TypeFunc::Parms && t != Type::HALF) {
 893       arg_num++;
 894     }
 895   }
 896   for (; i < map->req(); i++) {
 897     map->init_req(i, top());
 898   }
 899   assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
 900   set_default_node_notes(old_nn);

 901   return jvms;
 902 }
 903 
 904 //-----------------------------make_node_notes---------------------------------
 905 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
 906   if (caller_nn == nullptr)  return nullptr;
 907   Node_Notes* nn = caller_nn->clone(C);
 908   JVMState* caller_jvms = nn->jvms();
 909   JVMState* jvms = new (C) JVMState(method(), caller_jvms);
 910   jvms->set_offsets(0);
 911   jvms->set_bci(_entry_bci);
 912   nn->set_jvms(jvms);
 913   return nn;
 914 }
 915 
 916 
 917 //--------------------------return_values--------------------------------------
 918 void Compile::return_values(JVMState* jvms) {
 919   GraphKit kit(jvms);
 920   Node* ret = new ReturnNode(TypeFunc::Parms,
 921                              kit.control(),
 922                              kit.i_o(),
 923                              kit.reset_memory(),
 924                              kit.frameptr(),
 925                              kit.returnadr());
 926   // Add zero or one return value
 927   int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
 928   if (ret_size > 0) {
 929     kit.inc_sp(-ret_size);  // pop the return value(s)
 930     kit.sync_jvms();
 931     Node* res = kit.argument(0);
 932     if (tf()->returns_inline_type_as_fields()) {
 933       // Multiple return values (inline type fields): add as many edges
 934       // to the Return node as returned values.
 935       InlineTypeNode* vt = res->as_InlineType();
 936       ret->add_req_batch(nullptr, tf()->range_cc()->cnt() - TypeFunc::Parms);
 937       if (vt->is_allocated(&kit.gvn()) && !StressCallingConvention) {
 938         ret->init_req(TypeFunc::Parms, vt);
 939       } else {
 940         // Return the tagged klass pointer to signal scalarization to the caller
 941         Node* tagged_klass = vt->tagged_klass(kit.gvn());
 942         // Return null if the inline type is null (null marker field is not set)
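             // The shift pair below turns the marker (0 or 1) into a mask:
             //   marker == 1 : (1L << 63) >> 63 == -1 (all ones), so the
             //                 AndL keeps tagged_klass
             //   marker == 0 : the mask is 0, so the AndL yields 0 (null)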
 943         Node* conv   = kit.gvn().transform(new ConvI2LNode(vt->get_null_marker()));
 944         Node* shl    = kit.gvn().transform(new LShiftLNode(conv, kit.intcon(63)));
 945         Node* shr    = kit.gvn().transform(new RShiftLNode(shl, kit.intcon(63)));
 946         tagged_klass = kit.gvn().transform(new AndLNode(tagged_klass, shr));
 947         ret->init_req(TypeFunc::Parms, tagged_klass);
 948       }
 949       uint idx = TypeFunc::Parms + 1;
 950       vt->pass_fields(&kit, ret, idx, false, false);
 951     } else {
 952       ret->add_req(res);
 953       // Note:  The second dummy edge is not needed by a ReturnNode.
 954     }
 955   }
 956   // bind it to root
 957   root()->add_req(ret);
 958   record_for_igvn(ret);
 959   initial_gvn()->transform(ret);
 960 }
 961 
 962 //------------------------rethrow_exceptions-----------------------------------
 963 // Bind all exception states in the list into a single RethrowNode.
 964 void Compile::rethrow_exceptions(JVMState* jvms) {
 965   GraphKit kit(jvms);
 966   if (!kit.has_exceptions())  return;  // nothing to generate
 967   // Load my combined exception state into the kit, with all phis transformed:
 968   SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
 969   Node* ex_oop = kit.use_exception_state(ex_map);
 970   RethrowNode* exit = new RethrowNode(kit.control(),
 971                                       kit.i_o(), kit.reset_memory(),
 972                                       kit.frameptr(), kit.returnadr(),
 973                                       // like a return but with exception input
 974                                       ex_oop);

1058   //    to complete, we force all writes to complete.
1059   //
1060   // 2. Experimental VM option is used to force the barrier if any field
1061   //    was written out in the constructor.
1062   //
1063   // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1064   //    support_IRIW_for_not_multiple_copy_atomic_cpu selects that
1065   //    MemBarVolatile is used before volatile load instead of after volatile
1066   //    store, so there's no barrier after the store.
1067   //    We want to guarantee the same behavior as on platforms with total store
1068   //    order, although this is not required by the Java memory model.
1069   //    In this case, we want to enforce visibility of volatile field
1070   //    initializations which are performed in constructors.
1071   //    So as with finals, we add a barrier here.
1072   //
1073   // "All bets are off" unless the first publication occurs after a
1074   // normal return from the constructor.  We do not attempt to detect
1075   // such unusual early publications.  But no barrier is needed on
1076   // exceptional returns, since they cannot publish normally.
1077   //
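        // An illustrative publication pattern the barrier guards against:
        //   class C { final int f;  C() { f = 42; } }
        //   writer: shared = new C();    reader: C c = shared; ... c.f ...
        // Without the barrier, a reader on weakly ordered hardware could
        // observe c.f == 0.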
1078   if ((method()->is_object_constructor() || method()->is_class_initializer()) &&
1079        (wrote_final() || wrote_stable() ||
1080          (AlwaysSafeConstructors && wrote_fields()) ||
1081          (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1082     Node* recorded_alloc = alloc_with_final_or_stable();
1083     _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease,
1084                           recorded_alloc);
1085 
1086     // If Memory barrier is created for final fields write
1087     // and allocation node does not escape the initialize method,
1088     // then barrier introduced by allocation node can be removed.
1089     if (DoEscapeAnalysis && (recorded_alloc != nullptr)) {
1090       AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc);
1091       alloc->compute_MemBar_redundancy(method());
1092     }
1093     if (PrintOpto && (Verbose || WizardMode)) {
1094       method()->print_name();
1095       tty->print_cr(" writes finals/@Stable and needs a memory barrier");
1096     }
1097   }
1098 
1099   for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1100     // transform each slice of the original memphi:
1101     mms.set_memory(_gvn.transform(mms.memory()));
1102   }
1103   // Clean up input MergeMems created by transforming the slices
1104   _gvn.transform(_exits.merged_memory());
1105 
1106   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
1107     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
1108     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
1109     if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1110       // If the type we set for the ret_phi in build_exits() is too optimistic and
1111       // the ret_phi is top now, there's an extremely small chance that it may be due to class
1112       // loading.  It could also be due to an error, so mark this method as not compilable because
1113       // otherwise this could lead to an infinite compile loop.
1114       // In any case, this code path is rarely (and never in my testing) reached.
1115       C->record_method_not_compilable("Can't determine return type.");
1116       return;
1117     }
1118     if (ret_type->isa_int()) {
1119       BasicType ret_bt = method()->return_type()->basic_type();
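            // Mask subword values (boolean/byte/char/short) back down to
            // their declared width; the phi may carry them widened to int.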
1120       ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1121     }
1122     _exits.push_node(ret_type->basic_type(), ret_phi);
1123   }
1124 
1125   // Note:  Logic for creating and optimizing the ReturnNode is in Compile.
1126 
1127   // Unlock along the exceptional paths.

1181 
1182 //-----------------------------create_entry_map-------------------------------
1183 // Initialize our parser map to contain the types at method entry.
1184 // For OSR, the map contains a single RawPtr parameter.
1185 // Initial monitor locking for sync. methods is performed by do_method_entry.
1186 SafePointNode* Parse::create_entry_map() {
1187   // Check for really stupid bail-out cases.
1188   uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1189   if (len >= 32760) {
 1190     // Bailout expected; this is a very rare edge case.
1191     C->record_method_not_compilable("too many local variables");
1192     return nullptr;
1193   }
1194 
1195   // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1196   _caller->map()->delete_replaced_nodes();
1197 
1198   // If this is an inlined method, we may have to do a receiver null check.
1199   if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1200     GraphKit kit(_caller);
1201     Node* receiver = kit.argument(0);
1202     Node* null_free = kit.null_check_receiver_before_call(method());
1203     _caller = kit.transfer_exceptions_into_jvms();
1204 
1205     if (kit.stopped()) {
1206       _exits.add_exception_states_from(_caller);
1207       _exits.set_jvms(_caller);
1208       return nullptr;
1209     }
1210   }
1211 
1212   assert(method() != nullptr, "parser must have a method");
1213 
1214   // Create an initial safepoint to hold JVM state during parsing
1215   JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1216   set_map(new SafePointNode(len, jvms));
1217 
1218   // Capture receiver info for compiled lambda forms.
1219   if (method()->is_compiled_lambda_form()) {
1220     ciInstance* recv_info = _caller->compute_receiver_info(method());
1221     jvms->set_receiver_info(recv_info);
1222   }
1223 
1224   jvms->set_map(map());

1228   SafePointNode* inmap = _caller->map();
1229   assert(inmap != nullptr, "must have inmap");
1230   // In case of null check on receiver above
1231   map()->transfer_replaced_nodes_from(inmap, _new_idx);
1232 
1233   uint i;
1234 
1235   // Pass thru the predefined input parameters.
1236   for (i = 0; i < TypeFunc::Parms; i++) {
1237     map()->init_req(i, inmap->in(i));
1238   }
1239 
1240   if (depth() == 1) {
1241     assert(map()->memory()->Opcode() == Op_Parm, "");
1242     // Insert the memory aliasing node
1243     set_all_memory(reset_memory());
1244   }
1245   assert(merged_memory(), "");
1246 
1247   // Now add the locals which are initially bound to arguments:
1248   uint arg_size = tf()->domain_sig()->cnt();
1249   ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
1250   for (i = TypeFunc::Parms; i < arg_size; i++) {
1251     map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1252   }
1253 
1254   // Clear out the rest of the map (locals and stack)
1255   for (i = arg_size; i < len; i++) {
1256     map()->init_req(i, top());
1257   }
1258 
1259   SafePointNode* entry_map = stop();
1260   return entry_map;
1261 }
1262 
1263 //-----------------------------do_method_entry--------------------------------
1264 // Emit any code needed in the pseudo-block before BCI zero.
1265 // The main thing to do is lock the receiver of a synchronized method.
1266 void Parse::do_method_entry() {
1267   set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1268   set_sp(0);                         // Java Stack Pointer
1269 
1270   NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
1271 
1272   // Check if we need a membar at the beginning of the java.lang.Object
1273   // constructor to satisfy the memory model for strict fields.
1274   if (Arguments::is_valhalla_enabled() && method()->intrinsic_id() == vmIntrinsics::_Object_init) {
1275     Node* receiver_obj = local(0);
1276     const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1277     // If there's no exact type, check if the declared type has no implementors and add a dependency
1278     const TypeKlassPtr* klass_ptr = receiver_type->as_klass_type(/* try_for_exact= */ true);
1279     ciType* klass = klass_ptr->klass_is_exact() ? klass_ptr->exact_klass() : nullptr;
1280     if (klass != nullptr && klass->is_instance_klass()) {
1281       // Exact receiver type, check if there is a strict field
1282       ciInstanceKlass* holder = klass->as_instance_klass();
1283       for (int i = 0; i < holder->nof_nonstatic_fields(); i++) {
1284         ciField* field = holder->nonstatic_field_at(i);
1285         if (field->is_strict()) {
1286           // Found a strict field, a membar is needed
1287           AllocateNode* alloc = AllocateNode::Ideal_allocation(receiver_obj);
1288           insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease, receiver_obj);
1289           if (DoEscapeAnalysis && (alloc != nullptr)) {
1290             alloc->compute_MemBar_redundancy(method());
1291           }
1292           break;
1293         }
1294       }
1295     } else if (klass == nullptr) {
1296       // We can't statically determine the type of the receiver and therefore need
1297       // to put a membar here because it could have a strict field.
1298       insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease);
1299     }
1300   }
1301 
1302   if (C->env()->dtrace_method_probes()) {
1303     make_dtrace_method_entry(method());
1304   }
1305 
1306 #ifdef ASSERT
1307   // Narrow receiver type when it is too broad for the method being parsed.
1308   if (!method()->is_static()) {
1309     ciInstanceKlass* callee_holder = method()->holder();
1310     const Type* holder_type = TypeInstPtr::make(TypePtr::BotPTR, callee_holder, Type::trust_interfaces);
1311 
1312     Node* receiver_obj = local(0);
1313     const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr();
1314 
1315     if (receiver_type != nullptr && !receiver_type->higher_equal(holder_type)) {
1316       // Receiver should always be a subtype of callee holder.
1317       // But, since C2 type system doesn't properly track interfaces,
1318       // the invariant can't be expressed in the type system for default methods.
1319       // Example: for unrelated C <: I and D <: I, (C `meet` D) = Object </: I.
1320       assert(callee_holder->is_interface(), "missing subtype check");
1321 

1331 
1332   // If the method is synchronized, we need to construct a lock node, attach
1333   // it to the Start node, and pin it there.
1334   if (method()->is_synchronized()) {
1335     // Insert a FastLockNode right after the Start which takes as arguments
1336     // the current thread pointer, the "this" pointer & the address of the
1337     // stack slot pair used for the lock.  The "this" pointer is a projection
1338     // off the start node, but the locking spot has to be constructed by
1339     // creating a ConLNode of 0, and boxing it with a BoxLockNode.  The BoxLockNode
1340     // becomes the second argument to the FastLockNode call.  The
1341     // FastLockNode becomes the new control parent to pin it to the start.
1342 
1343     // Setup Object Pointer
1344     Node *lock_obj = nullptr;
1345     if (method()->is_static()) {
1346       ciInstance* mirror = _method->holder()->java_mirror();
1347       const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1348       lock_obj = makecon(t_lock);
1349     } else {                  // Else pass the "this" pointer,
1350       lock_obj = local(0);    // which is Parm0 from StartNode
1351       assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
1352     }
1353     // Clear out dead values from the debug info.
1354     kill_dead_locals();
1355     // Build the FastLockNode
1356     _synch_lock = shared_lock(lock_obj);
1357     // Check for bailout in shared_lock
1358     if (failing()) { return; }
1359   }
1360 
1361   // Feed profiling data for parameters to the type system so it can
1362   // propagate it as speculative types
1363   record_profiled_parameters_for_speculation();
1364 
1365   // More argument handling
1366   int arg_size = method()->arg_size();
1367   for (int i = 0; i < arg_size; i++) {
1368     Node* parm = local(i);
1369     const Type* t = _gvn.type(parm);
1370     if (t->is_inlinetypeptr()) {
1371       // If the parameter is a value object, try to scalarize it if we know that it is unrestricted (not early larval)
1372       // Parameters are non-larval except the receiver of a constructor, which must be an early larval object.
1373       if (!(method()->is_object_constructor() && i == 0)) {
1374         // Create InlineTypeNode from the oop and replace the parameter
1375         Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass());
1376         replace_in_map(parm, vt);
1377       }
1378     } else if (UseTypeSpeculation && (i == (arg_size - 1)) && depth() == 1 && method()->has_vararg() && t->isa_aryptr()) {
1379       // Speculate on varargs Object array being the default array refined type. The assumption is
1380       // that a vararg method test(Object... o) is often called as test(o1, o2, o3). javac will
1381       // translate the call so that the caller will create a new default array of Object, put o1,
1382       // o2, o3 into the newly created array, then invoke the method test. This only makes sense if
1383       // the method we are parsing is the top-level method of the compilation unit. Otherwise, if
1384       // it is truly called according to our assumption, we must know the exact type of the
1385       // argument because the allocation happens inside the compilation unit.
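            // Illustrative shape of the call javac emits for that pattern:
            //   static void test(Object... o) { ... }
            //   test(o1, o2, o3);  // becomes: test(new Object[]{ o1, o2, o3 });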
1386       const TypePtr* spec_type = (t->speculative() != nullptr) ? t->speculative() : t->remove_speculative()->is_aryptr();
1387       ciSignature* method_signature = method()->signature();
1388       ciType* parm_citype = method_signature->type_at(method_signature->count() - 1);
1389       if (!parm_citype->is_obj_array_klass()) {
1390         continue;
1391       }
1392 
1393       ciObjArrayKlass* spec_citype = ciObjArrayKlass::make(parm_citype->as_obj_array_klass()->element_klass(), true);
1394       const Type* improved_spec_type = TypeKlassPtr::make(spec_citype, Type::trust_interfaces)->as_instance_type();
1395       improved_spec_type = improved_spec_type->join(spec_type)->join(TypePtr::NOTNULL);
1396       if (improved_spec_type->empty()) {
1397         continue;
1398       }
1399 
1400       const TypePtr* improved_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, improved_spec_type->is_ptr());
1401       improved_type = improved_type->join_speculative(t)->is_ptr();
1402       if (improved_type != t) {
1403         Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, improved_type, ConstraintCastNode::DependencyType::NonFloatingNarrowing));
1404         replace_in_map(parm, cast);
1405       }
1406     }
1407   }
1408 }
1409 
1410 //------------------------------init_blocks------------------------------------
1411 // Initialize our parser map to contain the types/monitors at method entry.
1412 void Parse::init_blocks() {
1413   // Create the blocks.
1414   _block_count = flow()->block_count();
1415   _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1416 
1417   // Initialize the structs.
1418   for (int rpo = 0; rpo < block_count(); rpo++) {
1419     Block* block = rpo_at(rpo);
1420     new(block) Block(this, rpo);
1421   }
1422 
1423   // Collect predecessor and successor information.
1424   for (int rpo = 0; rpo < block_count(); rpo++) {
1425     Block* block = rpo_at(rpo);
1426     block->init_graph(this);
1427   }

1811 //--------------------handle_missing_successor---------------------------------
1812 void Parse::handle_missing_successor(int target_bci) {
1813 #ifndef PRODUCT
1814   Block* b = block();
1815   int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1816   tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1817 #endif
1818   ShouldNotReachHere();
1819 }
1820 
1821 //--------------------------merge_common---------------------------------------
1822 void Parse::merge_common(Parse::Block* target, int pnum) {
1823   if (TraceOptoParse) {
1824     tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1825   }
1826 
1827   // Zap extra stack slots to top
1828   assert(sp() == target->start_sp(), "");
1829   clean_stack(sp());
1830 
1831   // Check for merge conflicts involving inline types
1832   JVMState* old_jvms = map()->jvms();
1833   int old_bci = bci();
1834   JVMState* tmp_jvms = old_jvms->clone_shallow(C);
1835   tmp_jvms->set_should_reexecute(true);
1836   tmp_jvms->bind_map(map());
 1837   // Execution needs to restart at the next bytecode (the entry of the
 1838   // next block).
1839   if (target->is_merged() ||
1840       pnum > PhiNode::Input ||
1841       target->is_handler() ||
1842       target->is_loop_head()) {
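          // (These are the cases where the target already has, or will get,
          // a Region and phis, so incoming inline-type values must first be
          // put into a mergeable shape.)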
1843     set_parse_bci(target->start());
1844     for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1845       Node* n = map()->in(j);                 // Incoming change to target state.
1846       const Type* t = nullptr;
1847       ciType* ct = nullptr;
1848       if (tmp_jvms->is_loc(j)) {
1849         int loc_idx = j - tmp_jvms->locoff();
1850         t = target->local_type_at(loc_idx);
1851         ct = target->flow()->local_type_at(loc_idx);
1852       } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1853         int stk_idx = j - tmp_jvms->stkoff();
1854         t = target->stack_type_at(stk_idx);
1855         ct = target->flow()->stack_type_at(stk_idx);
1856       }
1857       if (t != nullptr && t != Type::BOTTOM) {
1858         // An object can appear in the JVMS as either an oop or an InlineTypeNode. If the merge is
1859         // an InlineTypeNode, we need all the merge inputs to be InlineTypeNodes. Else, if the
 1860         // merge is an oop, each merge input needs to be either an oop or a buffered
1861         // InlineTypeNode.
1862         if (!t->is_inlinetypeptr()) {
1863           // The merge cannot be an InlineTypeNode, ensure the input is buffered if it is an
1864           // InlineTypeNode
1865           if (n->is_InlineType()) {
1866             map()->set_req(j, n->as_InlineType()->buffer(this));
1867           }
1868         } else {
1869           // Scalarize the value object if it is not larval
1870           if (!n->is_InlineType() && !ct->is_early_larval()) {
1871             assert(_gvn.type(n) == TypePtr::NULL_PTR, "must be a null constant");
1872             map()->set_req(j, InlineTypeNode::make_null(_gvn, t->inline_klass()));
1873           }
1874         }
1875       }
1876     }
1877   }
1878   old_jvms->bind_map(map());
1879   set_parse_bci(old_bci);
1880 
1881   if (!target->is_merged()) {   // No prior mapping at this bci
1882     if (TraceOptoParse) { tty->print(" with empty state");  }
1883 
1884     // If this path is dead, do not bother capturing it as a merge.
1885     // It is "as if" we had 1 fewer predecessors from the beginning.
1886     if (stopped()) {
1887       if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
1888       return;
1889     }
1890 
1891     // Make a region if we know there are multiple or unpredictable inputs.
1892     // (Also, if this is a plain fall-through, we might see another region,
1893     // which must not be allowed into this block's map.)
1894     if (pnum > PhiNode::Input         // Known multiple inputs.
1895         || target->is_handler()       // These have unpredictable inputs.
1896         || target->is_loop_head()     // Known multiple inputs
1897         || control()->is_Region()) {  // We must hide this guy.
1898 
1899       int current_bci = bci();
1900       set_parse_bci(target->start()); // Set target bci

1915       record_for_igvn(r);
1916       // zap all inputs to null for debugging (done in Node(uint) constructor)
1917       // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1918       r->init_req(pnum, control());
1919       set_control(r);
1920       target->copy_irreducible_status_to(r, jvms());
1921       set_parse_bci(current_bci); // Restore bci
1922     }
1923 
1924     // Convert the existing Parser mapping into a mapping at this bci.
1925     store_state_to(target);
1926     assert(target->is_merged(), "do not come here twice");
1927 
1928   } else {                      // Prior mapping at this bci
1929     if (TraceOptoParse) {  tty->print(" with previous state"); }
1930 #ifdef ASSERT
1931     if (target->is_SEL_head()) {
1932       target->mark_merged_backedge(block());
1933     }
1934 #endif
1935 
1936     // We must not manufacture more phis if the target is already parsed.
1937     bool nophi = target->is_parsed();
1938 
1939     SafePointNode* newin = map();// Hang on to incoming mapping
1940     Block* save_block = block(); // Hang on to incoming block;
1941     load_state_from(target);    // Get prior mapping
1942 
1943     assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1944     assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1945     assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1946     assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1947 
1948     // Iterate over my current mapping and the old mapping.
1949     // Where different, insert Phi functions.
1950     // Use any existing Phi functions.
1951     assert(control()->is_Region(), "must be merging to a region");
1952     RegionNode* r = control()->as_Region();
1953 
1954     // Compute where to merge into
1955     // Merge incoming control path
1956     r->init_req(pnum, newin->control());
1957 
1958     if (pnum == 1) {            // Last merge for this Region?
1959       if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1960         Node* result = _gvn.transform(r);
1961         if (r != result && TraceOptoParse) {
1962           tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1963         }
1964       }
1965       record_for_igvn(r);
1966     }
1967 
1968     // Update all the non-control inputs to map:
1969     assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1970     bool check_elide_phi = target->is_SEL_backedge(save_block);
1971     bool last_merge = (pnum == PhiNode::Input);
1972     for (uint j = 1; j < newin->req(); j++) {
1973       Node* m = map()->in(j);   // Current state of target.
1974       Node* n = newin->in(j);   // Incoming change to target state.
1975       Node* phi;
1976       if (m->is_Phi() && m->as_Phi()->region() == r) {
1977         phi = m;
1978       } else if (m->is_InlineType() && m->as_InlineType()->has_phi_inputs(r)) {
1979         phi = m;
1980       } else {
1981         phi = nullptr;
1982       }
1983       if (m != n) {             // Different; must merge
1984         switch (j) {
1985         // Frame pointer and Return Address never changes
1986         case TypeFunc::FramePtr:// Drop m, use the original value
1987         case TypeFunc::ReturnAdr:
1988           break;
1989         case TypeFunc::Memory:  // Merge inputs to the MergeMem node
1990           assert(phi == nullptr, "the merge contains phis, not vice versa");
1991           merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1992           continue;
1993         default:                // All normal stuff
1994           if (phi == nullptr) {
1995             const JVMState* jvms = map()->jvms();
1996             if (EliminateNestedLocks &&
1997                 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
 1998               // BoxLock nodes are not commoned when EliminateNestedLocks is on.
1999               // Use old BoxLock node as merged box.
2000               assert(newin->jvms()->is_monitor_box(j), "sanity");
2001               // This assert also tests that nodes are BoxLock.
2002               assert(BoxLockNode::same_slot(n, m), "sanity");

2009                 // Incremental Inlining before EA and Macro nodes elimination.
2010                 //
2011                 // Incremental Inlining is executed after IGVN optimizations
2012                 // during which BoxLock can be marked as Coarsened.
2013                 old_box->set_coarsened(); // Verifies state
2014                 old_box->set_unbalanced();
2015               }
2016               C->gvn_replace_by(n, m);
2017             } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
2018               phi = ensure_phi(j, nophi);
2019             }
2020           }
2021           break;
2022         }
2023       }
2024       // At this point, n might be top if:
2025       //  - there is no phi (because TypeFlow detected a conflict), or
 2026       //  - the corresponding control edge is top (a dead incoming path)
2027       // It is a bug if we create a phi which sees a garbage value on a live path.
2028 
2029       // Merging two inline types?
2030       if (phi != nullptr && phi->is_InlineType()) {
2031         // Reload current state because it may have been updated by ensure_phi
2032         assert(phi == map()->in(j), "unexpected value in map");
2033         assert(phi->as_InlineType()->has_phi_inputs(r), "");
2034         InlineTypeNode* vtm = phi->as_InlineType(); // Current inline type
2035         InlineTypeNode* vtn = n->as_InlineType(); // Incoming inline type
2036         assert(vtm == phi, "Inline type should have Phi input");
2037 
2038 #ifdef ASSERT
2039         if (TraceOptoParse) {
2040           tty->print_cr("\nMerging inline types");
2041           tty->print_cr("Current:");
2042           vtm->dump(2);
2043           tty->print_cr("Incoming:");
2044           vtn->dump(2);
2045           tty->cr();
2046         }
2047 #endif
2048         // Do the merge
2049         vtm->merge_with(&_gvn, vtn, pnum, last_merge);
2050         if (last_merge) {
2051           map()->set_req(j, _gvn.transform(vtm));
2052           record_for_igvn(vtm);
2053         }
2054       } else if (phi != nullptr) {
2055         assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
2056         assert(phi->as_Phi()->region() == r, "");
2057         phi->set_req(pnum, n);  // Then add 'n' to the merge
2058         if (last_merge) {
2059           // Last merge for this Phi.
2060           // So far, Phis have had a reasonable type from ciTypeFlow.
2061           // Now _gvn will join that with the meet of current inputs.
 2062         // BOTTOM is never permissible here, because pessimistically
2063           // Phis of pointers cannot lose the basic pointer type.
2064           DEBUG_ONLY(const Type* bt1 = phi->bottom_type());
2065           assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
2066           map()->set_req(j, _gvn.transform(phi));
2067           DEBUG_ONLY(const Type* bt2 = phi->bottom_type());
2068           assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
2069           record_for_igvn(phi);
2070         }
2071       }
2072     } // End of for all values to be merged
2073 
2074     if (last_merge && !r->in(0)) {         // The occasional useless Region

2075       assert(control() == r, "");
2076       set_control(r->nonnull_req());
2077     }
2078 
2079     map()->merge_replaced_nodes_with(newin);
2080 
2081     // newin has been subsumed into the lazy merge, and is now dead.
2082     set_block(save_block);
2083 
2084     stop();                     // done with this guy, for now
2085   }
2086 
2087   if (TraceOptoParse) {
2088     tty->print_cr(" on path %d", pnum);
2089   }
2090 
2091   // Done with this parser state.
2092   assert(stopped(), "");
2093 }
2094 

2206 
2207   // Add new path to the region.
2208   uint pnum = r->req();
2209   r->add_req(nullptr);
2210 
2211   for (uint i = 1; i < map->req(); i++) {
2212     Node* n = map->in(i);
2213     if (i == TypeFunc::Memory) {
2214       // Ensure a phi on all currently known memories.
2215       for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2216         Node* phi = mms.memory();
2217         if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2218           assert(phi->req() == pnum, "must be same size as region");
2219           phi->add_req(nullptr);
2220         }
2221       }
2222     } else {
2223       if (n->is_Phi() && n->as_Phi()->region() == r) {
2224         assert(n->req() == pnum, "must be same size as region");
2225         n->add_req(nullptr);
2226       } else if (n->is_InlineType() && n->as_InlineType()->has_phi_inputs(r)) {
2227         n->as_InlineType()->add_new_path(r);
2228       }
2229     }
2230   }
2231 
2232   return pnum;
2233 }
2234 
2235 //------------------------------ensure_phi-------------------------------------
2236 // Turn the idx'th entry of the current map into a Phi
2237 Node* Parse::ensure_phi(int idx, bool nocreate) {
2238   SafePointNode* map = this->map();
2239   Node* region = map->control();
2240   assert(region->is_Region(), "");
2241 
2242   Node* o = map->in(idx);
2243   assert(o != nullptr, "");
2244 
2245   if (o == top())  return nullptr; // TOP always merges into TOP
2246 
2247   if (o->is_Phi() && o->as_Phi()->region() == region) {
2248     return o->as_Phi();
2249   }
2250   InlineTypeNode* vt = o->isa_InlineType();
2251   if (vt != nullptr && vt->has_phi_inputs(region)) {
2252     return vt;
2253   }
2254 
2255   // Now use a Phi here for merging
2256   assert(!nocreate, "Cannot build a phi for a block already parsed.");
2257   const JVMState* jvms = map->jvms();
2258   const Type* t = nullptr;
2259   if (jvms->is_loc(idx)) {
2260     t = block()->local_type_at(idx - jvms->locoff());
2261   } else if (jvms->is_stk(idx)) {
2262     t = block()->stack_type_at(idx - jvms->stkoff());
2263   } else if (jvms->is_mon(idx)) {
2264     assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2265     t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2266   } else if ((uint)idx < TypeFunc::Parms) {
2267     t = o->bottom_type();  // Type::RETURN_ADDRESS or such-like.
2268   } else {
2269     assert(false, "no type information for this phi");
2270   }
2271 
2272   // If the type falls to bottom, then this must be a local that
2273   // is already dead or is mixing ints and oops or some such.
2274   // Forcing it to top makes it go dead.
2275   if (t == Type::BOTTOM) {
2276     map->set_req(idx, top());
2277     return nullptr;
2278   }
2279 
2280   // Do not create phis for top either.
 2281   // A top on a non-null control flow must be unused, even after the phi.
2282   if (t == Type::TOP || t == Type::HALF) {
2283     map->set_req(idx, top());
2284     return nullptr;
2285   }
2286 
2287   if (vt != nullptr && t->is_inlinetypeptr()) {
2288     // Inline types are merged by merging their field values.
2289     // Create a cloned InlineTypeNode with phi inputs that
2290     // represents the merged inline type and update the map.
2291     vt = vt->clone_with_phis(&_gvn, region);
2292     map->set_req(idx, vt);
2293     return vt;
2294   } else {
2295     PhiNode* phi = PhiNode::make(region, o, t);
2296     gvn().set_type(phi, t);
2297     if (C->do_escape_analysis()) record_for_igvn(phi);
2298     map->set_req(idx, phi);
2299     return phi;
2300   }
2301 }
2302 
2303 //--------------------------ensure_memory_phi----------------------------------
2304 // Turn the idx'th slice of the current memory into a Phi
2305 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2306   MergeMemNode* mem = merged_memory();
2307   Node* region = control();
2308   assert(region->is_Region(), "");
2309 
2310   Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2311   assert(o != nullptr && o != top(), "");
2312 
2313   PhiNode* phi;
2314   if (o->is_Phi() && o->as_Phi()->region() == region) {
2315     phi = o->as_Phi();
2316     if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2317       // clone the shared base memory phi to make a new memory split
2318       assert(!nocreate, "Cannot build a phi for a block already parsed.");
2319       const Type* t = phi->bottom_type();
2320       const TypePtr* adr_type = C->get_adr_type(idx);

2410 // Add check to deoptimize once holder klass is fully initialized.
2411 void Parse::clinit_deopt() {
2412   assert(C->has_method(), "only for normal compilations");
2413   assert(depth() == 1, "only for main compiled method");
2414   assert(is_normal_parse(), "no barrier needed on osr entry");
2415   assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
2416 
2417   set_parse_bci(0);
2418 
2419   Node* holder = makecon(TypeKlassPtr::make(method()->holder(), Type::trust_interfaces));
2420   guard_klass_being_initialized(holder);
2421 }
2422 
2423 //------------------------------return_current---------------------------------
2424 // Append current _map to _exit_return
2425 void Parse::return_current(Node* value) {
2426   if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2427     call_register_finalizer();
2428   }
2429 
 2430   // frame pointer is always the same, already captured
2431   if (value != nullptr) {
2432     Node* phi = _exits.argument(0);
2433     const Type* return_type = phi->bottom_type();
2434     const TypeInstPtr* tr = return_type->isa_instptr();
2435     if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
2436         return_type->is_inlinetypeptr()) {
2437       // Inline type is returned as fields, make sure it is scalarized
2438       if (!value->is_InlineType()) {
2439         value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass());
2440       }
2441       if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
2442         // Returning from root or an incrementally inlined method. Make sure all non-flat
2443         // fields are buffered and re-execute if allocation triggers deoptimization.
2444         PreserveReexecuteState preexecs(this);
2445         assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
2446         jvms()->set_should_reexecute(true);
2447         inc_sp(1);
2448         value = value->as_InlineType()->allocate_fields(this);
2449       }
2450     } else if (value->is_InlineType()) {
2451       // Inline type is returned as oop, make sure it is buffered and re-execute
2452       // if allocation triggers deoptimization.
2453       PreserveReexecuteState preexecs(this);
2454       jvms()->set_should_reexecute(true);
2455       inc_sp(1);
2456       value = value->as_InlineType()->buffer(this);
2457     }
 2458     // Otherwise, leave the value as is.
2459     // If returning oops to an interface-return, there is a silent free
2460     // cast from oop to interface allowed by the Verifier. Make it explicit here.
2461     phi->add_req(value);
2462   }
2463 
2464   // Do not set_parse_bci, so that return goo is credited to the return insn.
2465   set_bci(InvocationEntryBci);
2466   if (method()->is_synchronized()) {
2467     shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2468   }
2469   if (C->env()->dtrace_method_probes()) {
2470     make_dtrace_method_exit(method());
2471   }
2472 
2473   SafePointNode* exit_return = _exits.map();
2474   exit_return->in( TypeFunc::Control  )->add_req( control() );
2475   exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
2476   Node *mem = exit_return->in( TypeFunc::Memory   );
2477   for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2478     if (mms.is_empty()) {
2479       // get a copy of the base memory, and patch just this one input
2480       const TypePtr* adr_type = mms.adr_type(C);
2481       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2482       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2483       gvn().set_type_bottom(phi);
2484       phi->del_req(phi->req()-1);  // prepare to re-patch
2485       mms.set_memory(phi);
2486     }
2487     mms.memory()->add_req(mms.memory2());
2488   }
2489 









2490   if (_first_return) {
2491     _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2492     _first_return = false;
2493   } else {
2494     _exits.map()->merge_replaced_nodes_with(map());
2495   }
2496 
2497   stop_and_kill_map();          // This CFG path dies here
2498 }
2499 
2500 
2501 //------------------------------add_safepoint----------------------------------
2502 void Parse::add_safepoint() {
2503   uint parms = TypeFunc::Parms+1;
2504 
2505   // Clear out dead values from the debug info.
2506   kill_dead_locals();
2507 
2508   // Clone the JVM State
2509   SafePointNode *sfpnt = new SafePointNode(parms, nullptr);
< prev index next >