src/hotspot/share/opto/parse1.cpp

  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "interpreter/linkResolver.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "oops/method.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/c2compiler.hpp"
  32 #include "opto/castnode.hpp"

  33 #include "opto/idealGraphPrinter.hpp"

  34 #include "opto/locknode.hpp"
  35 #include "opto/memnode.hpp"
  36 #include "opto/opaquenode.hpp"
  37 #include "opto/parse.hpp"
  38 #include "opto/rootnode.hpp"
  39 #include "opto/runtime.hpp"
  40 #include "opto/type.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/safepointMechanism.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "utilities/bitMap.inline.hpp"
  45 #include "utilities/copy.hpp"
  46 
  47 // Static array so we can figure out which bytecodes stop us from compiling
  48 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
  49 // and eventually should be encapsulated in a proper class (gri 8/18/98).
  50 
  51 #ifndef PRODUCT
  52 uint nodes_created             = 0;
  53 uint methods_parsed            = 0;

  85   }
  86   if (all_null_checks_found) {
  87     tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
  88                   (100*implicit_null_checks)/all_null_checks_found);
  89   }
  90   if (SharedRuntime::_implicit_null_throws) {
  91     tty->print_cr("%u implicit null exceptions at runtime",
  92                   SharedRuntime::_implicit_null_throws);
  93   }
  94 
  95   if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
  96     BytecodeParseHistogram::print();
  97   }
  98 }
  99 #endif
 100 
 101 //------------------------------ON STACK REPLACEMENT---------------------------
 102 
 103 // Construct a node which can be used to get incoming state for
 104 // on stack replacement.
 105 Node *Parse::fetch_interpreter_state(int index,
 106                                      BasicType bt,
 107                                      Node *local_addrs,
 108                                      Node *local_addrs_base) {
 109   Node *mem = memory(Compile::AliasIdxRaw);
 110   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
 111   Node *ctl = control();
 112 
  113   // Very similar to LoadNode::make, except we handle unaligned longs and
  114   // doubles on SPARC.  Intel can handle them just fine directly.
 115   Node *l = nullptr;
 116   switch (bt) {                // Signature is flattened
 117   case T_INT:     l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        MemNode::unordered); break;
 118   case T_FLOAT:   l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         MemNode::unordered); break;
 119   case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,  MemNode::unordered); break;

 120   case T_OBJECT:  l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
 121   case T_LONG:
 122   case T_DOUBLE: {
 123     // Since arguments are in reverse order, the argument address 'adr'
 124     // refers to the back half of the long/double.  Recompute adr.
 125     adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
 126     if (Matcher::misaligned_doubles_ok) {
 127       l = (bt == T_DOUBLE)
 128         ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
 129         : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
 130     } else {
 131       l = (bt == T_DOUBLE)
 132         ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
 133         : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
 134     }
 135     break;
 136   }
 137   default: ShouldNotReachHere();
 138   }
 139   return _gvn.transform(l);
 140 }
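// Worked example (illustrative only; assumes a 64-bit VM where wordSize == 8):
// interpreter slots sit at decreasing addresses, so slot index 3 is loaded
// from local_addrs - 3*8 (the AddP keeps local_addrs_base as its base).  For
// a long/double occupying slots (3,4), the 64-bit value lives in the
// higher-numbered slot, which is why 'adr' is recomputed with
// -(index+1)*wordSize before the wide load above.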
 141 
 142 // Helper routine to prevent the interpreter from handing
 143 // unexpected typestate to an OSR method.
 144 // The Node l is a value newly dug out of the interpreter frame.
 145 // The type is the type predicted by ciTypeFlow.  Note that it is
 146 // not a general type, but can only come from Type::get_typeflow_type.
 147 // The safepoint is a map which will feed an uncommon trap.
 148 Node* Parse::check_interpreter_type(Node* l, const Type* type,
 149                                     SafePointNode* &bad_type_exit) {
 150 
 151   const TypeOopPtr* tp = type->isa_oopptr();
 152 
 153   // TypeFlow may assert null-ness if a type appears unloaded.
 154   if (type == TypePtr::NULL_PTR ||
 155       (tp != nullptr && !tp->is_loaded())) {
 156     // Value must be null, not a real oop.
 157     Node* chk = _gvn.transform( new CmpPNode(l, null()) );
 158     Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
 159     IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
 160     set_control(_gvn.transform( new IfTrueNode(iff) ));
 161     Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
 162     bad_type_exit->control()->add_req(bad_type);
 163     l = null();
 164   }
 165 
 166   // Typeflow can also cut off paths from the CFG, based on
 167   // types which appear unloaded, or call sites which appear unlinked.
 168   // When paths are cut off, values at later merge points can rise
 169   // toward more specific classes.  Make sure these specific classes
 170   // are still in effect.
 171   if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
 172     // TypeFlow asserted a specific object type.  Value must have that type.
 173     Node* bad_type_ctrl = nullptr;
 174     l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
 175     bad_type_exit->control()->add_req(bad_type_ctrl);
 176   }
 177 
 178   assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
 179   return l;
 180 }
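// Control-flow shape emitted for the null case (a sketch, not lifted from
// this file):
//
//   CmpP(l, null()) --> Bool(eq) --> If
//     IfTrue:  parsing continues with l replaced by null()
//     IfFalse: added as an extra input to bad_type_exit's control Region,
//              which ultimately feeds an uncommon trap back to the
//              interpreter.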
 181 
 182 // Helper routine which sets up elements of the initial parser map when
 183 // performing a parse for on stack replacement.  Add values into map.
 184 // The only parameter contains the address of a interpreter arguments.
 185 void Parse::load_interpreter_state(Node* osr_buf) {
 186   int index;
 187   int max_locals = jvms()->loc_size();
 188   int max_stack  = jvms()->stk_size();
 189 
 190 
 191   // Mismatch between method and jvms can occur since map briefly held
 192   // an OSR entry state (which takes up one RawPtr word).
 193   assert(max_locals == method()->max_locals(), "sanity");
 194   assert(max_stack  >= method()->max_stack(),  "sanity");
 195   assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
 196   assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
 197 
 198   // Find the start block.
 199   Block* osr_block = start_block();
 200   assert(osr_block->start() == osr_bci(), "sanity");
 201 
 202   // Set initial BCI.
 203   set_parse_bci(osr_block->start());
 204 
 205   // Set initial stack depth.
 206   set_sp(osr_block->start_sp());
 207 
 208   // Check bailouts.  We currently do not perform on stack replacement
 209   // of loops in catch blocks or loops which branch with a non-empty stack.
 210   if (sp() != 0) {
 211     C->record_method_not_compilable("OSR starts with non-empty stack");
 212     return;
 213   }
 214   // Do not OSR inside finally clauses:
 215   if (osr_block->has_trap_at(osr_block->start())) {
 216     assert(false, "OSR starts with an immediate trap");
 217     C->record_method_not_compilable("OSR starts with an immediate trap");
 218     return;
 219   }
 220 
 221   // Commute monitors from interpreter frame to compiler frame.
 222   assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
 223   int mcnt = osr_block->flow()->monitor_count();
 224   Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
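  // Sketch of the OSR temp buffer layout assumed by the loop below
  // (illustrative, not normative):
  //   [ locals: max_locals words | monitors: mcnt interleaved
  //     (displaced header, locked object) pairs ]
  // monitors_addr points at the last monitor word, so monitor k's locked
  // object is fetched at index 2*k and its displaced header at 2*k + 1,
  // each becoming a negative word offset inside fetch_interpreter_state.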
 225   for (index = 0; index < mcnt; index++) {
 226     // Make a BoxLockNode for the monitor.
 227     Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
 228 
 229 
 230     // Displaced headers and locked objects are interleaved in the
 231     // temp OSR buffer.  We only copy the locked objects out here.
 232     // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
 233     Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
 234     // Try and copy the displaced header to the BoxNode
 235     Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
 236 
 237 
 238     store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
 239 
 240     // Build a bogus FastLockNode (no code will be generated) and push the
 241     // monitor into our debug info.
 242     const FastLockNode *flock = _gvn.transform(new FastLockNode( 0, lock_object, box ))->as_FastLock();
 243     map()->push_monitor(flock);
 244 
 245     // If the lock is our method synchronization lock, tuck it away in
 246     // _sync_lock for return and rethrow exit paths.
 247     if (index == 0 && method()->is_synchronized()) {
 248       _synch_lock = flock;
 249     }
 250   }
 251 
 252   // Use the raw liveness computation to make sure that unexpected
 253   // values don't propagate into the OSR frame.
 254   MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
 255   if (!live_locals.is_valid()) {
 256     // Degenerate or breakpointed method.

 284         if (C->log() != nullptr) {
 285           C->log()->elem("OSR_mismatch local_index='%d'",index);
 286         }
 287         set_local(index, null());
 288         // and ignore it for the loads
 289         continue;
 290       }
 291     }
 292 
 293     // Filter out TOP, HALF, and BOTTOM.  (Cf. ensure_phi.)
 294     if (type == Type::TOP || type == Type::HALF) {
 295       continue;
 296     }
 297     // If the type falls to bottom, then this must be a local that
 298     // is mixing ints and oops or some such.  Forcing it to top
 299     // makes it go dead.
 300     if (type == Type::BOTTOM) {
 301       continue;
 302     }
 303     // Construct code to access the appropriate local.
 304     BasicType bt = type->basic_type();
 305     if (type == TypePtr::NULL_PTR) {
 306       // Ptr types are mixed together with T_ADDRESS but null is
 307       // really for T_OBJECT types so correct it.
 308       bt = T_OBJECT;
 309     }
 310     Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
 311     set_local(index, value);
 312   }
 313 
 314   // Extract the needed stack entries from the interpreter frame.
 315   for (index = 0; index < sp(); index++) {
 316     const Type *type = osr_block->stack_type_at(index);
 317     if (type != Type::TOP) {
 318       // Currently the compiler bails out when attempting to on stack replace
 319       // at a bci with a non-empty stack.  We should not reach here.
 320       ShouldNotReachHere();
 321     }
 322   }
 323 
 324   // End the OSR migration
 325   make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
 326                     CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
 327                     "OSR_migration_end", TypeRawPtr::BOTTOM,
 328                     osr_buf);
 329 
 330   // Now that the interpreter state is loaded, make sure it will match

 493     log->elem("observe that='has_exception_handlers'");
 494   }
 495 
 496   assert(InlineTree::check_can_parse(method()) == nullptr, "Can not parse this method, cutout earlier");
 497   assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");
 498 
 499   // Always register dependence if JVMTI is enabled, because
 500   // either breakpoint setting or hotswapping of methods may
 501   // cause deoptimization.
 502   if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
 503     C->dependencies()->assert_evol_method(method());
 504   }
 505 
 506   NOT_PRODUCT(methods_seen++);
 507 
 508   // Do some special top-level things.
 509   if (depth() == 1 && C->is_osr_compilation()) {
 510     _entry_bci = C->entry_bci();
 511     _flow = method()->get_osr_flow_analysis(osr_bci());
 512     if (_flow->failing()) {
 513       assert(false, "type flow analysis failed for OSR compilation");


 514       C->record_method_not_compilable(_flow->failure_reason());
 515 #ifndef PRODUCT
 516       if (PrintOpto && (Verbose || WizardMode)) {
 517         tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
 518         if (Verbose) {
 519           method()->print();
 520           method()->print_codes();
 521           _flow->print();
 522         }
 523       }
 524 #endif
 525     }
 526     _tf = C->tf();     // the OSR entry type is different
 527   }
 528 
 529 #ifdef ASSERT
 530   if (depth() == 1) {
 531     assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
 532   } else {
 533     assert(!this->is_osr_parse(), "no recursive OSR");

 581     do_method_entry();
 582   }
 583 
 584   if (depth() == 1 && !failing()) {
 585     if (C->clinit_barrier_on_entry()) {
 586       // Add check to deoptimize the nmethod once the holder class is fully initialized
 587       clinit_deopt();
 588     }
 589 
 590     // Add check to deoptimize the nmethod if RTM state was changed
 591     rtm_deopt();
 592   }
 593 
 594   // Check for bailouts during method entry or RTM state check setup.
 595   if (failing()) {
 596     if (log)  log->done("parse");
 597     C->set_default_node_notes(caller_nn);
 598     return;
 599   }
 600 
 601   entry_map = map();  // capture any changes performed by method setup code
 602   assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
 603 
 604   // We begin parsing as if we have just encountered a jump to the
 605   // method entry.
 606   Block* entry_block = start_block();
 607   assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
 608   set_map_clone(entry_map);
 609   merge_common(entry_block, entry_block->next_path_num());
 610 
 611 #ifndef PRODUCT
 612   BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
 613   set_parse_histogram( parse_histogram_obj );
 614 #endif
 615 
 616   // Parse all the basic blocks.
 617   do_all_blocks();
 618 
 619   // Check for bailouts during conversion to graph
 620   if (failing()) {

 766 void Parse::build_exits() {
 767   // make a clone of caller to prevent sharing of side-effects
 768   _exits.set_map(_exits.clone_map());
 769   _exits.clean_stack(_exits.sp());
 770   _exits.sync_jvms();
 771 
 772   RegionNode* region = new RegionNode(1);
 773   record_for_igvn(region);
 774   gvn().set_type_bottom(region);
 775   _exits.set_control(region);
 776 
 777   // Note:  iophi and memphi are not transformed until do_exits.
 778   Node* iophi  = new PhiNode(region, Type::ABIO);
 779   Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
 780   gvn().set_type_bottom(iophi);
 781   gvn().set_type_bottom(memphi);
 782   _exits.set_i_o(iophi);
 783   _exits.set_all_memory(memphi);
 784 
 785   // Add a return value to the exit state.  (Do not push it yet.)
 786   if (tf()->range()->cnt() > TypeFunc::Parms) {
 787     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
 788     if (ret_type->isa_int()) {
 789       BasicType ret_bt = method()->return_type()->basic_type();
 790       if (ret_bt == T_BOOLEAN ||
 791           ret_bt == T_CHAR ||
 792           ret_bt == T_BYTE ||
 793           ret_bt == T_SHORT) {
 794         ret_type = TypeInt::INT;
 795       }
 796     }
 797 
 798     // Don't "bind" an unloaded return klass to the ret_phi. If the klass
 799     // becomes loaded during the subsequent parsing, the loaded and unloaded
 800     // types will not join when we transform and push in do_exits().
 801     const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
 802     if (ret_oop_type && !ret_oop_type->is_loaded()) {
 803       ret_type = TypeOopPtr::BOTTOM;
 804     }
 805     int         ret_size = type2size[ret_type->basic_type()];
 806     Node*       ret_phi  = new PhiNode(region, ret_type);
 807     gvn().set_type_bottom(ret_phi);
 808     _exits.ensure_stack(ret_size);
 809     assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
 810     assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
 811     _exits.set_argument(0, ret_phi);  // here is where the parser finds it
 812     // Note:  ret_phi is not yet pushed, until do_exits.
 813   }
 814 }
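// Note on the int normalization in build_exits above: a declared
// boolean/byte/char/short return produces an int-typed ret_phi
// (TypeInt::INT), matching how the JVM carries subword values as ints on
// the operand stack.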
 815 
 816 
 817 //----------------------------build_start_state-------------------------------
 818 // Construct a state which contains only the incoming arguments from an
 819 // unknown caller.  The method & bci will be null & InvocationEntryBci.
 820 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
 821   int        arg_size = tf->domain()->cnt();
 822   int        max_size = MAX2(arg_size, (int)tf->range()->cnt());
 823   JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
 824   SafePointNode* map  = new SafePointNode(max_size, jvms);

 825   record_for_igvn(map);
 826   assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
 827   Node_Notes* old_nn = default_node_notes();
 828   if (old_nn != nullptr && has_method()) {
 829     Node_Notes* entry_nn = old_nn->clone(this);
 830     JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
 831     entry_jvms->set_offsets(0);
 832     entry_jvms->set_bci(entry_bci());
 833     entry_nn->set_jvms(entry_jvms);
 834     set_default_node_notes(entry_nn);
 835   }
 836   uint i;
 837   for (i = 0; i < (uint)arg_size; i++) {
 838     Node* parm = initial_gvn()->transform(new ParmNode(start, i));
 839     map->init_req(i, parm);
 840     // Record all these guys for later GVN.
 841     record_for_igvn(parm);
 842   }
 843   for (; i < map->req(); i++) {
 844     map->init_req(i, top());
 845   }
 846   assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
 847   set_default_node_notes(old_nn);
 848   jvms->set_map(map);
 849   return jvms;
 850 }
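// Example of the resulting start state (slot numbers illustrative): for a
// compilation of "static int f(int a, long b)", ParmNodes fill slots
// 0..TypeFunc::Parms-1 (control, i/o, memory, frame pointer, and return
// address projections off Start), then 'a' at Parms+0 and the two halves of
// 'b' at Parms+1 and Parms+2; remaining slots up to max_size are top().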
 851 
 852 //-----------------------------make_node_notes---------------------------------
 853 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
 854   if (caller_nn == nullptr)  return nullptr;
 855   Node_Notes* nn = caller_nn->clone(C);
 856   JVMState* caller_jvms = nn->jvms();
 857   JVMState* jvms = new (C) JVMState(method(), caller_jvms);
 858   jvms->set_offsets(0);
 859   jvms->set_bci(_entry_bci);
 860   nn->set_jvms(jvms);
 861   return nn;
 862 }
 863 
 864 
 865 //--------------------------return_values--------------------------------------
 866 void Compile::return_values(JVMState* jvms) {
 867   GraphKit kit(jvms);
 868   Node* ret = new ReturnNode(TypeFunc::Parms,
 869                              kit.control(),
 870                              kit.i_o(),
 871                              kit.reset_memory(),
 872                              kit.frameptr(),
 873                              kit.returnadr());
  874   // Add zero or one return value
 875   int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
 876   if (ret_size > 0) {
 877     kit.inc_sp(-ret_size);  // pop the return value(s)
 878     kit.sync_jvms();
 879     ret->add_req(kit.argument(0));
 880     // Note:  The second dummy edge is not needed by a ReturnNode.
 881   }
 882   // bind it to root
 883   root()->add_req(ret);
 884   record_for_igvn(ret);
 885   initial_gvn()->transform_no_reclaim(ret);
 886 }
 887 
 888 //------------------------rethrow_exceptions-----------------------------------
 889 // Bind all exception states in the list into a single RethrowNode.
 890 void Compile::rethrow_exceptions(JVMState* jvms) {
 891   GraphKit kit(jvms);
 892   if (!kit.has_exceptions())  return;  // nothing to generate
 893   // Load my combined exception state into the kit, with all phis transformed:
 894   SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
 895   Node* ex_oop = kit.use_exception_state(ex_map);
 896   RethrowNode* exit = new RethrowNode(kit.control(),
 897                                       kit.i_o(), kit.reset_memory(),
 898                                       kit.frameptr(), kit.returnadr(),
 899                                       // like a return but with exception input
 900                                       ex_oop);

 984   //    to complete, we force all writes to complete.
 985   //
  986   // 2. An experimental VM option (AlwaysSafeConstructors) is used to force
  987   //    the barrier if any field was written out in the constructor.
 988   //
 989   // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
 990   //    support_IRIW_for_not_multiple_copy_atomic_cpu selects that
 991   //    MemBarVolatile is used before volatile load instead of after volatile
 992   //    store, so there's no barrier after the store.
 993   //    We want to guarantee the same behavior as on platforms with total store
 994   //    order, although this is not required by the Java memory model.
 995   //    In this case, we want to enforce visibility of volatile field
 996   //    initializations which are performed in constructors.
 997   //    So as with finals, we add a barrier here.
 998   //
 999   // "All bets are off" unless the first publication occurs after a
1000   // normal return from the constructor.  We do not attempt to detect
1001   // such unusual early publications.  But no barrier is needed on
1002   // exceptional returns, since they cannot publish normally.
1003   //
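  // Illustrative Java shape being protected (an assumption for clarity, not
  // taken from this file):
  //   class C { final int f;  C() { f = 42; } }
  //   // elsewhere: shared = new C();  a racy reader of shared.f must never
  //   // observe the default value 0.
  // The MemBarRelease inserted below keeps the constructor's field stores
  // ordered before the publishing store of the new object.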
1004   if (method()->is_initializer() &&
1005        (wrote_final() ||
1006          (AlwaysSafeConstructors && wrote_fields()) ||
1007          (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1008     _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
1009 
1010     // If Memory barrier is created for final fields write
1011     // and allocation node does not escape the initialize method,
1012     // then barrier introduced by allocation node can be removed.
1013     if (DoEscapeAnalysis && alloc_with_final()) {
1014       AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_with_final());
1015       alloc->compute_MemBar_redundancy(method());
1016     }
1017     if (PrintOpto && (Verbose || WizardMode)) {
1018       method()->print_name();
1019       tty->print_cr(" writes finals and needs a memory barrier");
1020     }
1021   }
1022 
1023   // Any method can write a @Stable field; insert memory barriers
1024   // after those also. Can't bind predecessor allocation node (if any)
1025   // with barrier because allocation doesn't always dominate
1026   // MemBarRelease.
1027   if (wrote_stable()) {
1028     _exits.insert_mem_bar(Op_MemBarRelease);
1029     if (PrintOpto && (Verbose || WizardMode)) {
1030       method()->print_name();
1031       tty->print_cr(" writes @Stable and needs a memory barrier");
1032     }
1033   }
1034 
1035   for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1036     // transform each slice of the original memphi:
1037     mms.set_memory(_gvn.transform(mms.memory()));
1038   }
1039   // Clean up input MergeMems created by transforming the slices
1040   _gvn.transform(_exits.merged_memory());
1041 
1042   if (tf()->range()->cnt() > TypeFunc::Parms) {
1043     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1044     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
1045     if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1046       // If the type we set for the ret_phi in build_exits() is too optimistic and
1047       // the ret_phi is top now, there's an extremely small chance that it may be due to class
1048       // loading.  It could also be due to an error, so mark this method as not compilable because
1049       // otherwise this could lead to an infinite compile loop.
1050       // In any case, this code path is rarely (and never in my testing) reached.
1051 #ifdef ASSERT
1052       tty->print_cr("# Can't determine return type.");
1053       tty->print_cr("# exit control");
1054       _exits.control()->dump(2);
1055       tty->print_cr("# ret phi type");
1056       _gvn.type(ret_phi)->dump();
1057       tty->print_cr("# ret phi");
1058       ret_phi->dump(2);
1059 #endif // ASSERT
1060       assert(false, "Can't determine return type.");
1061       C->record_method_not_compilable("Can't determine return type.");
1062       return;
1063     }

1127 
1128 //-----------------------------create_entry_map-------------------------------
1129 // Initialize our parser map to contain the types at method entry.
1130 // For OSR, the map contains a single RawPtr parameter.
1131 // Initial monitor locking for sync. methods is performed by do_method_entry.
1132 SafePointNode* Parse::create_entry_map() {
1133   // Check for really stupid bail-out cases.
1134   uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1135   if (len >= 32760) {
1136     // Bailout expected, this is a very rare edge case.
1137     C->record_method_not_compilable("too many local variables");
1138     return nullptr;
1139   }
1140 
1141   // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1142   _caller->map()->delete_replaced_nodes();
1143 
1144   // If this is an inlined method, we may have to do a receiver null check.
1145   if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1146     GraphKit kit(_caller);
1147     kit.null_check_receiver_before_call(method());
1148     _caller = kit.transfer_exceptions_into_jvms();
1149     if (kit.stopped()) {
1150       _exits.add_exception_states_from(_caller);
1151       _exits.set_jvms(_caller);
1152       return nullptr;
1153     }
1154   }
1155 
1156   assert(method() != nullptr, "parser must have a method");
1157 
1158   // Create an initial safepoint to hold JVM state during parsing
1159   JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1160   set_map(new SafePointNode(len, jvms));
1161   jvms->set_map(map());
1162   record_for_igvn(map());
1163   assert(jvms->endoff() == len, "correct jvms sizing");
1164 
1165   SafePointNode* inmap = _caller->map();
1166   assert(inmap != nullptr, "must have inmap");
1167   // In case of null check on receiver above
1168   map()->transfer_replaced_nodes_from(inmap, _new_idx);
1169 
1170   uint i;
1171 
1172   // Pass thru the predefined input parameters.
1173   for (i = 0; i < TypeFunc::Parms; i++) {
1174     map()->init_req(i, inmap->in(i));
1175   }
1176 
1177   if (depth() == 1) {
1178     assert(map()->memory()->Opcode() == Op_Parm, "");
1179     // Insert the memory aliasing node
1180     set_all_memory(reset_memory());
1181   }
1182   assert(merged_memory(), "");
1183 
1184   // Now add the locals which are initially bound to arguments:
1185   uint arg_size = tf()->domain()->cnt();
1186   ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
1187   for (i = TypeFunc::Parms; i < arg_size; i++) {
1188     map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1189   }
1190 
1191   // Clear out the rest of the map (locals and stack)
1192   for (i = arg_size; i < len; i++) {
1193     map()->init_req(i, top());
1194   }
1195 
1196   SafePointNode* entry_map = stop();
1197   return entry_map;
1198 }
1199 
1200 //-----------------------------do_method_entry--------------------------------
1201 // Emit any code needed in the pseudo-block before BCI zero.
1202 // The main thing to do is lock the receiver of a synchronized method.
1203 void Parse::do_method_entry() {
1204   set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1205   set_sp(0);                         // Java Stack Pointer

1239 
1240   // If the method is synchronized, we need to construct a lock node, attach
1241   // it to the Start node, and pin it there.
1242   if (method()->is_synchronized()) {
1243     // Insert a FastLockNode right after the Start which takes as arguments
1244     // the current thread pointer, the "this" pointer & the address of the
1245     // stack slot pair used for the lock.  The "this" pointer is a projection
1246     // off the start node, but the locking spot has to be constructed by
1247     // creating a ConLNode of 0, and boxing it with a BoxLockNode.  The BoxLockNode
1248     // becomes the second argument to the FastLockNode call.  The
1249     // FastLockNode becomes the new control parent to pin it to the start.
1250 
1251     // Setup Object Pointer
1252     Node *lock_obj = nullptr;
1253     if (method()->is_static()) {
1254       ciInstance* mirror = _method->holder()->java_mirror();
1255       const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1256       lock_obj = makecon(t_lock);
1257     } else {                  // Else pass the "this" pointer,
1258       lock_obj = local(0);    // which is Parm0 from StartNode

1259     }
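    // E.g. (illustrative): for "static synchronized void m()" lock_obj is
    // the constant java.lang.Class mirror of the holder; for an instance
    // method it is the incoming receiver in local(0).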
1260     // Clear out dead values from the debug info.
1261     kill_dead_locals();
1262     // Build the FastLockNode
1263     _synch_lock = shared_lock(lock_obj);
1264   }
1265 
1266   // Feed profiling data for parameters to the type system so it can
1267   // propagate it as speculative types
1268   record_profiled_parameters_for_speculation();
1269 }
1270 
1271 //------------------------------init_blocks------------------------------------
1272 // Create and initialize the parser's basic-block structures.
1273 void Parse::init_blocks() {
1274   // Create the blocks.
1275   _block_count = flow()->block_count();
1276   _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1277 
1278   // Initialize the structs.

1655 //--------------------handle_missing_successor---------------------------------
1656 void Parse::handle_missing_successor(int target_bci) {
1657 #ifndef PRODUCT
1658   Block* b = block();
1659   int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1660   tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1661 #endif
1662   ShouldNotReachHere();
1663 }
1664 
1665 //--------------------------merge_common---------------------------------------
1666 void Parse::merge_common(Parse::Block* target, int pnum) {
1667   if (TraceOptoParse) {
1668     tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1669   }
1670 
1671   // Zap extra stack slots to top
1672   assert(sp() == target->start_sp(), "");
1673   clean_stack(sp());
1674 
1675   if (!target->is_merged()) {   // No prior mapping at this bci
1676     if (TraceOptoParse) { tty->print(" with empty state");  }
1677 
1678     // If this path is dead, do not bother capturing it as a merge.
1679     // It is "as if" we had 1 fewer predecessors from the beginning.
1680     if (stopped()) {
1681       if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
1682       return;
1683     }
1684 
1685     // Make a region if we know there are multiple or unpredictable inputs.
1686     // (Also, if this is a plain fall-through, we might see another region,
1687     // which must not be allowed into this block's map.)
1688     if (pnum > PhiNode::Input         // Known multiple inputs.
1689         || target->is_handler()       // These have unpredictable inputs.
1690         || target->is_loop_head()     // Known multiple inputs
1691         || control()->is_Region()) {  // We must hide this guy.
1692 
1693       int current_bci = bci();
1694       set_parse_bci(target->start()); // Set target bci

1709       record_for_igvn(r);
1710       // zap all inputs to null for debugging (done in Node(uint) constructor)
1711       // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1712       r->init_req(pnum, control());
1713       set_control(r);
1714       target->copy_irreducible_status_to(r, jvms());
1715       set_parse_bci(current_bci); // Restore bci
1716     }
1717 
1718     // Convert the existing Parser mapping into a mapping at this bci.
1719     store_state_to(target);
1720     assert(target->is_merged(), "do not come here twice");
1721 
1722   } else {                      // Prior mapping at this bci
1723     if (TraceOptoParse) {  tty->print(" with previous state"); }
1724 #ifdef ASSERT
1725     if (target->is_SEL_head()) {
1726       target->mark_merged_backedge(block());
1727     }
1728 #endif

1729     // We must not manufacture more phis if the target is already parsed.
1730     bool nophi = target->is_parsed();
1731 
1732     SafePointNode* newin = map();// Hang on to incoming mapping
1733     Block* save_block = block(); // Hang on to incoming block;
1734     load_state_from(target);    // Get prior mapping
1735 
1736     assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1737     assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1738     assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1739     assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1740 
1741     // Iterate over my current mapping and the old mapping.
1742     // Where different, insert Phi functions.
1743     // Use any existing Phi functions.
1744     assert(control()->is_Region(), "must be merging to a region");
1745     RegionNode* r = control()->as_Region();
1746 
1747     // Compute where to merge into
1748     // Merge incoming control path
1749     r->init_req(pnum, newin->control());
1750 
1751     if (pnum == 1) {            // Last merge for this Region?
1752       if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1753         Node* result = _gvn.transform_no_reclaim(r);
1754         if (r != result && TraceOptoParse) {
1755           tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1756         }
1757       }
1758       record_for_igvn(r);
1759     }
1760 
1761     // Update all the non-control inputs to map:
1762     assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1763     bool check_elide_phi = target->is_SEL_backedge(save_block);

1764     for (uint j = 1; j < newin->req(); j++) {
1765       Node* m = map()->in(j);   // Current state of target.
1766       Node* n = newin->in(j);   // Incoming change to target state.
1767       PhiNode* phi;
1768       if (m->is_Phi() && m->as_Phi()->region() == r)
1769         phi = m->as_Phi();
1770       else
1771         phi = nullptr;
1772       if (m != n) {             // Different; must merge
1773         switch (j) {
1774         // Frame pointer and Return Address never changes
1775         case TypeFunc::FramePtr:// Drop m, use the original value
1776         case TypeFunc::ReturnAdr:
1777           break;
1778         case TypeFunc::Memory:  // Merge inputs to the MergeMem node
1779           assert(phi == nullptr, "the merge contains phis, not vice versa");
1780           merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1781           continue;
1782         default:                // All normal stuff
1783           if (phi == nullptr) {
1784             const JVMState* jvms = map()->jvms();
1785             if (EliminateNestedLocks &&
1786                 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1787               // BoxLock nodes are not commoned.
1788               // Use old BoxLock node as merged box.
1789               assert(newin->jvms()->is_monitor_box(j), "sanity");
1790               // This assert also tests that nodes are BoxLock.
1791               assert(BoxLockNode::same_slot(n, m), "sanity");
1792               C->gvn_replace_by(n, m);
1793             } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1794               phi = ensure_phi(j, nophi);
1795             }
1796           }
1797           break;
1798         }
1799       }
1800       // At this point, n might be top if:
1801       //  - there is no phi (because TypeFlow detected a conflict), or
1802       //  - the corresponding control edge is top (a dead incoming path)
1803       // It is a bug if we create a phi which sees a garbage value on a live path.
1804 
1805       if (phi != nullptr) {
1806         assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1807         assert(phi->region() == r, "");
1808         phi->set_req(pnum, n);  // Then add 'n' to the merge
1809         if (pnum == PhiNode::Input) {
1810           // Last merge for this Phi.
1811           // So far, Phis have had a reasonable type from ciTypeFlow.
1812           // Now _gvn will join that with the meet of current inputs.
1813           // BOTTOM is never permissible here, 'cause pessimistically
1814           // Phis of pointers cannot lose the basic pointer type.
1815           debug_only(const Type* bt1 = phi->bottom_type());
1816           assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1817           map()->set_req(j, _gvn.transform_no_reclaim(phi));
1818           debug_only(const Type* bt2 = phi->bottom_type());
1819           assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1820           record_for_igvn(phi);
1821         }
1822       }
1823     } // End of for all values to be merged
1824 
1825     if (pnum == PhiNode::Input &&
1826         !r->in(0)) {         // The occasional useless Region
1827       assert(control() == r, "");
1828       set_control(r->nonnull_req());
1829     }
1830 
1831     map()->merge_replaced_nodes_with(newin);
1832 
1833     // newin has been subsumed into the lazy merge, and is now dead.
1834     set_block(save_block);
1835 
1836     stop();                     // done with this guy, for now
1837   }
1838 
1839   if (TraceOptoParse) {
1840     tty->print_cr(" on path %d", pnum);
1841   }
1842 
1843   // Done with this parser state.
1844   assert(stopped(), "");
1845 }
1846 

1958 
1959   // Add new path to the region.
1960   uint pnum = r->req();
1961   r->add_req(nullptr);
1962 
1963   for (uint i = 1; i < map->req(); i++) {
1964     Node* n = map->in(i);
1965     if (i == TypeFunc::Memory) {
1966       // Ensure a phi on all currently known memories.
1967       for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
1968         Node* phi = mms.memory();
1969         if (phi->is_Phi() && phi->as_Phi()->region() == r) {
1970           assert(phi->req() == pnum, "must be same size as region");
1971           phi->add_req(nullptr);
1972         }
1973       }
1974     } else {
1975       if (n->is_Phi() && n->as_Phi()->region() == r) {
1976         assert(n->req() == pnum, "must be same size as region");
1977         n->add_req(nullptr);
1978       }
1979     }
1980   }
1981 
1982   return pnum;
1983 }
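// (After this helper returns, the region and every phi hanging off it have
// grown by exactly one input slot, still null, which the caller fills in
// for the new predecessor path.)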
1984 
1985 //------------------------------ensure_phi-------------------------------------
1986 // Turn the idx'th entry of the current map into a Phi
1987 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
1988   SafePointNode* map = this->map();
1989   Node* region = map->control();
1990   assert(region->is_Region(), "");
1991 
1992   Node* o = map->in(idx);
1993   assert(o != nullptr, "");
1994 
1995   if (o == top())  return nullptr; // TOP always merges into TOP
1996 
1997   if (o->is_Phi() && o->as_Phi()->region() == region) {
1998     return o->as_Phi();
1999   }
2000 
2001   // Now use a Phi here for merging
2002   assert(!nocreate, "Cannot build a phi for a block already parsed.");
2003   const JVMState* jvms = map->jvms();
2004   const Type* t = nullptr;
2005   if (jvms->is_loc(idx)) {
2006     t = block()->local_type_at(idx - jvms->locoff());
2007   } else if (jvms->is_stk(idx)) {
2008     t = block()->stack_type_at(idx - jvms->stkoff());
2009   } else if (jvms->is_mon(idx)) {
2010     assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2011     t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2012   } else if ((uint)idx < TypeFunc::Parms) {
2013     t = o->bottom_type();  // Type::RETURN_ADDRESS or such-like.
2014   } else {
2015     assert(false, "no type information for this phi");
2016   }
2017 
2018   // If the type falls to bottom, then this must be a local that
2019   // is mixing ints and oops or some such.  Forcing it to top
2020   // makes it go dead.
2021   if (t == Type::BOTTOM) {
2022     map->set_req(idx, top());
2023     return nullptr;
2024   }
2025 
2026   // Do not create phis for top either.
2027   // A top on a non-null control flow must be unused, even after the phi.
2028   if (t == Type::TOP || t == Type::HALF) {
2029     map->set_req(idx, top());
2030     return nullptr;
2031   }
2032 
2033   PhiNode* phi = PhiNode::make(region, o, t);
2034   gvn().set_type(phi, t);
2035   if (C->do_escape_analysis()) record_for_igvn(phi);
2036   map->set_req(idx, phi);
2037   return phi;
2038 }
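// Merge sketch (assumed bytecode, purely for illustration): if one
// predecessor leaves local 2 holding the int constant 1 and another leaves
// it holding 2, merging the second path calls ensure_phi on map index
// jvms->locoff() + 2 and produces PhiNode(region, TypeInt::INT) whose
// inputs line up with the region's control edges.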
2039 
2040 //--------------------------ensure_memory_phi----------------------------------
2041 // Turn the idx'th slice of the current memory into a Phi
2042 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2043   MergeMemNode* mem = merged_memory();
2044   Node* region = control();
2045   assert(region->is_Region(), "");
2046 
2047   Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2048   assert(o != nullptr && o != top(), "");
2049 
2050   PhiNode* phi;
2051   if (o->is_Phi() && o->as_Phi()->region() == region) {
2052     phi = o->as_Phi();
2053     if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2054       // clone the shared base memory phi to make a new memory split
2055       assert(!nocreate, "Cannot build a phi for a block already parsed.");
2056       const Type* t = phi->bottom_type();
2057       const TypePtr* adr_type = C->get_adr_type(idx);

2185     Node* chk   = _gvn.transform( new CmpINode(opq, profile_state) );
2186     Node* tst   = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
2187     // Branch to failure if state was changed
2188     { BuildCutout unless(this, tst, PROB_ALWAYS);
2189       uncommon_trap(Deoptimization::Reason_rtm_state_change,
2190                     Deoptimization::Action_make_not_entrant);
2191     }
2192   }
2193 #endif
2194 }
2195 
2196 //------------------------------return_current---------------------------------
2197 // Append current _map to _exit_return
2198 void Parse::return_current(Node* value) {
2199   if (RegisterFinalizersAtInit &&
2200       method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2201     call_register_finalizer();
2202   }
2203 
2204   // Do not set_parse_bci, so that return goo is credited to the return insn.
2205   set_bci(InvocationEntryBci);
2206   if (method()->is_synchronized() && GenerateSynchronizationCode) {
2207     shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2208   }
2209   if (C->env()->dtrace_method_probes()) {
2210     make_dtrace_method_exit(method());
2211   }
2212   SafePointNode* exit_return = _exits.map();
2213   exit_return->in( TypeFunc::Control  )->add_req( control() );
2214   exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
2215   Node *mem = exit_return->in( TypeFunc::Memory   );
2216   for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2217     if (mms.is_empty()) {
2218       // get a copy of the base memory, and patch just this one input
2219       const TypePtr* adr_type = mms.adr_type(C);
2220       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2221       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2222       gvn().set_type_bottom(phi);
2223       phi->del_req(phi->req()-1);  // prepare to re-patch
2224       mms.set_memory(phi);
2225     }
2226     mms.memory()->add_req(mms.memory2());
2227   }
2228 
2229   // frame pointer is always same, already captured
2230   if (value != nullptr) {
2231     // If returning oops to an interface-return, there is a silent free
2232     // cast from oop to interface allowed by the Verifier.  Make it explicit
2233     // here.
2234     Node* phi = _exits.argument(0);
2235     phi->add_req(value);
2236   }
2237 
2238   if (_first_return) {
2239     _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2240     _first_return = false;
2241   } else {
2242     _exits.map()->merge_replaced_nodes_with(map());
2243   }
2244 
2245   stop_and_kill_map();          // This CFG path dies here
2246 }
2247 
2248 
2249 //------------------------------add_safepoint----------------------------------
2250 void Parse::add_safepoint() {
2251   uint parms = TypeFunc::Parms+1;
2252 
2253   // Clear out dead values from the debug info.
2254   kill_dead_locals();
2255 
2256   // Clone the JVM State
2257   SafePointNode *sfpnt = new SafePointNode(parms, nullptr);

  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "interpreter/linkResolver.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "oops/method.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/c2compiler.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/convertnode.hpp"
  34 #include "opto/idealGraphPrinter.hpp"
  35 #include "opto/inlinetypenode.hpp"
  36 #include "opto/locknode.hpp"
  37 #include "opto/memnode.hpp"
  38 #include "opto/opaquenode.hpp"
  39 #include "opto/parse.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/runtime.hpp"
  42 #include "opto/type.hpp"
  43 #include "runtime/handles.inline.hpp"
  44 #include "runtime/safepointMechanism.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "utilities/bitMap.inline.hpp"
  47 #include "utilities/copy.hpp"
  48 
  49 // Static array so we can figure out which bytecodes stop us from compiling
  50 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
  51 // and eventually should be encapsulated in a proper class (gri 8/18/98).
  52 
  53 #ifndef PRODUCT
  54 uint nodes_created             = 0;
  55 uint methods_parsed            = 0;

  87   }
  88   if (all_null_checks_found) {
  89     tty->print_cr("%u made implicit (%2u%%)", implicit_null_checks,
  90                   (100*implicit_null_checks)/all_null_checks_found);
  91   }
  92   if (SharedRuntime::_implicit_null_throws) {
  93     tty->print_cr("%u implicit null exceptions at runtime",
  94                   SharedRuntime::_implicit_null_throws);
  95   }
  96 
  97   if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
  98     BytecodeParseHistogram::print();
  99   }
 100 }
 101 #endif
 102 
 103 //------------------------------ON STACK REPLACEMENT---------------------------
 104 
 105 // Construct a node which can be used to get incoming state for
 106 // on stack replacement.
 107 Node* Parse::fetch_interpreter_state(int index,
 108                                      const Type* type,
 109                                      Node* local_addrs,
 110                                      Node* local_addrs_base) {
 111   BasicType bt = type->basic_type();
 112   if (type == TypePtr::NULL_PTR) {
 113     // Ptr types are mixed together with T_ADDRESS but nullptr is
 114     // really for T_OBJECT types so correct it.
 115     bt = T_OBJECT;
 116   }
 117   Node *mem = memory(Compile::AliasIdxRaw);
 118   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
 119   Node *ctl = control();
 120 
  121   // Very similar to LoadNode::make, except we handle unaligned longs and
  122   // doubles on SPARC.  Intel can handle them just fine directly.
 123   Node *l = nullptr;
 124   switch (bt) {                // Signature is flattened
 125   case T_INT:     l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        MemNode::unordered); break;
 126   case T_FLOAT:   l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         MemNode::unordered); break;
 127   case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,  MemNode::unordered); break;
 128   case T_PRIMITIVE_OBJECT:
 129   case T_OBJECT:  l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
 130   case T_LONG:
 131   case T_DOUBLE: {
 132     // Since arguments are in reverse order, the argument address 'adr'
 133     // refers to the back half of the long/double.  Recompute adr.
 134     adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
 135     if (Matcher::misaligned_doubles_ok) {
 136       l = (bt == T_DOUBLE)
 137         ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
 138         : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
 139     } else {
 140       l = (bt == T_DOUBLE)
 141         ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
 142         : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
 143     }
 144     break;
 145   }
 146   default: ShouldNotReachHere();
 147   }
 148   return _gvn.transform(l);
 149 }
 150 
 151 // Helper routine to prevent the interpreter from handing
 152 // unexpected typestate to an OSR method.
 153 // The Node l is a value newly dug out of the interpreter frame.
 154 // The type is the type predicted by ciTypeFlow.  Note that it is
 155 // not a general type, but can only come from Type::get_typeflow_type.
 156 // The safepoint is a map which will feed an uncommon trap.
 157 Node* Parse::check_interpreter_type(Node* l, const Type* type,
 158                                     SafePointNode* &bad_type_exit) {

 159   const TypeOopPtr* tp = type->isa_oopptr();
 160 
 161   // TypeFlow may assert null-ness if a type appears unloaded.
 162   if (type == TypePtr::NULL_PTR ||
 163       (tp != nullptr && !tp->is_loaded())) {
 164     // Value must be null, not a real oop.
 165     Node* chk = _gvn.transform( new CmpPNode(l, null()) );
 166     Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
 167     IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
 168     set_control(_gvn.transform( new IfTrueNode(iff) ));
 169     Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
 170     bad_type_exit->control()->add_req(bad_type);
 171     l = null();
 172   }
 173 
 174   // Typeflow can also cut off paths from the CFG, based on
 175   // types which appear unloaded, or call sites which appear unlinked.
 176   // When paths are cut off, values at later merge points can rise
 177   // toward more specific classes.  Make sure these specific classes
 178   // are still in effect.
 179   if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) {
 180     // TypeFlow asserted a specific object type.  Value must have that type.
 181     Node* bad_type_ctrl = nullptr;
 182     if (tp->is_inlinetypeptr() && !tp->maybe_null()) {
 183       // Check inline types for null here to prevent checkcast from adding an
 184       // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
 185       l = null_check_oop(l, &bad_type_ctrl);
 186       bad_type_exit->control()->add_req(bad_type_ctrl);
 187     }
 188     l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl);
 189     bad_type_exit->control()->add_req(bad_type_ctrl);
 190   }
 191 
 192   assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
 193   return l;
 194 }
 195 
 196 // Helper routine which sets up elements of the initial parser map when
 197 // performing a parse for on stack replacement.  Add values into map.
  198 // The only parameter contains the address of the interpreter arguments.
 199 void Parse::load_interpreter_state(Node* osr_buf) {
 200   int index;
 201   int max_locals = jvms()->loc_size();
 202   int max_stack  = jvms()->stk_size();
 203 

 204   // Mismatch between method and jvms can occur since map briefly held
 205   // an OSR entry state (which takes up one RawPtr word).
 206   assert(max_locals == method()->max_locals(), "sanity");
 207   assert(max_stack  >= method()->max_stack(),  "sanity");
 208   assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
 209   assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
 210 
 211   // Find the start block.
 212   Block* osr_block = start_block();
 213   assert(osr_block->start() == osr_bci(), "sanity");
 214 
 215   // Set initial BCI.
 216   set_parse_bci(osr_block->start());
 217 
 218   // Set initial stack depth.
 219   set_sp(osr_block->start_sp());
 220 
 221   // Check bailouts.  We currently do not perform on stack replacement
 222   // of loops in catch blocks or loops which branch with a non-empty stack.
 223   if (sp() != 0) {
 224     C->record_method_not_compilable("OSR starts with non-empty stack");
 225     return;
 226   }
 227   // Do not OSR inside finally clauses:
 228   if (osr_block->has_trap_at(osr_block->start())) {
 229     assert(false, "OSR starts with an immediate trap");
 230     C->record_method_not_compilable("OSR starts with an immediate trap");
 231     return;
 232   }
 233 
 234   // Commute monitors from interpreter frame to compiler frame.
 235   assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
 236   int mcnt = osr_block->flow()->monitor_count();
 237   Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
 238   for (index = 0; index < mcnt; index++) {
 239     // Make a BoxLockNode for the monitor.
 240     Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
 241 

 242     // Displaced headers and locked objects are interleaved in the
 243     // temp OSR buffer.  We only copy the locked objects out here.
 244     // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
 245     Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
 246     // Try and copy the displaced header to the BoxNode
 247     Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);

 248 
 249     store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
 250 
 251     // Build a bogus FastLockNode (no code will be generated) and push the
 252     // monitor into our debug info.
 253     const FastLockNode *flock = _gvn.transform(new FastLockNode( 0, lock_object, box ))->as_FastLock();
 254     map()->push_monitor(flock);
 255 
 256     // If the lock is our method synchronization lock, tuck it away in
 257     // _sync_lock for return and rethrow exit paths.
 258     if (index == 0 && method()->is_synchronized()) {
 259       _synch_lock = flock;
 260     }
 261   }
 262 
 263   // Use the raw liveness computation to make sure that unexpected
 264   // values don't propagate into the OSR frame.
 265   MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
 266   if (!live_locals.is_valid()) {
 267     // Degenerate or breakpointed method.

 295         if (C->log() != nullptr) {
 296           C->log()->elem("OSR_mismatch local_index='%d'",index);
 297         }
 298         set_local(index, null());
 299         // and ignore it for the loads
 300         continue;
 301       }
 302     }
 303 
 304     // Filter out TOP, HALF, and BOTTOM.  (Cf. ensure_phi.)
 305     if (type == Type::TOP || type == Type::HALF) {
 306       continue;
 307     }
 308     // If the type falls to bottom, then this must be a local that
 309     // is mixing ints and oops or some such.  Forcing it to top
 310     // makes it go dead.
 311     if (type == Type::BOTTOM) {
 312       continue;
 313     }
 314     // Construct code to access the appropriate local.
 315     Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);
 316     set_local(index, value);
 317   }
 318 
 319   // Extract the needed stack entries from the interpreter frame.
 320   for (index = 0; index < sp(); index++) {
 321     const Type *type = osr_block->stack_type_at(index);
 322     if (type != Type::TOP) {
 323       // Currently the compiler bails out when attempting to on stack replace
 324       // at a bci with a non-empty stack.  We should not reach here.
 325       ShouldNotReachHere();
 326     }
 327   }
 328 
 329   // End the OSR migration
 330   make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
 331                     CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
 332                     "OSR_migration_end", TypeRawPtr::BOTTOM,
 333                     osr_buf);
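       // OSR_migration_end is a leaf runtime call that releases the temporary
       // migration buffer (see SharedRuntime::OSR_migration_begin/end), so all
       // of the fetch_interpreter_state() loads above must be wired in before
       // this call.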
 334 
 335   // Now that the interpreter state is loaded, make sure it will match

 498     log->elem("observe that='has_exception_handlers'");
 499   }
 500 
 501   assert(InlineTree::check_can_parse(method()) == nullptr, "Can not parse this method, cutout earlier");
 502   assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier");
 503 
 504   // Always register dependence if JVMTI is enabled, because
 505   // either breakpoint setting or hotswapping of methods may
 506   // cause deoptimization.
 507   if (C->env()->jvmti_can_hotswap_or_post_breakpoint()) {
 508     C->dependencies()->assert_evol_method(method());
 509   }
 510 
 511   NOT_PRODUCT(methods_seen++);
 512 
 513   // Do some special top-level things.
 514   if (depth() == 1 && C->is_osr_compilation()) {
 515     _entry_bci = C->entry_bci();
 516     _flow = method()->get_osr_flow_analysis(osr_bci());
 517     if (_flow->failing()) {
 518       // TODO Adding a trap due to an unloaded return type in ciTypeFlow::StateVector::do_invoke
 519       // can lead to this. Re-enable once 8284443 is fixed.
 520       // assert(false, "type flow analysis failed for OSR compilation");
 521       C->record_method_not_compilable(_flow->failure_reason());
 522 #ifndef PRODUCT
 523       if (PrintOpto && (Verbose || WizardMode)) {
 524         tty->print_cr("OSR @%d type flow bailout: %s", _entry_bci, _flow->failure_reason());
 525         if (Verbose) {
 526           method()->print();
 527           method()->print_codes();
 528           _flow->print();
 529         }
 530       }
 531 #endif
 532     }
 533     _tf = C->tf();     // the OSR entry type is different
 534   }
 535 
 536 #ifdef ASSERT
 537   if (depth() == 1) {
 538     assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
 539   } else {
 540     assert(!this->is_osr_parse(), "no recursive OSR");

 588     do_method_entry();
 589   }
 590 
 591   if (depth() == 1 && !failing()) {
 592     if (C->clinit_barrier_on_entry()) {
 593       // Add check to deoptimize the nmethod once the holder class is fully initialized
 594       clinit_deopt();
 595     }
 596 
 597     // Add check to deoptimize the nmethod if RTM state was changed
 598     rtm_deopt();
 599   }
 600 
 601   // Check for bailouts during method entry or RTM state check setup.
 602   if (failing()) {
 603     if (log)  log->done("parse");
 604     C->set_default_node_notes(caller_nn);
 605     return;
 606   }
 607 
 608   // Handle inline type arguments
 609   int arg_size = method()->arg_size();
 610   for (int i = 0; i < arg_size; i++) {
 611     Node* parm = local(i);
 612     const Type* t = _gvn.type(parm);
 613     if (t->is_inlinetypeptr()) {
 614       // Create InlineTypeNode from the oop and replace the parameter
 615       Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass(), !t->maybe_null());
 616       set_local(i, vt);
 617     } else if (UseTypeSpeculation && (i == (arg_size - 1)) && !is_osr_parse() && method()->has_vararg() &&
 618                t->isa_aryptr() != nullptr && !t->is_aryptr()->is_null_free() && !t->is_aryptr()->is_not_null_free()) {
 619       // Speculate on varargs Object array being not null-free (and therefore also not flat)
 620       const TypePtr* spec_type = t->speculative();
 621       spec_type = (spec_type != nullptr && spec_type->isa_aryptr() != nullptr) ? spec_type : t->is_aryptr();
 622       spec_type = spec_type->remove_speculative()->is_aryptr()->cast_to_not_null_free();
 623       spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, spec_type);
 624       Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, t->join_speculative(spec_type)));
 625       set_local(i, cast);
 626     }
 627   }
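       // Sketch of the speculation in the branch above (hypothetical example):
       // for a vararg method m(Object... args), the trailing Object[] argument
       // is speculatively typed as not null-free (and therefore not flat), so
       // later accesses to it can be compiled without flat-array handling; if
       // the speculation ever fails, it is invalidated through the usual
       // speculative-type trap mechanism.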
 628 
 629   entry_map = map();  // capture any changes performed by method setup code
 630   assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
 631 
 632   // We begin parsing as if we have just encountered a jump to the
 633   // method entry.
 634   Block* entry_block = start_block();
 635   assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
 636   set_map_clone(entry_map);
 637   merge_common(entry_block, entry_block->next_path_num());
 638 
 639 #ifndef PRODUCT
 640   BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
 641   set_parse_histogram( parse_histogram_obj );
 642 #endif
 643 
 644   // Parse all the basic blocks.
 645   do_all_blocks();
 646 
 647   // Check for bailouts during conversion to graph
 648   if (failing()) {

 794 void Parse::build_exits() {
 795   // make a clone of caller to prevent sharing of side-effects
 796   _exits.set_map(_exits.clone_map());
 797   _exits.clean_stack(_exits.sp());
 798   _exits.sync_jvms();
 799 
 800   RegionNode* region = new RegionNode(1);
 801   record_for_igvn(region);
 802   gvn().set_type_bottom(region);
 803   _exits.set_control(region);
 804 
 805   // Note:  iophi and memphi are not transformed until do_exits.
 806   Node* iophi  = new PhiNode(region, Type::ABIO);
 807   Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
 808   gvn().set_type_bottom(iophi);
 809   gvn().set_type_bottom(memphi);
 810   _exits.set_i_o(iophi);
 811   _exits.set_all_memory(memphi);
 812 
 813   // Add a return value to the exit state.  (Do not push it yet.)
 814   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
 815     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
 816     if (ret_type->isa_int()) {
 817       BasicType ret_bt = method()->return_type()->basic_type();
 818       if (ret_bt == T_BOOLEAN ||
 819           ret_bt == T_CHAR ||
 820           ret_bt == T_BYTE ||
 821           ret_bt == T_SHORT) {
 822         ret_type = TypeInt::INT;
 823       }
 824     }
 825 
 826     // Don't "bind" an unloaded return klass to the ret_phi. If the klass
 827     // becomes loaded during the subsequent parsing, the loaded and unloaded
 828     // types will not join when we transform and push in do_exits().
 829     const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
 830     if (ret_oop_type && !ret_oop_type->is_loaded()) {
 831       ret_type = TypeOopPtr::BOTTOM;
 832     }
 833     int         ret_size = type2size[ret_type->basic_type()];
 834     Node*       ret_phi  = new PhiNode(region, ret_type);
 835     gvn().set_type_bottom(ret_phi);
 836     _exits.ensure_stack(ret_size);
 837     assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
 838     assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
 839     _exits.set_argument(0, ret_phi);  // here is where the parser finds it
 840     // Note:  ret_phi is not yet pushed, until do_exits.
 841   }
 842 }
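       // Sketch of the exit skeleton built above.  Each return_current() call
       // later adds one input per normal return path to the Region and to each
       // of these phis, and do_exits() transforms them:
       //
       //               RegionNode (_exits.control)
       //              /     |     \
       //         iophi   memphi   ret_phi (only if the method returns a value)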
 843 
 844 //----------------------------build_start_state-------------------------------
 845 // Construct a state which contains only the incoming arguments from an
 846 // unknown caller.  The method & bci will be null & InvocationEntryBci.
 847 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
 848   int        arg_size = tf->domain_sig()->cnt();
 849   int        max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
 850   JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
 851   SafePointNode* map  = new SafePointNode(max_size, jvms);
 852   jvms->set_map(map);
 853   record_for_igvn(map);
 854   assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
 855   Node_Notes* old_nn = default_node_notes();
 856   if (old_nn != nullptr && has_method()) {
 857     Node_Notes* entry_nn = old_nn->clone(this);
 858     JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
 859     entry_jvms->set_offsets(0);
 860     entry_jvms->set_bci(entry_bci());
 861     entry_nn->set_jvms(entry_jvms);
 862     set_default_node_notes(entry_nn);
 863   }
 864   PhaseGVN& gvn = *initial_gvn();
 865   uint i = 0;
 866   int arg_num = 0;
 867   for (uint j = 0; i < (uint)arg_size; i++) {
 868     const Type* t = tf->domain_sig()->field_at(i);
 869     Node* parm = nullptr;
 870     if (t->is_inlinetypeptr() && method()->is_scalarized_arg(arg_num)) {
 871       // Inline type arguments are not passed by reference: we get an argument per
 872       // field of the inline type. Build InlineTypeNodes from the inline type arguments.
 873       GraphKit kit(jvms, &gvn);
 874       kit.set_control(map->control());
 875       Node* old_mem = map->memory();
 876       // Use immutable memory for inline type loads and restore it below
 877       kit.set_all_memory(C->immutable_memory());
 878       parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
 879       map->set_control(kit.control());
 880       map->set_memory(old_mem);
 881     } else {
 882       parm = gvn.transform(new ParmNode(start, j++));
 883     }
 884     map->init_req(i, parm);
 885     // Record all these guys for later GVN.
 886     record_for_igvn(parm);
 887     if (i >= TypeFunc::Parms && t != Type::HALF) {
 888       arg_num++;
 889     }
 890   }
 891   for (; i < map->req(); i++) {
 892     map->init_req(i, top());
 893   }
 894   assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
 895   set_default_node_notes(old_nn);
 896   return jvms;
 897 }
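       // For illustration, assuming a scalarized parameter of a hypothetical
       // type `value class Point { int x; int y; }`: the caller passes x and y
       // (plus an extra IsInit input when the argument is nullable) as separate
       // incoming values; make_from_multi() above reassembles them into a
       // single InlineTypeNode, advancing `j` past the consumed inputs, while
       // `i` walks the declared signature slots.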
 898 
 899 //-----------------------------make_node_notes---------------------------------
 900 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
 901   if (caller_nn == nullptr)  return nullptr;
 902   Node_Notes* nn = caller_nn->clone(C);
 903   JVMState* caller_jvms = nn->jvms();
 904   JVMState* jvms = new (C) JVMState(method(), caller_jvms);
 905   jvms->set_offsets(0);
 906   jvms->set_bci(_entry_bci);
 907   nn->set_jvms(jvms);
 908   return nn;
 909 }
 910 
 911 
 912 //--------------------------return_values--------------------------------------
 913 void Compile::return_values(JVMState* jvms) {
 914   GraphKit kit(jvms);
 915   Node* ret = new ReturnNode(TypeFunc::Parms,
 916                              kit.control(),
 917                              kit.i_o(),
 918                              kit.reset_memory(),
 919                              kit.frameptr(),
 920                              kit.returnadr());
 921   // Add zero or one return value
 922   int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
 923   if (ret_size > 0) {
 924     kit.inc_sp(-ret_size);  // pop the return value(s)
 925     kit.sync_jvms();
 926     Node* res = kit.argument(0);
 927     if (tf()->returns_inline_type_as_fields()) {
 928       // Multiple return values (inline type fields): add one edge to the
 929       // Return node per returned value.
 930       InlineTypeNode* vt = res->as_InlineType();
 931       ret->add_req_batch(nullptr, tf()->range_cc()->cnt() - TypeFunc::Parms);
 932       if (vt->is_allocated(&kit.gvn()) && !StressCallingConvention) {
 933         ret->init_req(TypeFunc::Parms, vt->get_oop());
 934       } else {
 935         // Return the tagged klass pointer to signal scalarization to the caller
 936         Node* tagged_klass = vt->tagged_klass(kit.gvn());
 937         if (!method()->signature()->returns_null_free_inline_type()) {
 938           // Return null if the inline type is null (IsInit field is not set)
 939           Node* conv   = kit.gvn().transform(new ConvI2LNode(vt->get_is_init()));
 940           Node* shl    = kit.gvn().transform(new LShiftLNode(conv, kit.intcon(63)));
 941           Node* shr    = kit.gvn().transform(new RShiftLNode(shl, kit.intcon(63)));
 942           tagged_klass = kit.gvn().transform(new AndLNode(tagged_klass, shr));
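           // In scalar terms, the three nodes above compute:
           //   jlong mask = ((jlong)is_init << 63) >> 63;  // 0 if IsInit is clear, else all ones
           //   tagged_klass &= mask;                       // becomes null when the inline type is null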
 943         }
 944         ret->init_req(TypeFunc::Parms, tagged_klass);
 945       }
 946       uint idx = TypeFunc::Parms + 1;
 947       vt->pass_fields(&kit, ret, idx, false, method()->signature()->returns_null_free_inline_type());
 948     } else {
 949       ret->add_req(res);
 950       // Note:  The second dummy edge is not needed by a ReturnNode.
 951     }
 952   }
 953   // bind it to root
 954   root()->add_req(ret);
 955   record_for_igvn(ret);
 956   initial_gvn()->transform_no_reclaim(ret);
 957 }
 958 
 959 //------------------------rethrow_exceptions-----------------------------------
 960 // Bind all exception states in the list into a single RethrowNode.
 961 void Compile::rethrow_exceptions(JVMState* jvms) {
 962   GraphKit kit(jvms);
 963   if (!kit.has_exceptions())  return;  // nothing to generate
 964   // Load my combined exception state into the kit, with all phis transformed:
 965   SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
 966   Node* ex_oop = kit.use_exception_state(ex_map);
 967   RethrowNode* exit = new RethrowNode(kit.control(),
 968                                       kit.i_o(), kit.reset_memory(),
 969                                       kit.frameptr(), kit.returnadr(),
 970                                       // like a return but with exception input
 971                                       ex_oop);

1055   //    to complete, we force all writes to complete.
1056   //
1057   // 2. Experimental VM option is used to force the barrier if any field
1058   //    was written out in the constructor.
1059   //
1060   // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1061   //    support_IRIW_for_not_multiple_copy_atomic_cpu selects that
1062   //    MemBarVolatile is used before volatile load instead of after volatile
1063   //    store, so there's no barrier after the store.
1064   //    We want to guarantee the same behavior as on platforms with total store
1065   //    order, although this is not required by the Java memory model.
1066   //    In this case, we want to enforce visibility of volatile field
1067   //    initializations which are performed in constructors.
1068   //    So as with finals, we add a barrier here.
1069   //
1070   // "All bets are off" unless the first publication occurs after a
1071   // normal return from the constructor.  We do not attempt to detect
1072   // such unusual early publications.  But no barrier is needed on
1073   // exceptional returns, since they cannot publish normally.
1074   //
1075   if (method()->is_object_constructor_or_class_initializer() &&
1076        (wrote_final() ||
1077          (AlwaysSafeConstructors && wrote_fields()) ||
1078          (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1079     _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
1080 
1081     // If Memory barrier is created for final fields write
1082     // and allocation node does not escape the initialize method,
1083     // then barrier introduced by allocation node can be removed.
1084     if (DoEscapeAnalysis && alloc_with_final()) {
1085       AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_with_final());
1086       alloc->compute_MemBar_redundancy(method());
1087     }
1088     if (PrintOpto && (Verbose || WizardMode)) {
1089       method()->print_name();
1090       tty->print_cr(" writes finals and needs a memory barrier");
1091     }
1092   }
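       // Java-level sketch of what the MemBarRelease above guarantees:
       //   class C { final int f;  C() { f = 42; } }
       //   ...                     shared = new C();  // publish after normal return
       // a racy reader of `shared` can then not observe f == 0.  Publication
       // from inside the constructor ("early publication") is not covered, per
       // the "all bets are off" note above.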
1093 
1094   // Any method can write a @Stable field; insert memory barriers
1095   // after those also. Can't bind predecessor allocation node (if any)
1096   // with barrier because allocation doesn't always dominate
1097   // MemBarRelease.
1098   if (wrote_stable()) {
1099     _exits.insert_mem_bar(Op_MemBarRelease);
1100     if (PrintOpto && (Verbose || WizardMode)) {
1101       method()->print_name();
1102       tty->print_cr(" writes @Stable and needs a memory barrier");
1103     }
1104   }
1105 
1106   for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1107     // transform each slice of the original memphi:
1108     mms.set_memory(_gvn.transform(mms.memory()));
1109   }
1110   // Clean up input MergeMems created by transforming the slices
1111   _gvn.transform(_exits.merged_memory());
1112 
1113   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
1114     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
1115     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
1116     if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1117       // If the type we set for the ret_phi in build_exits() is too optimistic and
1118       // the ret_phi is top now, there's an extremely small chance that it may be due to class
1119       // loading.  It could also be due to an error, so mark this method as not compilable because
1120       // otherwise this could lead to an infinite compile loop.
1121       // In any case, this code path is rarely (and never in my testing) reached.
1122 #ifdef ASSERT
1123       tty->print_cr("# Can't determine return type.");
1124       tty->print_cr("# exit control");
1125       _exits.control()->dump(2);
1126       tty->print_cr("# ret phi type");
1127       _gvn.type(ret_phi)->dump();
1128       tty->print_cr("# ret phi");
1129       ret_phi->dump(2);
1130 #endif // ASSERT
1131       assert(false, "Can't determine return type.");
1132       C->record_method_not_compilable("Can't determine return type.");
1133       return;
1134     }
1198 
1199 //-----------------------------create_entry_map-------------------------------
1200 // Initialize our parser map to contain the types at method entry.
1201 // For OSR, the map contains a single RawPtr parameter.
1202 // Initial monitor locking for sync. methods is performed by do_method_entry.
1203 SafePointNode* Parse::create_entry_map() {
1204   // Check for really stupid bail-out cases.
1205   uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1206   if (len >= 32760) {
1207     // Bailout expected, this is a very rare edge case.
1208     C->record_method_not_compilable("too many local variables");
1209     return nullptr;
1210   }
1211 
1212   // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1213   _caller->map()->delete_replaced_nodes();
1214 
1215   // If this is an inlined method, we may have to do a receiver null check.
1216   if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1217     GraphKit kit(_caller);
1218     kit.null_check_receiver_before_call(method(), false);
1219     _caller = kit.transfer_exceptions_into_jvms();
1220     if (kit.stopped()) {
1221       _exits.add_exception_states_from(_caller);
1222       _exits.set_jvms(_caller);
1223       return nullptr;
1224     }
1225   }
1226 
1227   assert(method() != nullptr, "parser must have a method");
1228 
1229   // Create an initial safepoint to hold JVM state during parsing
1230   JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr);
1231   set_map(new SafePointNode(len, jvms));
1232   jvms->set_map(map());
1233   record_for_igvn(map());
1234   assert(jvms->endoff() == len, "correct jvms sizing");
1235 
1236   SafePointNode* inmap = _caller->map();
1237   assert(inmap != nullptr, "must have inmap");
1238   // In case of null check on receiver above
1239   map()->transfer_replaced_nodes_from(inmap, _new_idx);
1240 
1241   uint i;
1242 
1243   // Pass thru the predefined input parameters.
1244   for (i = 0; i < TypeFunc::Parms; i++) {
1245     map()->init_req(i, inmap->in(i));
1246   }
1247 
1248   if (depth() == 1) {
1249     assert(map()->memory()->Opcode() == Op_Parm, "");
1250     // Insert the memory aliasing node
1251     set_all_memory(reset_memory());
1252   }
1253   assert(merged_memory(), "");
1254 
1255   // Now add the locals which are initially bound to arguments:
1256   uint arg_size = tf()->domain_sig()->cnt();
1257   ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
1258   for (i = TypeFunc::Parms; i < arg_size; i++) {
1259     map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1260   }
1261 
1262   // Clear out the rest of the map (locals and stack)
1263   for (i = arg_size; i < len; i++) {
1264     map()->init_req(i, top());
1265   }
1266 
1267   SafePointNode* entry_map = stop();
1268   return entry_map;
1269 }
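       // Resulting map layout (this is what the jvms->endoff() == len assert
       // above checks):
       //   in(0 .. TypeFunc::Parms-1)        control, i_o, memory, frameptr, retadr
       //   in(Parms .. Parms+max_locals-1)   locals (argument values first, rest top)
       //   in(Parms+max_locals .. len-1)     expression stack (all top at entry)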
1270 
1271 //-----------------------------do_method_entry--------------------------------
1272 // Emit any code needed in the pseudo-block before BCI zero.
1273 // The main thing to do is lock the receiver of a synchronized method.
1274 void Parse::do_method_entry() {
1275   set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1276   set_sp(0);                         // Java Stack Pointer

1310 
1311   // If the method is synchronized, we need to construct a lock node, attach
1312   // it to the Start node, and pin it there.
1313   if (method()->is_synchronized()) {
1314     // Insert a FastLockNode right after the Start which takes as arguments
1315     // the current thread pointer, the "this" pointer & the address of the
1316     // stack slot pair used for the lock.  The "this" pointer is a projection
1317     // off the start node, but the locking spot has to be constructed by
1318     // creating a ConLNode of 0, and boxing it with a BoxLockNode.  The BoxLockNode
1319     // becomes the second argument to the FastLockNode call.  The
1320     // FastLockNode becomes the new control parent to pin it to the start.
1321 
1322     // Setup Object Pointer
1323     Node *lock_obj = nullptr;
1324     if (method()->is_static()) {
1325       ciInstance* mirror = _method->holder()->java_mirror();
1326       const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1327       lock_obj = makecon(t_lock);
1328     } else {                  // Else pass the "this" pointer,
1329       lock_obj = local(0);    // which is Parm0 from StartNode
1330       assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
1331     }
1332     // Clear out dead values from the debug info.
1333     kill_dead_locals();
1334     // Build the FastLockNode
1335     _synch_lock = shared_lock(lock_obj);
1336   }
1337 
1338   // Feed profiling data for parameters to the type system so it can
1339   // propagate it as speculative types
1340   record_profiled_parameters_for_speculation();
1341 }
1342 
1343 //------------------------------init_blocks------------------------------------
1344 // Initialize our parser map to contain the types/monitors at method entry.
1345 void Parse::init_blocks() {
1346   // Create the blocks.
1347   _block_count = flow()->block_count();
1348   _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1349 
1350   // Initialize the structs.

1727 //--------------------handle_missing_successor---------------------------------
1728 void Parse::handle_missing_successor(int target_bci) {
1729 #ifndef PRODUCT
1730   Block* b = block();
1731   int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1732   tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1733 #endif
1734   ShouldNotReachHere();
1735 }
1736 
1737 //--------------------------merge_common---------------------------------------
1738 void Parse::merge_common(Parse::Block* target, int pnum) {
1739   if (TraceOptoParse) {
1740     tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1741   }
1742 
1743   // Zap extra stack slots to top
1744   assert(sp() == target->start_sp(), "");
1745   clean_stack(sp());
1746 
1747   // Check for merge conflicts involving inline types
1748   JVMState* old_jvms = map()->jvms();
1749   int old_bci = bci();
1750   JVMState* tmp_jvms = old_jvms->clone_shallow(C);
1751   tmp_jvms->set_should_reexecute(true);
1752   tmp_jvms->bind_map(map());
1753   // Execution needs to restart at the next bytecode (the entry of the
1754   // next block).
1755   if (target->is_merged() ||
1756       pnum > PhiNode::Input ||
1757       target->is_handler() ||
1758       target->is_loop_head()) {
1759     set_parse_bci(target->start());
1760     for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1761       Node* n = map()->in(j);                 // Incoming change to target state.
1762       const Type* t = nullptr;
1763       if (tmp_jvms->is_loc(j)) {
1764         t = target->local_type_at(j - tmp_jvms->locoff());
1765       } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1766         t = target->stack_type_at(j - tmp_jvms->stkoff());
1767       }
1768       if (t != nullptr && t != Type::BOTTOM) {
1769         if (n->is_InlineType() && !t->is_inlinetypeptr()) {
1770           // Allocate inline type in src block to be able to merge it with oop in target block
1771           map()->set_req(j, n->as_InlineType()->buffer(this));
1772         } else if (!n->is_InlineType() && t->is_inlinetypeptr()) {
1773           // Scalarize null in src block to be able to merge it with inline type in target block
1774           assert(gvn().type(n)->is_zero_type(), "Should have been scalarized");
1775           map()->set_req(j, InlineTypeNode::make_null(gvn(), t->inline_klass()));
1776         }
1777       }
1778     }
1779   }
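       // Example of the conflict handled above: one predecessor may leave a
       // scalarized InlineTypeNode in a local while the target block's type
       // says oop (or a predecessor leaves a null constant where the target
       // expects an inline type).  Buffering or scalarizing here lets
       // ensure_phi() build a single well-typed phi at the merge point.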
1780   old_jvms->bind_map(map());
1781   set_parse_bci(old_bci);
1782 
1783   if (!target->is_merged()) {   // No prior mapping at this bci
1784     if (TraceOptoParse) { tty->print(" with empty state");  }
1785 
1786     // If this path is dead, do not bother capturing it as a merge.
1787     // It is "as if" we had 1 fewer predecessors from the beginning.
1788     if (stopped()) {
1789       if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
1790       return;
1791     }
1792 
1793     // Make a region if we know there are multiple or unpredictable inputs.
1794     // (Also, if this is a plain fall-through, we might see another region,
1795     // which must not be allowed into this block's map.)
1796     if (pnum > PhiNode::Input         // Known multiple inputs.
1797         || target->is_handler()       // These have unpredictable inputs.
1798         || target->is_loop_head()     // Known multiple inputs
1799         || control()->is_Region()) {  // We must hide this guy.
1800 
1801       int current_bci = bci();
1802       set_parse_bci(target->start()); // Set target bci

1817       record_for_igvn(r);
1818       // zap all inputs to null for debugging (done in Node(uint) constructor)
1819       // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); }
1820       r->init_req(pnum, control());
1821       set_control(r);
1822       target->copy_irreducible_status_to(r, jvms());
1823       set_parse_bci(current_bci); // Restore bci
1824     }
1825 
1826     // Convert the existing Parser mapping into a mapping at this bci.
1827     store_state_to(target);
1828     assert(target->is_merged(), "do not come here twice");
1829 
1830   } else {                      // Prior mapping at this bci
1831     if (TraceOptoParse) {  tty->print(" with previous state"); }
1832 #ifdef ASSERT
1833     if (target->is_SEL_head()) {
1834       target->mark_merged_backedge(block());
1835     }
1836 #endif
1837 
1838     // We must not manufacture more phis if the target is already parsed.
1839     bool nophi = target->is_parsed();
1840 
1841     SafePointNode* newin = map();// Hang on to incoming mapping
1842     Block* save_block = block(); // Hang on to incoming block;
1843     load_state_from(target);    // Get prior mapping
1844 
1845     assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1846     assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1847     assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1848     assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1849 
1850     // Iterate over my current mapping and the old mapping.
1851     // Where different, insert Phi functions.
1852     // Use any existing Phi functions.
1853     assert(control()->is_Region(), "must be merging to a region");
1854     RegionNode* r = control()->as_Region();
1855 
1856     // Compute where to merge into
1857     // Merge incoming control path
1858     r->init_req(pnum, newin->control());
1859 
1860     if (pnum == 1) {            // Last merge for this Region?
1861       if (!block()->flow()->is_irreducible_loop_secondary_entry()) {
1862         Node* result = _gvn.transform_no_reclaim(r);
1863         if (r != result && TraceOptoParse) {
1864           tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1865         }
1866       }
1867       record_for_igvn(r);
1868     }
1869 
1870     // Update all the non-control inputs to map:
1871     assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1872     bool check_elide_phi = target->is_SEL_backedge(save_block);
1873     bool last_merge = (pnum == PhiNode::Input);
1874     for (uint j = 1; j < newin->req(); j++) {
1875       Node* m = map()->in(j);   // Current state of target.
1876       Node* n = newin->in(j);   // Incoming change to target state.
1877       PhiNode* phi;
1878       if (m->is_Phi() && m->as_Phi()->region() == r) {
1879         phi = m->as_Phi();
1880       } else if (m->is_InlineType() && m->as_InlineType()->has_phi_inputs(r)) {
1881         phi = m->as_InlineType()->get_oop()->as_Phi();
1882       } else {
1883         phi = nullptr;
1884       }
1885       if (m != n) {             // Different; must merge
1886         switch (j) {
1887         // Frame pointer and Return Address never change
1888         case TypeFunc::FramePtr:// Drop m, use the original value
1889         case TypeFunc::ReturnAdr:
1890           break;
1891         case TypeFunc::Memory:  // Merge inputs to the MergeMem node
1892           assert(phi == nullptr, "the merge contains phis, not vice versa");
1893           merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1894           continue;
1895         default:                // All normal stuff
1896           if (phi == nullptr) {
1897             const JVMState* jvms = map()->jvms();
1898             if (EliminateNestedLocks &&
1899                 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1900               // BoxLock nodes are not commoned;
1901               // use the old BoxLock node as the merged box.
1902               assert(newin->jvms()->is_monitor_box(j), "sanity");
1903               // This assert also tests that nodes are BoxLock.
1904               assert(BoxLockNode::same_slot(n, m), "sanity");
1905               C->gvn_replace_by(n, m);
1906             } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1907               phi = ensure_phi(j, nophi);
1908             }
1909           }
1910           break;
1911         }
1912       }
1913       // At this point, n might be top if:
1914       //  - there is no phi (because TypeFlow detected a conflict), or
1915       //  - the corresponding control edge is top (a dead incoming path)
1916       // It is a bug if we create a phi which sees a garbage value on a live path.
1917 
1918       // Merging two inline types?
1919       if (phi != nullptr && phi->bottom_type()->is_inlinetypeptr()) {
1920         // Reload current state because it may have been updated by ensure_phi
1921         m = map()->in(j);
1922         InlineTypeNode* vtm = m->as_InlineType(); // Current inline type
1923         InlineTypeNode* vtn = n->as_InlineType(); // Incoming inline type
1924         assert(vtm->get_oop() == phi, "Inline type should have Phi input");
1925         if (TraceOptoParse) {
1926 #ifdef ASSERT
1927           tty->print_cr("\nMerging inline types");
1928           tty->print_cr("Current:");
1929           vtm->dump(2);
1930           tty->print_cr("Incoming:");
1931           vtn->dump(2);
1932           tty->cr();
1933 #endif
1934         }
1935         // Do the merge
1936         vtm->merge_with(&_gvn, vtn, pnum, last_merge);
1937         if (last_merge) {
1938           map()->set_req(j, _gvn.transform_no_reclaim(vtm));
1939           record_for_igvn(vtm);
1940         }
1941       } else if (phi != nullptr) {
1942         assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1943         assert(phi->region() == r, "");
1944         phi->set_req(pnum, n);  // Then add 'n' to the merge
1945         if (last_merge) {
1946           // Last merge for this Phi.
1947           // So far, Phis have had a reasonable type from ciTypeFlow.
1948           // Now _gvn will join that with the meet of current inputs.
1949           // BOTTOM is never permissible here, because pessimistically
1950           // Phis of pointers cannot lose the basic pointer type.
1951           debug_only(const Type* bt1 = phi->bottom_type());
1952           assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1953           map()->set_req(j, _gvn.transform_no_reclaim(phi));
1954           debug_only(const Type* bt2 = phi->bottom_type());
1955           assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1956           record_for_igvn(phi);
1957         }
1958       }
1959     } // End of for all values to be merged
1960 
1961     if (last_merge && !r->in(0)) {         // The occasional useless Region
1962       assert(control() == r, "");
1963       set_control(r->nonnull_req());
1964     }
1965 
1966     map()->merge_replaced_nodes_with(newin);
1967 
1968     // newin has been subsumed into the lazy merge, and is now dead.
1969     set_block(save_block);
1970 
1971     stop();                     // done with this guy, for now
1972   }
1973 
1974   if (TraceOptoParse) {
1975     tty->print_cr(" on path %d", pnum);
1976   }
1977 
1978   // Done with this parser state.
1979   assert(stopped(), "");
1980 }
1981 

2093 
2094   // Add new path to the region.
2095   uint pnum = r->req();
2096   r->add_req(nullptr);
2097 
2098   for (uint i = 1; i < map->req(); i++) {
2099     Node* n = map->in(i);
2100     if (i == TypeFunc::Memory) {
2101       // Ensure a phi on all currently known memories.
2102       for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2103         Node* phi = mms.memory();
2104         if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2105           assert(phi->req() == pnum, "must be same size as region");
2106           phi->add_req(nullptr);
2107         }
2108       }
2109     } else {
2110       if (n->is_Phi() && n->as_Phi()->region() == r) {
2111         assert(n->req() == pnum, "must be same size as region");
2112         n->add_req(nullptr);
2113       } else if (n->is_InlineType() && n->as_InlineType()->has_phi_inputs(r)) {
2114         n->as_InlineType()->add_new_path(r);
2115       }
2116     }
2117   }
2118 
2119   return pnum;
2120 }
2121 
2122 //------------------------------ensure_phi-------------------------------------
2123 // Turn the idx'th entry of the current map into a Phi
2124 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2125   SafePointNode* map = this->map();
2126   Node* region = map->control();
2127   assert(region->is_Region(), "");
2128 
2129   Node* o = map->in(idx);
2130   assert(o != nullptr, "");
2131 
2132   if (o == top())  return nullptr; // TOP always merges into TOP
2133 
2134   if (o->is_Phi() && o->as_Phi()->region() == region) {
2135     return o->as_Phi();
2136   }
2137   InlineTypeNode* vt = o->isa_InlineType();
2138   if (vt != nullptr && vt->has_phi_inputs(region)) {
2139     return vt->get_oop()->as_Phi();
2140   }
2141 
2142   // Now use a Phi here for merging
2143   assert(!nocreate, "Cannot build a phi for a block already parsed.");
2144   const JVMState* jvms = map->jvms();
2145   const Type* t = nullptr;
2146   if (jvms->is_loc(idx)) {
2147     t = block()->local_type_at(idx - jvms->locoff());
2148   } else if (jvms->is_stk(idx)) {
2149     t = block()->stack_type_at(idx - jvms->stkoff());
2150   } else if (jvms->is_mon(idx)) {
2151     assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2152     t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2153   } else if ((uint)idx < TypeFunc::Parms) {
2154     t = o->bottom_type();  // Type::RETURN_ADDRESS or such-like.
2155   } else {
2156     assert(false, "no type information for this phi");
2157   }
2158 
2159   // If the type falls to bottom, then this must be a local that
2160   // is already dead or is mixing ints and oops or some such.
2161   // Forcing it to top makes it go dead.
2162   if (t == Type::BOTTOM) {
2163     map->set_req(idx, top());
2164     return nullptr;
2165   }
2166 
2167   // Do not create phis for top either.
2168   // A top on a non-null control flow must be unused, even after the phi.
2169   if (t == Type::TOP || t == Type::HALF) {
2170     map->set_req(idx, top());
2171     return nullptr;
2172   }
2173 
2174   if (vt != nullptr && t->is_inlinetypeptr()) {
2175     // Inline types are merged by merging their field values.
2176     // Create a cloned InlineTypeNode with phi inputs that
2177     // represents the merged inline type and update the map.
2178     vt = vt->clone_with_phis(&_gvn, region);
2179     map->set_req(idx, vt);
2180     return vt->get_oop()->as_Phi();
2181   } else {
2182     PhiNode* phi = PhiNode::make(region, o, t);
2183     gvn().set_type(phi, t);
2184     if (C->do_escape_analysis()) record_for_igvn(phi);
2185     map->set_req(idx, phi);
2186     return phi;
2187   }
2188 }
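       // Typical use, as in merge_common() above:
       //   PhiNode* phi = ensure_phi(j, nophi);
       //   ...
       //   if (phi != nullptr)  phi->set_req(pnum, n);
       // The first call rewrites map->in(idx) into a phi seeded with the prior
       // value on all existing region inputs; later paths only fill in their
       // own edge.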
2189 
2190 //--------------------------ensure_memory_phi----------------------------------
2191 // Turn the idx'th slice of the current memory into a Phi
2192 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2193   MergeMemNode* mem = merged_memory();
2194   Node* region = control();
2195   assert(region->is_Region(), "");
2196 
2197   Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2198   assert(o != nullptr && o != top(), "");
2199 
2200   PhiNode* phi;
2201   if (o->is_Phi() && o->as_Phi()->region() == region) {
2202     phi = o->as_Phi();
2203     if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2204       // clone the shared base memory phi to make a new memory split
2205       assert(!nocreate, "Cannot build a phi for a block already parsed.");
2206       const Type* t = phi->bottom_type();
2207       const TypePtr* adr_type = C->get_adr_type(idx);

2335     Node* chk   = _gvn.transform( new CmpINode(opq, profile_state) );
2336     Node* tst   = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
2337     // Branch to failure if state was changed
2338     { BuildCutout unless(this, tst, PROB_ALWAYS);
2339       uncommon_trap(Deoptimization::Reason_rtm_state_change,
2340                     Deoptimization::Action_make_not_entrant);
2341     }
2342   }
2343 #endif
2344 }
2345 
2346 //------------------------------return_current---------------------------------
2347 // Append current _map to _exit_return
2348 void Parse::return_current(Node* value) {
2349   if (RegisterFinalizersAtInit &&
2350       method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2351     call_register_finalizer();
2352   }
2353 
2354   // Do not set_parse_bci, so that return goo is credited to the return insn.
2355   // vreturn can trigger an allocation so vreturn can throw. Setting
2356   // the bci here breaks exception handling. Commenting this out
2357   // doesn't seem to break anything.
2358   //  set_bci(InvocationEntryBci);
2359   if (method()->is_synchronized() && GenerateSynchronizationCode) {
2360     shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2361   }
2362   if (C->env()->dtrace_method_probes()) {
2363     make_dtrace_method_exit(method());
2364   }
2365   // frame pointer is always same, already captured
2366   if (value != nullptr) {
2367     Node* phi = _exits.argument(0);
2368     const Type* return_type = phi->bottom_type();
2369     const TypeInstPtr* tr = return_type->isa_instptr();
2370     if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
2371         return_type->is_inlinetypeptr()) {
2372       // Inline type is returned as fields, make sure it is scalarized
2373       if (!value->is_InlineType()) {
2374         value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass(), method()->signature()->returns_null_free_inline_type());
2375       }
2376       if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
2377         // Returning from root or an incrementally inlined method. Make sure all non-flat
2378         // fields are buffered and re-execute if allocation triggers deoptimization.
2379         PreserveReexecuteState preexecs(this);
2380         assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
2381         jvms()->set_should_reexecute(true);
2382         inc_sp(1);
2383         value = value->as_InlineType()->allocate_fields(this);
2384       }
2385     } else if (value->is_InlineType()) {
2386       // Inline type is returned as oop, make sure it is buffered and re-execute
2387       // if allocation triggers deoptimization.
2388       PreserveReexecuteState preexecs(this);
2389       jvms()->set_should_reexecute(true);
2390       inc_sp(1);
2391       value = value->as_InlineType()->buffer(this);
2392     }
2393     // ...else
2394     // If returning oops to an interface-return, there is a silent free
2395     // cast from oop to interface allowed by the Verifier. Make it explicit here.
2396     phi->add_req(value);
2397   }
2398 
2399   SafePointNode* exit_return = _exits.map();
2400   exit_return->in( TypeFunc::Control  )->add_req( control() );
2401   exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
2402   Node *mem = exit_return->in( TypeFunc::Memory   );
2403   for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2404     if (mms.is_empty()) {
2405       // get a copy of the base memory, and patch just this one input
2406       const TypePtr* adr_type = mms.adr_type(C);
2407       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2408       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2409       gvn().set_type_bottom(phi);
2410       phi->del_req(phi->req()-1);  // prepare to re-patch
2411       mms.set_memory(phi);
2412     }
2413     mms.memory()->add_req(mms.memory2());
2414   }
2415 
2416   if (_first_return) {
2417     _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2418     _first_return = false;
2419   } else {
2420     _exits.map()->merge_replaced_nodes_with(map());
2421   }
2422 
2423   stop_and_kill_map();          // This CFG path dies here
2424 }
2425 
2426 
2427 //------------------------------add_safepoint----------------------------------
2428 void Parse::add_safepoint() {
2429   uint parms = TypeFunc::Parms+1;
2430 
2431   // Clear out dead values from the debug info.
2432   kill_dead_locals();
2433 
2434   // Clone the JVM State
2435   SafePointNode *sfpnt = new SafePointNode(parms, nullptr);