src/hotspot/share/opto/parse1.cpp


  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "interpreter/linkResolver.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "oops/method.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/c2compiler.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/idealGraphPrinter.hpp"

  34 #include "opto/locknode.hpp"
  35 #include "opto/memnode.hpp"
  36 #include "opto/opaquenode.hpp"
  37 #include "opto/parse.hpp"
  38 #include "opto/rootnode.hpp"
  39 #include "opto/runtime.hpp"
  40 #include "opto/type.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/safepointMechanism.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "utilities/bitMap.inline.hpp"
  45 #include "utilities/copy.hpp"
  46 
  47 // Static array so we can figure out which bytecodes stop us from compiling
  48 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
  49 // and eventually should be encapsulated in a proper class (gri 8/18/98).
  50 
  51 #ifndef PRODUCT
  52 int nodes_created              = 0;
  53 int methods_parsed             = 0;

  85   }
  86   if (all_null_checks_found) {
  87     tty->print_cr("%d made implicit (%2d%%)", implicit_null_checks,
  88                   (100*implicit_null_checks)/all_null_checks_found);
  89   }
  90   if (SharedRuntime::_implicit_null_throws) {
  91     tty->print_cr("%d implicit null exceptions at runtime",
  92                   SharedRuntime::_implicit_null_throws);
  93   }
  94 
  95   if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
  96     BytecodeParseHistogram::print();
  97   }
  98 }
  99 #endif
 100 
 101 //------------------------------ON STACK REPLACEMENT---------------------------
 102 
 103 // Construct a node which can be used to get incoming state for
 104 // on stack replacement.
 105 Node *Parse::fetch_interpreter_state(int index,
 106                                      BasicType bt,
 107                                      Node *local_addrs,
 108                                      Node *local_addrs_base) {
 109   Node *mem = memory(Compile::AliasIdxRaw);
 110   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
 111   Node *ctl = control();
 112 
 113   // Very similar to LoadNode::make, except we handle un-aligned longs and
 114   // doubles on Sparc.  Intel can handle them just fine directly.
 115   Node *l = NULL;
 116   switch (bt) {                // Signature is flattened
 117   case T_INT:     l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        MemNode::unordered); break;
 118   case T_FLOAT:   l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         MemNode::unordered); break;
 119   case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,  MemNode::unordered); break;

 120   case T_OBJECT:  l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
 121   case T_LONG:
 122   case T_DOUBLE: {
 123     // Since arguments are in reverse order, the argument address 'adr'
 124     // refers to the back half of the long/double.  Recompute adr.
 125     adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
 126     if (Matcher::misaligned_doubles_ok) {
 127       l = (bt == T_DOUBLE)
 128         ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
 129         : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
 130     } else {
 131       l = (bt == T_DOUBLE)
 132         ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
 133         : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
 134     }
 135     break;
 136   }
 137   default: ShouldNotReachHere();
 138   }
 139   return _gvn.transform(l);
 140 }
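// A minimal standalone sketch of the address arithmetic used above
// (illustrative only: the 8-byte word size and the reverse layout of
// interpreter locals relative to the base address are assumptions made
// for the example, not values read from this file).
#include <cstdio>

int main() {
  const int wordSize = 8;                        // assumed 64-bit word
  for (int index = 0; index < 4; index++) {
    int one_word_off = -index * wordSize;        // T_INT, T_FLOAT, T_OBJECT, ...
    int two_word_off = -(index + 1) * wordSize;  // T_LONG/T_DOUBLE: load the
                                                 // full two-word value from here
    std::printf("local %d: one-word offset %d, two-word offset %d\n",
                index, one_word_off, two_word_off);
  }
  return 0;
}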
 141 
 142 // Helper routine to prevent the interpreter from handing
 143 // unexpected typestate to an OSR method.
 144 // The Node l is a value newly dug out of the interpreter frame.
 145 // The type is the type predicted by ciTypeFlow.  Note that it is
 146 // not a general type, but can only come from Type::get_typeflow_type.
 147 // The safepoint is a map which will feed an uncommon trap.
 148 Node* Parse::check_interpreter_type(Node* l, const Type* type,
 149                                     SafePointNode* &bad_type_exit) {
 150 
 151   const TypeOopPtr* tp = type->isa_oopptr();
 152 
 153   // TypeFlow may assert null-ness if a type appears unloaded.
 154   if (type == TypePtr::NULL_PTR ||
 155       (tp != NULL && !tp->klass()->is_loaded())) {
 156     // Value must be null, not a real oop.
 157     Node* chk = _gvn.transform( new CmpPNode(l, null()) );
 158     Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
 159     IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
 160     set_control(_gvn.transform( new IfTrueNode(iff) ));
 161     Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
 162     bad_type_exit->control()->add_req(bad_type);
 163     l = null();
 164   }
 165 
 166   // Typeflow can also cut off paths from the CFG, based on
 167   // types which appear unloaded, or call sites which appear unlinked.
 168   // When paths are cut off, values at later merge points can rise
 169   // toward more specific classes.  Make sure these specific classes
 170   // are still in effect.
 171   if (tp != NULL && tp->klass() != C->env()->Object_klass()) {
 172     // TypeFlow asserted a specific object type.  Value must have that type.
 173     Node* bad_type_ctrl = NULL;
 174     l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
 175     bad_type_exit->control()->add_req(bad_type_ctrl);
 176   }
 177 
 178   BasicType bt_l = _gvn.type(l)->basic_type();
 179   BasicType bt_t = type->basic_type();
 180   assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
 181   return l;
 182 }
 183 
 184 // Helper routine which sets up elements of the initial parser map when
 185 // performing a parse for on stack replacement.  Add values into map.
 186 // The only parameter contains the address of the interpreter's argument area.
 187 void Parse::load_interpreter_state(Node* osr_buf) {
 188   int index;
 189   int max_locals = jvms()->loc_size();
 190   int max_stack  = jvms()->stk_size();
 191 
 192 
 193   // Mismatch between method and jvms can occur since map briefly held
 194   // an OSR entry state (which takes up one RawPtr word).
 195   assert(max_locals == method()->max_locals(), "sanity");
 196   assert(max_stack  >= method()->max_stack(),  "sanity");
 197   assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
 198   assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
 199 
 200   // Find the start block.
 201   Block* osr_block = start_block();
 202   assert(osr_block->start() == osr_bci(), "sanity");
 203 
 204   // Set initial BCI.
 205   set_parse_bci(osr_block->start());
 206 
 207   // Set initial stack depth.
 208   set_sp(osr_block->start_sp());
 209 
 210   // Check bailouts.  We currently do not perform on stack replacement
 211   // of loops in catch blocks or loops which branch with a non-empty stack.
 212   if (sp() != 0) {
 213     C->record_method_not_compilable("OSR starts with non-empty stack");
 214     return;
 215   }
 216   // Do not OSR inside finally clauses:
 217   if (osr_block->has_trap_at(osr_block->start())) {
 218     C->record_method_not_compilable("OSR starts with an immediate trap");
 219     return;
 220   }
 221 
 222   // Commute monitors from interpreter frame to compiler frame.
 223   assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
 224   int mcnt = osr_block->flow()->monitor_count();
 225   Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
 226   for (index = 0; index < mcnt; index++) {
 227     // Make a BoxLockNode for the monitor.
 228     Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
 229 
 230 
 231     // Displaced headers and locked objects are interleaved in the
 232     // temp OSR buffer.  We only copy the locked objects out here.
 233     // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
 234     Node *lock_object = fetch_interpreter_state(index*2, T_OBJECT, monitors_addr, osr_buf);
 235     // Try and copy the displaced header to the BoxNode
 236     Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
 237 
 238 
 239     store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
 240 
 241     // Build a bogus FastLockNode (no code will be generated) and push the
 242     // monitor into our debug info.
 243     const FastLockNode *flock = _gvn.transform(new FastLockNode( 0, lock_object, box ))->as_FastLock();
 244     map()->push_monitor(flock);
 245 
 246     // If the lock is our method synchronization lock, tuck it away in
 247     // _sync_lock for return and rethrow exit paths.
 248     if (index == 0 && method()->is_synchronized()) {
 249       _synch_lock = flock;
 250     }
 251   }
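// A standalone sketch of the interleaved monitor layout consumed by the loop
// above (illustrative only; the word size and counts are example values, not
// read from a real OSR buffer).
#include <cstdio>

int main() {
  const int wordSize   = 8;  // assumed 64-bit word
  const int max_locals = 3;  // example values
  const int mcnt       = 2;
  int monitors_base = (max_locals + mcnt * 2 - 1) * wordSize;
  for (int index = 0; index < mcnt; index++) {
    int obj_off = monitors_base - (index * 2) * wordSize;      // locked object
    int hdr_off = monitors_base - (index * 2 + 1) * wordSize;  // displaced header
    std::printf("monitor %d: object at buf+%d, displaced header at buf+%d\n",
                index, obj_off, hdr_off);
  }
  return 0;
}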
 252 
 253   // Use the raw liveness computation to make sure that unexpected
 254   // values don't propagate into the OSR frame.
 255   MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
 256   if (!live_locals.is_valid()) {
 257     // Degenerate or breakpointed method.

 284         if (C->log() != NULL) {
 285           C->log()->elem("OSR_mismatch local_index='%d'",index);
 286         }
 287         set_local(index, null());
 288         // and ignore it for the loads
 289         continue;
 290       }
 291     }
 292 
 293     // Filter out TOP, HALF, and BOTTOM.  (Cf. ensure_phi.)
 294     if (type == Type::TOP || type == Type::HALF) {
 295       continue;
 296     }
 297     // If the type falls to bottom, then this must be a local that
 298     // is mixing ints and oops or some such.  Forcing it to top
 299     // makes it go dead.
 300     if (type == Type::BOTTOM) {
 301       continue;
 302     }
 303     // Construct code to access the appropriate local.
 304     BasicType bt = type->basic_type();
 305     if (type == TypePtr::NULL_PTR) {
 306       // Ptr types are mixed together with T_ADDRESS but NULL is
 307       // really for T_OBJECT types so correct it.
 308       bt = T_OBJECT;
 309     }
 310     Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
 311     set_local(index, value);
 312   }
 313 
 314   // Extract the needed stack entries from the interpreter frame.
 315   for (index = 0; index < sp(); index++) {
 316     const Type *type = osr_block->stack_type_at(index);
 317     if (type != Type::TOP) {
 318       // Currently the compiler bails out when attempting to on stack replace
 319       // at a bci with a non-empty stack.  We should not reach here.
 320       ShouldNotReachHere();
 321     }
 322   }
 323 
 324   // End the OSR migration
 325   make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
 326                     CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
 327                     "OSR_migration_end", TypeRawPtr::BOTTOM,
 328                     osr_buf);
 329 
 330   // Now that the interpreter state is loaded, make sure it will match

 578     }
 579   }
 580 
 581   if (depth() == 1 && !failing()) {
 582     if (C->clinit_barrier_on_entry()) {
 583       // Add check to deoptimize the nmethod once the holder class is fully initialized
 584       clinit_deopt();
 585     }
 586 
 587     // Add check to deoptimize the nmethod if RTM state was changed
 588     rtm_deopt();
 589   }
 590 
 591   // Check for bailouts during method entry or RTM state check setup.
 592   if (failing()) {
 593     if (log)  log->done("parse");
 594     C->set_default_node_notes(caller_nn);
 595     return;
 596   }
 597 
 598   entry_map = map();  // capture any changes performed by method setup code
 599   assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
 600 
 601   // We begin parsing as if we have just encountered a jump to the
 602   // method entry.
 603   Block* entry_block = start_block();
 604   assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
 605   set_map_clone(entry_map);
 606   merge_common(entry_block, entry_block->next_path_num());
 607 
 608 #ifndef PRODUCT
 609   BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
 610   set_parse_histogram( parse_histogram_obj );
 611 #endif
 612 
 613   // Parse all the basic blocks.
 614   do_all_blocks();
 615 
 616   C->set_default_node_notes(caller_nn);
 617 

 760 void Parse::build_exits() {
 761   // make a clone of caller to prevent sharing of side-effects
 762   _exits.set_map(_exits.clone_map());
 763   _exits.clean_stack(_exits.sp());
 764   _exits.sync_jvms();
 765 
 766   RegionNode* region = new RegionNode(1);
 767   record_for_igvn(region);
 768   gvn().set_type_bottom(region);
 769   _exits.set_control(region);
 770 
 771   // Note:  iophi and memphi are not transformed until do_exits.
 772   Node* iophi  = new PhiNode(region, Type::ABIO);
 773   Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
 774   gvn().set_type_bottom(iophi);
 775   gvn().set_type_bottom(memphi);
 776   _exits.set_i_o(iophi);
 777   _exits.set_all_memory(memphi);
 778 
 779   // Add a return value to the exit state.  (Do not push it yet.)
 780   if (tf()->range()->cnt() > TypeFunc::Parms) {
 781     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
 782     if (ret_type->isa_int()) {
 783       BasicType ret_bt = method()->return_type()->basic_type();
 784       if (ret_bt == T_BOOLEAN ||
 785           ret_bt == T_CHAR ||
 786           ret_bt == T_BYTE ||
 787           ret_bt == T_SHORT) {
 788         ret_type = TypeInt::INT;
 789       }
 790     }
 791 
 792     // Don't "bind" an unloaded return klass to the ret_phi. If the klass
 793     // becomes loaded during the subsequent parsing, the loaded and unloaded
 794     // types will not join when we transform and push in do_exits().
 795     const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
 796     if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
 797       ret_type = TypeOopPtr::BOTTOM;
 798     }
 799     int         ret_size = type2size[ret_type->basic_type()];
 800     Node*       ret_phi  = new PhiNode(region, ret_type);
 801     gvn().set_type_bottom(ret_phi);
 802     _exits.ensure_stack(ret_size);
 803     assert((int)(tf()->range()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
 804     assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
 805     _exits.set_argument(0, ret_phi);  // here is where the parser finds it
 806     // Note:  ret_phi is not yet pushed, until do_exits.
 807   }
 808 }
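// A small standalone illustration of the slot accounting above: the exit map
// reserves one stack slot for most return types and two for long/double,
// which is what type2size[] encodes. The table below is a simplified
// stand-in written for this example only.
#include <cstdio>

enum BasicKind { kInt, kFloat, kObject, kLong, kDouble, kVoid };

static int slots_for(BasicKind k) {
  switch (k) {
    case kLong:
    case kDouble: return 2;  // two-word JVM values
    case kVoid:   return 0;  // nothing pushed
    default:      return 1;  // int, float, reference, ...
  }
}

int main() {
  std::printf("long return needs %d slot(s), int return needs %d\n",
              slots_for(kLong), slots_for(kInt));
  return 0;
}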
 809 
 810 
 811 //----------------------------build_start_state-------------------------------
 812 // Construct a state which contains only the incoming arguments from an
 813 // unknown caller.  The method & bci will be NULL & InvocationEntryBci.
 814 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
 815   int        arg_size = tf->domain()->cnt();
 816   int        max_size = MAX2(arg_size, (int)tf->range()->cnt());
 817   JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
 818   SafePointNode* map  = new SafePointNode(max_size, jvms);

 819   record_for_igvn(map);
 820   assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
 821   Node_Notes* old_nn = default_node_notes();
 822   if (old_nn != NULL && has_method()) {
 823     Node_Notes* entry_nn = old_nn->clone(this);
 824     JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
 825     entry_jvms->set_offsets(0);
 826     entry_jvms->set_bci(entry_bci());
 827     entry_nn->set_jvms(entry_jvms);
 828     set_default_node_notes(entry_nn);
 829   }
 830   uint i;
 831   for (i = 0; i < (uint)arg_size; i++) {
 832     Node* parm = initial_gvn()->transform(new ParmNode(start, i));
 833     map->init_req(i, parm);
 834     // Record all these guys for later GVN.
 835     record_for_igvn(parm);
 836   }
 837   for (; i < map->req(); i++) {
 838     map->init_req(i, top());
 839   }
 840   assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
 841   set_default_node_notes(old_nn);
 842   jvms->set_map(map);
 843   return jvms;
 844 }
 845 
 846 //-----------------------------make_node_notes---------------------------------
 847 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
 848   if (caller_nn == NULL)  return NULL;
 849   Node_Notes* nn = caller_nn->clone(C);
 850   JVMState* caller_jvms = nn->jvms();
 851   JVMState* jvms = new (C) JVMState(method(), caller_jvms);
 852   jvms->set_offsets(0);
 853   jvms->set_bci(_entry_bci);
 854   nn->set_jvms(jvms);
 855   return nn;
 856 }
 857 
 858 
 859 //--------------------------return_values--------------------------------------
 860 void Compile::return_values(JVMState* jvms) {
 861   GraphKit kit(jvms);
 862   Node* ret = new ReturnNode(TypeFunc::Parms,
 863                              kit.control(),
 864                              kit.i_o(),
 865                              kit.reset_memory(),
 866                              kit.frameptr(),
 867                              kit.returnadr());
 868   // Add zero or 1 return values
 869   int ret_size = tf()->range()->cnt() - TypeFunc::Parms;
 870   if (ret_size > 0) {
 871     kit.inc_sp(-ret_size);  // pop the return value(s)
 872     kit.sync_jvms();
 873     ret->add_req(kit.argument(0));
 874     // Note:  The second dummy edge is not needed by a ReturnNode.
 875   }
 876   // bind it to root
 877   root()->add_req(ret);
 878   record_for_igvn(ret);
 879   initial_gvn()->transform_no_reclaim(ret);
 880 }
 881 
 882 //------------------------rethrow_exceptions-----------------------------------
 883 // Bind all exception states in the list into a single RethrowNode.
 884 void Compile::rethrow_exceptions(JVMState* jvms) {
 885   GraphKit kit(jvms);
 886   if (!kit.has_exceptions())  return;  // nothing to generate
 887   // Load my combined exception state into the kit, with all phis transformed:
 888   SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
 889   Node* ex_oop = kit.use_exception_state(ex_map);
 890   RethrowNode* exit = new RethrowNode(kit.control(),
 891                                       kit.i_o(), kit.reset_memory(),
 892                                       kit.frameptr(), kit.returnadr(),
 893                                       // like a return but with exception input
 894                                       ex_oop);

 978   //    to complete, we force all writes to complete.
 979   //
 980   // 2. Experimental VM option is used to force the barrier if any field
 981   //    was written out in the constructor.
 982   //
 983   // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
 984   //    support_IRIW_for_not_multiple_copy_atomic_cpu selects that
 985   //    MemBarVolatile is used before volatile load instead of after volatile
 986   //    store, so there's no barrier after the store.
 987   //    We want to guarantee the same behavior as on platforms with total store
 988   //    order, although this is not required by the Java memory model.
 989   //    In this case, we want to enforce visibility of volatile field
 990   //    initializations which are performed in constructors.
 991   //    So as with finals, we add a barrier here.
 992   //
 993   // "All bets are off" unless the first publication occurs after a
 994   // normal return from the constructor.  We do not attempt to detect
 995   // such unusual early publications.  But no barrier is needed on
 996   // exceptional returns, since they cannot publish normally.
 997   //
 998   if (method()->is_initializer() &&
 999        (wrote_final() ||
1000          (AlwaysSafeConstructors && wrote_fields()) ||
1001          (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1002     _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
1003 
1004     // If Memory barrier is created for final fields write
1005     // and allocation node does not escape the initialize method,
1006     // then barrier introduced by allocation node can be removed.
1007     if (DoEscapeAnalysis && alloc_with_final()) {
1008       AllocateNode *alloc = AllocateNode::Ideal_allocation(alloc_with_final(), &_gvn);
1009       alloc->compute_MemBar_redundancy(method());
1010     }
1011     if (PrintOpto && (Verbose || WizardMode)) {
1012       method()->print_name();
1013       tty->print_cr(" writes finals and needs a memory barrier");
1014     }
1015   }
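// The guarantee aimed for by the MemBarRelease above can be pictured with a
// plain C++ release fence after the constructor's field writes. This is only
// an analogy for the publication ordering being enforced, not the compiler's
// actual mechanism, and the Holder type is invented for the example.
#include <atomic>

struct Holder {
  int final_field;
  explicit Holder(int v) : final_field(v) {
    // Field writes made in the constructor must become visible before the
    // new object can be published to another thread.
    std::atomic_thread_fence(std::memory_order_release);
  }
};

int main() {
  Holder h(42);
  return h.final_field == 42 ? 0 : 1;
}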
1016 
1017   // Any method can write a @Stable field; insert memory barriers
1018   // after those also. Can't bind predecessor allocation node (if any)
1019   // with barrier because allocation doesn't always dominate
1020   // MemBarRelease.
1021   if (wrote_stable()) {
1022     _exits.insert_mem_bar(Op_MemBarRelease);
1023     if (PrintOpto && (Verbose || WizardMode)) {
1024       method()->print_name();
1025       tty->print_cr(" writes @Stable and needs a memory barrier");
1026     }
1027   }
1028 
1029   for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1030     // transform each slice of the original memphi:
1031     mms.set_memory(_gvn.transform(mms.memory()));
1032   }
1033   // Clean up input MergeMems created by transforming the slices
1034   _gvn.transform(_exits.merged_memory());
1035 
1036   if (tf()->range()->cnt() > TypeFunc::Parms) {
1037     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
1038     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
1039     if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1040       // If the type we set for the ret_phi in build_exits() is too optimistic and
1041       // the ret_phi is top now, there's an extremely small chance that it may be due to class
1042       // loading.  It could also be due to an error, so mark this method as not compilable because
1043       // otherwise this could lead to an infinite compile loop.
1044       // In any case, this code path is rarely (and never in my testing) reached.
1045       C->record_method_not_compilable("Can't determine return type.");
1046       return;
1047     }
1048     if (ret_type->isa_int()) {
1049       BasicType ret_bt = method()->return_type()->basic_type();
1050       ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1051     }
1052     _exits.push_node(ret_type->basic_type(), ret_phi);
1053   }
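// A hedged standalone sketch of the sub-word narrowing that mask_int_value is
// used for above: boolean/byte/char/short results travel as 32-bit ints and
// are re-normalized to their declared width before being returned. The helper
// below is a local stand-in for the example, not the C2 implementation.
#include <cstdint>
#include <cstdio>

enum BasicKind { kBoolean, kByte, kChar, kShort, kInt };

static int narrow_return(int v, BasicKind k) {
  switch (k) {
    case kBoolean: return v & 1;                     // 0 or 1
    case kByte:    return static_cast<int8_t>(v);    // sign-extend low 8 bits
    case kChar:    return static_cast<uint16_t>(v);  // zero-extend low 16 bits
    case kShort:   return static_cast<int16_t>(v);   // sign-extend low 16 bits
    default:       return v;
  }
}

int main() {
  std::printf("0x1ff as byte -> %d, as char -> %d\n",
              narrow_return(0x1ff, kByte), narrow_return(0x1ff, kChar));
  return 0;
}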
1054 
1055   // Note:  Logic for creating and optimizing the ReturnNode is in Compile.
1056 
1057   // Unlock along the exceptional paths.

1110 }
1111 
1112 //-----------------------------create_entry_map-------------------------------
1113 // Initialize our parser map to contain the types at method entry.
1114 // For OSR, the map contains a single RawPtr parameter.
1115 // Initial monitor locking for sync. methods is performed by do_method_entry.
1116 SafePointNode* Parse::create_entry_map() {
1117   // Check for really stupid bail-out cases.
1118   uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1119   if (len >= 32760) {
1120     C->record_method_not_compilable("too many local variables");
1121     return NULL;
1122   }
1123 
1124   // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1125   _caller->map()->delete_replaced_nodes();
1126 
1127   // If this is an inlined method, we may have to do a receiver null check.
1128   if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1129     GraphKit kit(_caller);
1130     kit.null_check_receiver_before_call(method());
1131     _caller = kit.transfer_exceptions_into_jvms();
1132     if (kit.stopped()) {
1133       _exits.add_exception_states_from(_caller);
1134       _exits.set_jvms(_caller);
1135       return NULL;
1136     }
1137   }
1138 
1139   assert(method() != NULL, "parser must have a method");
1140 
1141   // Create an initial safepoint to hold JVM state during parsing
1142   JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : NULL);
1143   set_map(new SafePointNode(len, jvms));
1144   jvms->set_map(map());
1145   record_for_igvn(map());
1146   assert(jvms->endoff() == len, "correct jvms sizing");
1147 
1148   SafePointNode* inmap = _caller->map();
1149   assert(inmap != NULL, "must have inmap");
1150   // In case of null check on receiver above
1151   map()->transfer_replaced_nodes_from(inmap, _new_idx);
1152 
1153   uint i;
1154 
1155   // Pass thru the predefined input parameters.
1156   for (i = 0; i < TypeFunc::Parms; i++) {
1157     map()->init_req(i, inmap->in(i));
1158   }
1159 
1160   if (depth() == 1) {
1161     assert(map()->memory()->Opcode() == Op_Parm, "");
1162     // Insert the memory aliasing node
1163     set_all_memory(reset_memory());
1164   }
1165   assert(merged_memory(), "");
1166 
1167   // Now add the locals which are initially bound to arguments:
1168   uint arg_size = tf()->domain()->cnt();
1169   ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
1170   for (i = TypeFunc::Parms; i < arg_size; i++) {
1171     map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1172   }
1173 
1174   // Clear out the rest of the map (locals and stack)
1175   for (i = arg_size; i < len; i++) {
1176     map()->init_req(i, top());
1177   }
1178 
1179   SafePointNode* entry_map = stop();
1180   return entry_map;
1181 }
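// A standalone sketch of the map sizing checked at the top of this routine
// (illustrative only; the local and stack counts are example values, and
// TypeFunc::Parms is taken to be the five fixed edges: control, i/o, memory,
// frame pointer and return address).
#include <cstdio>

int main() {
  const int TypeFunc_Parms = 5;
  const int max_locals     = 10;  // example values, not from a real method
  const int max_stack      = 4;
  int len = TypeFunc_Parms + max_locals + max_stack;
  std::printf("locals in slots [%d,%d), stack in [%d,%d), map length %d "
              "(bail out if >= 32760)\n",
              TypeFunc_Parms, TypeFunc_Parms + max_locals,
              TypeFunc_Parms + max_locals, len, len);
  return 0;
}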
1182 
1183 //-----------------------------do_method_entry--------------------------------
1184 // Emit any code needed in the pseudo-block before BCI zero.
1185 // The main thing to do is lock the receiver of a synchronized method.
1186 void Parse::do_method_entry() {
1187   set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1188   set_sp(0);                         // Java Stack Pointer

1222 
1223   // If the method is synchronized, we need to construct a lock node, attach
1224   // it to the Start node, and pin it there.
1225   if (method()->is_synchronized()) {
1226     // Insert a FastLockNode right after the Start which takes as arguments
1227     // the current thread pointer, the "this" pointer & the address of the
1228     // stack slot pair used for the lock.  The "this" pointer is a projection
1229     // off the start node, but the locking spot has to be constructed by
1230     // creating a ConLNode of 0, and boxing it with a BoxLockNode.  The BoxLockNode
1231     // becomes the second argument to the FastLockNode call.  The
1232     // FastLockNode becomes the new control parent to pin it to the start.
1233 
1234     // Setup Object Pointer
1235     Node *lock_obj = NULL;
1236     if (method()->is_static()) {
1237       ciInstance* mirror = _method->holder()->java_mirror();
1238       const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1239       lock_obj = makecon(t_lock);
1240     } else {                  // Else pass the "this" pointer,
1241       lock_obj = local(0);    // which is Parm0 from StartNode

1242     }
1243     // Clear out dead values from the debug info.
1244     kill_dead_locals();
1245     // Build the FastLockNode
1246     _synch_lock = shared_lock(lock_obj);
1247   }
1248 
1249   // Feed profiling data for parameters to the type system so it can
1250   // propagate it as speculative types
1251   record_profiled_parameters_for_speculation();
1252 }
1253 
1254 //------------------------------init_blocks------------------------------------
1255 // Create and initialize the parser's basic-block structures for this method.
1256 void Parse::init_blocks() {
1257   // Create the blocks.
1258   _block_count = flow()->block_count();
1259   _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1260 
1261   // Initialize the structs.

1633 //--------------------handle_missing_successor---------------------------------
1634 void Parse::handle_missing_successor(int target_bci) {
1635 #ifndef PRODUCT
1636   Block* b = block();
1637   int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1638   tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1639 #endif
1640   ShouldNotReachHere();
1641 }
1642 
1643 //--------------------------merge_common---------------------------------------
1644 void Parse::merge_common(Parse::Block* target, int pnum) {
1645   if (TraceOptoParse) {
1646     tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1647   }
1648 
1649   // Zap extra stack slots to top
1650   assert(sp() == target->start_sp(), "");
1651   clean_stack(sp());
1652 
1653   if (!target->is_merged()) {   // No prior mapping at this bci
1654     if (TraceOptoParse) { tty->print(" with empty state");  }
1655 
1656     // If this path is dead, do not bother capturing it as a merge.
1657     // It is "as if" we had 1 fewer predecessors from the beginning.
1658     if (stopped()) {
1659       if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
1660       return;
1661     }
1662 
1663     // Make a region if we know there are multiple or unpredictable inputs.
1664     // (Also, if this is a plain fall-through, we might see another region,
1665     // which must not be allowed into this block's map.)
1666     if (pnum > PhiNode::Input         // Known multiple inputs.
1667         || target->is_handler()       // These have unpredictable inputs.
1668         || target->is_loop_head()     // Known multiple inputs
1669         || control()->is_Region()) {  // We must hide this guy.
1670 
1671       int current_bci = bci();
1672       set_parse_bci(target->start()); // Set target bci

1686       gvn().set_type(r, Type::CONTROL);
1687       record_for_igvn(r);
1688       // zap all inputs to NULL for debugging (done in Node(uint) constructor)
1689       // for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); }
1690       r->init_req(pnum, control());
1691       set_control(r);
1692       set_parse_bci(current_bci); // Restore bci
1693     }
1694 
1695     // Convert the existing Parser mapping into a mapping at this bci.
1696     store_state_to(target);
1697     assert(target->is_merged(), "do not come here twice");
1698 
1699   } else {                      // Prior mapping at this bci
1700     if (TraceOptoParse) {  tty->print(" with previous state"); }
1701 #ifdef ASSERT
1702     if (target->is_SEL_head()) {
1703       target->mark_merged_backedge(block());
1704     }
1705 #endif

1706     // We must not manufacture more phis if the target is already parsed.
1707     bool nophi = target->is_parsed();
1708 
1709     SafePointNode* newin = map();// Hang on to incoming mapping
1710     Block* save_block = block(); // Hang on to incoming block;
1711     load_state_from(target);    // Get prior mapping
1712 
1713     assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1714     assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1715     assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1716     assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1717 
1718     // Iterate over my current mapping and the old mapping.
1719     // Where different, insert Phi functions.
1720     // Use any existing Phi functions.
1721     assert(control()->is_Region(), "must be merging to a region");
1722     RegionNode* r = control()->as_Region();
1723 
1724     // Compute where to merge into
1725     // Merge incoming control path
1726     r->init_req(pnum, newin->control());
1727 
1728     if (pnum == 1) {            // Last merge for this Region?
1729       if (!block()->flow()->is_irreducible_entry()) {
1730         Node* result = _gvn.transform_no_reclaim(r);
1731         if (r != result && TraceOptoParse) {
1732           tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1733         }
1734       }
1735       record_for_igvn(r);
1736     }
1737 
1738     // Update all the non-control inputs to map:
1739     assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1740     bool check_elide_phi = target->is_SEL_backedge(save_block);

1741     for (uint j = 1; j < newin->req(); j++) {
1742       Node* m = map()->in(j);   // Current state of target.
1743       Node* n = newin->in(j);   // Incoming change to target state.
1744       PhiNode* phi;
1745       if (m->is_Phi() && m->as_Phi()->region() == r)
1746         phi = m->as_Phi();
1747       else


1748         phi = NULL;

1749       if (m != n) {             // Different; must merge
1750         switch (j) {
1751         // Frame pointer and Return Address never change
1752         case TypeFunc::FramePtr:// Drop m, use the original value
1753         case TypeFunc::ReturnAdr:
1754           break;
1755         case TypeFunc::Memory:  // Merge inputs to the MergeMem node
1756           assert(phi == NULL, "the merge contains phis, not vice versa");
1757           merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1758           continue;
1759         default:                // All normal stuff
1760           if (phi == NULL) {
1761             const JVMState* jvms = map()->jvms();
1762             if (EliminateNestedLocks &&
1763                 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1764               // BoxLock nodes are not subject to commoning.
1765               // Use old BoxLock node as merged box.
1766               assert(newin->jvms()->is_monitor_box(j), "sanity");
1767               // This assert also tests that nodes are BoxLock.
1768               assert(BoxLockNode::same_slot(n, m), "sanity");
1769               C->gvn_replace_by(n, m);
1770             } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1771               phi = ensure_phi(j, nophi);
1772             }
1773           }
1774           break;
1775         }
1776       }
1777       // At this point, n might be top if:
1778       //  - there is no phi (because TypeFlow detected a conflict), or
1779       //  - the corresponding control edge is top (a dead incoming path)
1780       // It is a bug if we create a phi which sees a garbage value on a live path.
1781 
1782       if (phi != NULL) {
1783         assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1784         assert(phi->region() == r, "");
1785         phi->set_req(pnum, n);  // Then add 'n' to the merge
1786         if (pnum == PhiNode::Input) {
1787           // Last merge for this Phi.
1788           // So far, Phis have had a reasonable type from ciTypeFlow.
1789           // Now _gvn will join that with the meet of current inputs.
1790           // BOTTOM is never permissible here, 'cause pessimistically
1791           // Phis of pointers cannot lose the basic pointer type.
1792           debug_only(const Type* bt1 = phi->bottom_type());
1793           assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1794           map()->set_req(j, _gvn.transform_no_reclaim(phi));
1795           debug_only(const Type* bt2 = phi->bottom_type());
1796           assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1797           record_for_igvn(phi);
1798         }
1799       }
1800     } // End of for all values to be merged
1801 
1802     if (pnum == PhiNode::Input &&
1803         !r->in(0)) {         // The occasional useless Region
1804       assert(control() == r, "");
1805       set_control(r->nonnull_req());
1806     }
1807 
1808     map()->merge_replaced_nodes_with(newin);
1809 
1810     // newin has been subsumed into the lazy merge, and is now dead.
1811     set_block(save_block);
1812 
1813     stop();                     // done with this guy, for now
1814   }
1815 
1816   if (TraceOptoParse) {
1817     tty->print_cr(" on path %d", pnum);
1818   }
1819 
1820   // Done with this parser state.
1821   assert(stopped(), "");
1822 }
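// A toy standalone sketch of the merge rule implemented above: where the value
// already recorded for a map slot differs from the incoming value, a phi is
// needed (here just reported); identical values merge without one. This mirrors
// only the idea, not the Node/PhiNode machinery or the memory special case.
#include <cstdio>

int main() {
  const int slots = 4;
  int target[slots]   = {1, 2, 3, 4};   // state already stored at the block
  int incoming[slots] = {1, 9, 3, 7};   // state arriving on a new path
  for (int j = 0; j < slots; j++) {
    if (target[j] != incoming[j]) {
      std::printf("slot %d: %d vs %d differ -> needs a phi\n",
                  j, target[j], incoming[j]);
    } else {
      std::printf("slot %d: %d agrees -> no phi\n", j, target[j]);
    }
  }
  return 0;
}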
1823 

1935 
1936   // Add new path to the region.
1937   uint pnum = r->req();
1938   r->add_req(NULL);
1939 
1940   for (uint i = 1; i < map->req(); i++) {
1941     Node* n = map->in(i);
1942     if (i == TypeFunc::Memory) {
1943       // Ensure a phi on all currently known memories.
1944       for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
1945         Node* phi = mms.memory();
1946         if (phi->is_Phi() && phi->as_Phi()->region() == r) {
1947           assert(phi->req() == pnum, "must be same size as region");
1948           phi->add_req(NULL);
1949         }
1950       }
1951     } else {
1952       if (n->is_Phi() && n->as_Phi()->region() == r) {
1953         assert(n->req() == pnum, "must be same size as region");
1954         n->add_req(NULL);


1955       }
1956     }
1957   }
1958 
1959   return pnum;
1960 }
1961 
1962 //------------------------------ensure_phi-------------------------------------
1963 // Turn the idx'th entry of the current map into a Phi
1964 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
1965   SafePointNode* map = this->map();
1966   Node* region = map->control();
1967   assert(region->is_Region(), "");
1968 
1969   Node* o = map->in(idx);
1970   assert(o != NULL, "");
1971 
1972   if (o == top())  return NULL; // TOP always merges into TOP
1973 
1974   if (o->is_Phi() && o->as_Phi()->region() == region) {
1975     return o->as_Phi();
1976   }
1977 
1978   // Now use a Phi here for merging
1979   assert(!nocreate, "Cannot build a phi for a block already parsed.");
1980   const JVMState* jvms = map->jvms();
1981   const Type* t = NULL;
1982   if (jvms->is_loc(idx)) {
1983     t = block()->local_type_at(idx - jvms->locoff());
1984   } else if (jvms->is_stk(idx)) {
1985     t = block()->stack_type_at(idx - jvms->stkoff());
1986   } else if (jvms->is_mon(idx)) {
1987     assert(!jvms->is_monitor_box(idx), "no phis for boxes");
1988     t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
1989   } else if ((uint)idx < TypeFunc::Parms) {
1990     t = o->bottom_type();  // Type::RETURN_ADDRESS or such-like.
1991   } else {
1992     assert(false, "no type information for this phi");
1993   }
1994 
1995   // If the type falls to bottom, then this must be a local that
1996   // is mixing ints and oops or some such.  Forcing it to top
1997   // makes it go dead.
1998   if (t == Type::BOTTOM) {
1999     map->set_req(idx, top());
2000     return NULL;
2001   }
2002 
2003   // Do not create phis for top either.
2004   // A top on a non-null control flow must be an unused value, even after the phi.
2005   if (t == Type::TOP || t == Type::HALF) {
2006     map->set_req(idx, top());
2007     return NULL;
2008   }
2009 
2010   PhiNode* phi = PhiNode::make(region, o, t);
2011   gvn().set_type(phi, t);
2012   if (C->do_escape_analysis()) record_for_igvn(phi);
2013   map->set_req(idx, phi);
2014   return phi;
2015 }
2016 
2017 //--------------------------ensure_memory_phi----------------------------------
2018 // Turn the idx'th slice of the current memory into a Phi
2019 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2020   MergeMemNode* mem = merged_memory();
2021   Node* region = control();
2022   assert(region->is_Region(), "");
2023 
2024   Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2025   assert(o != NULL && o != top(), "");
2026 
2027   PhiNode* phi;
2028   if (o->is_Phi() && o->as_Phi()->region() == region) {
2029     phi = o->as_Phi();
2030     if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2031       // clone the shared base memory phi to make a new memory split
2032       assert(!nocreate, "Cannot build a phi for a block already parsed.");
2033       const Type* t = phi->bottom_type();
2034       const TypePtr* adr_type = C->get_adr_type(idx);

2187   Node* cnt = make_load(control(), cnt_adr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
2188   Node* decr = _gvn.transform(new SubINode(cnt, makecon(TypeInt::ONE)));
2189   store_to_memory(control(), cnt_adr, decr, T_INT, adr_type, MemNode::unordered);
2190   Node *chk   = _gvn.transform(new CmpINode(decr, makecon(TypeInt::ZERO)));
2191   Node* tst   = _gvn.transform(new BoolNode(chk, BoolTest::gt));
2192   { BuildCutout unless(this, tst, PROB_ALWAYS);
2193     uncommon_trap(Deoptimization::Reason_tenured,
2194                   Deoptimization::Action_make_not_entrant);
2195   }
2196 }
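// A standalone sketch of the countdown implemented above: each entry to the
// compiled method decrements a counter, and once the decremented value is no
// longer greater than zero the code traps and is made not entrant (here simply
// reported). The starting count is an example value.
#include <cstdio>

int main() {
  int counter = 3;  // example starting count
  for (int entry = 1; entry <= 5; entry++) {
    int decr = counter - 1;
    counter = decr;
    if (!(decr > 0)) {  // mirrors the BoolTest::gt cutout above
      std::printf("entry %d: counter exhausted -> uncommon trap\n", entry);
      break;
    }
    std::printf("entry %d: counter now %d, continue in compiled code\n",
                entry, decr);
  }
  return 0;
}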
2197 
2198 //------------------------------return_current---------------------------------
2199 // Append current _map to _exit_return
2200 void Parse::return_current(Node* value) {
2201   if (RegisterFinalizersAtInit &&
2202       method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2203     call_register_finalizer();
2204   }
2205 
2206   // Do not set_parse_bci, so that return goo is credited to the return insn.
2207   set_bci(InvocationEntryBci);



2208   if (method()->is_synchronized() && GenerateSynchronizationCode) {
2209     shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2210   }
2211   if (C->env()->dtrace_method_probes()) {
2212     make_dtrace_method_exit(method());
2213   }
2214   SafePointNode* exit_return = _exits.map();
2215   exit_return->in( TypeFunc::Control  )->add_req( control() );
2216   exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
2217   Node *mem = exit_return->in( TypeFunc::Memory   );
2218   for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2219     if (mms.is_empty()) {
2220       // get a copy of the base memory, and patch just this one input
2221       const TypePtr* adr_type = mms.adr_type(C);
2222       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2223       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2224       gvn().set_type_bottom(phi);
2225       phi->del_req(phi->req()-1);  // prepare to re-patch
2226       mms.set_memory(phi);
2227     }
2228     mms.memory()->add_req(mms.memory2());
2229   }
2230 
2231   // frame pointer is always same, already captured
2232   if (value != NULL) {
2233     // If returning oops to an interface-return, there is a silent free
2234     // cast from oop to interface allowed by the Verifier.  Make it explicit
2235     // here.
2236     Node* phi = _exits.argument(0);
2237     const TypeInstPtr *tr = phi->bottom_type()->isa_instptr();
2238     if (tr && tr->klass()->is_loaded() &&
2239         tr->klass()->is_interface()) {
2240       const TypeInstPtr *tp = value->bottom_type()->isa_instptr();
2241       if (tp && tp->klass()->is_loaded() &&
2242           !tp->klass()->is_interface()) {
2243         // sharpen the type eagerly; this eases certain assert checking
2244         if (tp->higher_equal(TypeInstPtr::NOTNULL))
2245           tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();

2246         value = _gvn.transform(new CheckCastPPNode(0, value, tr));
2247       }
2248     } else {
2249       // Also handle returns of oop-arrays to an array-of-interfaces return
2250       const TypeInstPtr* phi_tip;
2251       const TypeInstPtr* val_tip;
2252       Type::get_arrays_base_elements(phi->bottom_type(), value->bottom_type(), &phi_tip, &val_tip);
2253       if (phi_tip != NULL && phi_tip->is_loaded() && phi_tip->klass()->is_interface() &&
2254           val_tip != NULL && val_tip->is_loaded() && !val_tip->klass()->is_interface()) {
2255         value = _gvn.transform(new CheckCastPPNode(0, value, phi->bottom_type()));
2256       }
2257     }
2258     phi->add_req(value);
2259   }
2260 
2261   if (_first_return) {
2262     _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2263     _first_return = false;
2264   } else {
2265     _exits.map()->merge_replaced_nodes_with(map());
2266   }
2267 
2268   stop_and_kill_map();          // This CFG path dies here
2269 }
2270 
2271 
2272 //------------------------------add_safepoint----------------------------------
2273 void Parse::add_safepoint() {
2274   uint parms = TypeFunc::Parms+1;
2275 
2276   // Clear out dead values from the debug info.
2277   kill_dead_locals();
2278 
2279   // Clone the JVM State
2280   SafePointNode *sfpnt = new SafePointNode(parms, NULL);

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "interpreter/linkResolver.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "oops/method.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/c2compiler.hpp"
  32 #include "opto/castnode.hpp"
  33 #include "opto/idealGraphPrinter.hpp"
  34 #include "opto/inlinetypenode.hpp"
  35 #include "opto/locknode.hpp"
  36 #include "opto/memnode.hpp"
  37 #include "opto/opaquenode.hpp"
  38 #include "opto/parse.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/runtime.hpp"
  41 #include "opto/type.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/safepointMechanism.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "utilities/bitMap.inline.hpp"
  46 #include "utilities/copy.hpp"
  47 
  48 // Static array so we can figure out which bytecodes stop us from compiling
  49 // the most. Some of the non-static variables are needed in bytecodeInfo.cpp
  50 // and eventually should be encapsulated in a proper class (gri 8/18/98).
  51 
  52 #ifndef PRODUCT
  53 int nodes_created              = 0;
  54 int methods_parsed             = 0;

  86   }
  87   if (all_null_checks_found) {
  88     tty->print_cr("%d made implicit (%2d%%)", implicit_null_checks,
  89                   (100*implicit_null_checks)/all_null_checks_found);
  90   }
  91   if (SharedRuntime::_implicit_null_throws) {
  92     tty->print_cr("%d implicit null exceptions at runtime",
  93                   SharedRuntime::_implicit_null_throws);
  94   }
  95 
  96   if (PrintParseStatistics && BytecodeParseHistogram::initialized()) {
  97     BytecodeParseHistogram::print();
  98   }
  99 }
 100 #endif
 101 
 102 //------------------------------ON STACK REPLACEMENT---------------------------
 103 
 104 // Construct a node which can be used to get incoming state for
 105 // on stack replacement.
 106 Node* Parse::fetch_interpreter_state(int index,
 107                                      const Type* type,
 108                                      Node* local_addrs,
 109                                      Node* local_addrs_base) {
 110   BasicType bt = type->basic_type();
 111   if (type == TypePtr::NULL_PTR) {
 112     // Ptr types are mixed together with T_ADDRESS but NULL is
 113     // really for T_OBJECT types so correct it.
 114     bt = T_OBJECT;
 115   }
 116   Node *mem = memory(Compile::AliasIdxRaw);
 117   Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
 118   Node *ctl = control();
 119 
 120   // Very similar to LoadNode::make, except we handle un-aligned longs and
 121   // doubles on Sparc.  Intel can handle them just fine directly.
 122   Node *l = NULL;
 123   switch (bt) {                // Signature is flattened
 124   case T_INT:     l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        MemNode::unordered); break;
 125   case T_FLOAT:   l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         MemNode::unordered); break;
 126   case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,  MemNode::unordered); break;
 127   case T_INLINE_TYPE:
 128   case T_OBJECT:  l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
 129   case T_LONG:
 130   case T_DOUBLE: {
 131     // Since arguments are in reverse order, the argument address 'adr'
 132     // refers to the back half of the long/double.  Recompute adr.
 133     adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
 134     if (Matcher::misaligned_doubles_ok) {
 135       l = (bt == T_DOUBLE)
 136         ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
 137         : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
 138     } else {
 139       l = (bt == T_DOUBLE)
 140         ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
 141         : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
 142     }
 143     break;
 144   }
 145   default: ShouldNotReachHere();
 146   }
 147   return _gvn.transform(l);
 148 }
 149 
 150 // Helper routine to prevent the interpreter from handing
 151 // unexpected typestate to an OSR method.
 152 // The Node l is a value newly dug out of the interpreter frame.
 153 // The type is the type predicted by ciTypeFlow.  Note that it is
 154 // not a general type, but can only come from Type::get_typeflow_type.
 155 // The safepoint is a map which will feed an uncommon trap.
 156 Node* Parse::check_interpreter_type(Node* l, const Type* type,
 157                                     SafePointNode* &bad_type_exit) {

 158   const TypeOopPtr* tp = type->isa_oopptr();
 159   if (type->isa_inlinetype() != NULL) {
 160     // The interpreter passes inline types as oops
 161     tp = TypeOopPtr::make_from_klass(type->inline_klass());
 162     tp = tp->join_speculative(TypePtr::NOTNULL)->is_oopptr();
 163   }
 164 
 165   // TypeFlow may assert null-ness if a type appears unloaded.
 166   if (type == TypePtr::NULL_PTR ||
 167       (tp != NULL && !tp->klass()->is_loaded())) {
 168     // Value must be null, not a real oop.
 169     Node* chk = _gvn.transform( new CmpPNode(l, null()) );
 170     Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
 171     IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
 172     set_control(_gvn.transform( new IfTrueNode(iff) ));
 173     Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
 174     bad_type_exit->control()->add_req(bad_type);
 175     l = null();
 176   }
 177 
 178   // Typeflow can also cut off paths from the CFG, based on
 179   // types which appear unloaded, or call sites which appear unlinked.
 180   // When paths are cut off, values at later merge points can rise
 181   // toward more specific classes.  Make sure these specific classes
 182   // are still in effect.
 183   if (tp != NULL && tp->klass() != C->env()->Object_klass()) {
 184     // TypeFlow asserted a specific object type.  Value must have that type.
 185     Node* bad_type_ctrl = NULL;
 186     if (tp->is_inlinetypeptr() && !tp->maybe_null()) {
 187       // Check inline types for null here to prevent checkcast from adding an
 188       // exception state before the bytecode entry (use 'bad_type_ctrl' instead).
 189       l = null_check_oop(l, &bad_type_ctrl);
 190       bad_type_exit->control()->add_req(bad_type_ctrl);
 191     }
 192     l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
 193     bad_type_exit->control()->add_req(bad_type_ctrl);
 194   }



 195   assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
 196   return l;
 197 }
 198 
 199 // Helper routine which sets up elements of the initial parser map when
 200 // performing a parse for on stack replacement.  Add values into map.
 201 // The only parameter contains the address of the interpreter's argument area.
 202 void Parse::load_interpreter_state(Node* osr_buf) {
 203   int index;
 204   int max_locals = jvms()->loc_size();
 205   int max_stack  = jvms()->stk_size();
 206 

 207   // Mismatch between method and jvms can occur since map briefly held
 208   // an OSR entry state (which takes up one RawPtr word).
 209   assert(max_locals == method()->max_locals(), "sanity");
 210   assert(max_stack  >= method()->max_stack(),  "sanity");
 211   assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
 212   assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
 213 
 214   // Find the start block.
 215   Block* osr_block = start_block();
 216   assert(osr_block->start() == osr_bci(), "sanity");
 217 
 218   // Set initial BCI.
 219   set_parse_bci(osr_block->start());
 220 
 221   // Set initial stack depth.
 222   set_sp(osr_block->start_sp());
 223 
 224   // Check bailouts.  We currently do not perform on stack replacement
 225   // of loops in catch blocks or loops which branch with a non-empty stack.
 226   if (sp() != 0) {
 227     C->record_method_not_compilable("OSR starts with non-empty stack");
 228     return;
 229   }
 230   // Do not OSR inside finally clauses:
 231   if (osr_block->has_trap_at(osr_block->start())) {
 232     C->record_method_not_compilable("OSR starts with an immediate trap");
 233     return;
 234   }
 235 
 236   // Commute monitors from interpreter frame to compiler frame.
 237   assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
 238   int mcnt = osr_block->flow()->monitor_count();
 239   Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
 240   for (index = 0; index < mcnt; index++) {
 241     // Make a BoxLockNode for the monitor.
 242     Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
 243 

 244     // Displaced headers and locked objects are interleaved in the
 245     // temp OSR buffer.  We only copy the locked objects out here.
 246     // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
 247     Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
 248     // Try and copy the displaced header to the BoxNode
 249     Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);

 250 
 251     store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
 252 
 253     // Build a bogus FastLockNode (no code will be generated) and push the
 254     // monitor into our debug info.
 255     const FastLockNode *flock = _gvn.transform(new FastLockNode( 0, lock_object, box ))->as_FastLock();
 256     map()->push_monitor(flock);
 257 
 258     // If the lock is our method synchronization lock, tuck it away in
 259     // _sync_lock for return and rethrow exit paths.
 260     if (index == 0 && method()->is_synchronized()) {
 261       _synch_lock = flock;
 262     }
 263   }
 264 
 265   // Use the raw liveness computation to make sure that unexpected
 266   // values don't propagate into the OSR frame.
 267   MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
 268   if (!live_locals.is_valid()) {
 269     // Degenerate or breakpointed method.

 296         if (C->log() != NULL) {
 297           C->log()->elem("OSR_mismatch local_index='%d'",index);
 298         }
 299         set_local(index, null());
 300         // and ignore it for the loads
 301         continue;
 302       }
 303     }
 304 
 305     // Filter out TOP, HALF, and BOTTOM.  (Cf. ensure_phi.)
 306     if (type == Type::TOP || type == Type::HALF) {
 307       continue;
 308     }
 309     // If the type falls to bottom, then this must be a local that
 310     // is mixing ints and oops or some such.  Forcing it to top
 311     // makes it go dead.
 312     if (type == Type::BOTTOM) {
 313       continue;
 314     }
 315     // Construct code to access the appropriate local.
 316     Node* value = fetch_interpreter_state(index, type, locals_addr, osr_buf);






 317     set_local(index, value);
 318   }
 319 
 320   // Extract the needed stack entries from the interpreter frame.
 321   for (index = 0; index < sp(); index++) {
 322     const Type *type = osr_block->stack_type_at(index);
 323     if (type != Type::TOP) {
 324       // Currently the compiler bails out when attempting to on stack replace
 325       // at a bci with a non-empty stack.  We should not reach here.
 326       ShouldNotReachHere();
 327     }
 328   }
 329 
 330   // End the OSR migration
 331   make_runtime_call(RC_LEAF, OptoRuntime::osr_end_Type(),
 332                     CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
 333                     "OSR_migration_end", TypeRawPtr::BOTTOM,
 334                     osr_buf);
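       // OSR_migration_end frees the temporary OSR buffer now that all interpreter state has been copied into the compiled frame.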
 335 
 336   // Now that the interpreter state is loaded, make sure it will match

 584     }
 585   }
 586 
 587   if (depth() == 1 && !failing()) {
 588     if (C->clinit_barrier_on_entry()) {
 589       // Add check to deoptimize the nmethod once the holder class is fully initialized
 590       clinit_deopt();
 591     }
 592 
 593     // Add check to deoptimize the nmethod if RTM state was changed
 594     rtm_deopt();
 595   }
 596 
 597   // Check for bailouts during method entry or RTM state check setup.
 598   if (failing()) {
 599     if (log)  log->done("parse");
 600     C->set_default_node_notes(caller_nn);
 601     return;
 602   }
 603 
 604   // Handle inline type arguments
 605   int arg_size_sig = tf()->domain_sig()->cnt();
 606   for (uint i = 0; i < (uint)arg_size_sig; i++) {
 607     Node* parm = map()->in(i);
 608     const Type* t = _gvn.type(parm);
 609     if (t->is_inlinetypeptr()) {
 610       // Create InlineTypeNode from the oop and replace the parameter
 611       Node* vt = InlineTypeNode::make_from_oop(this, parm, t->inline_klass(), !t->maybe_null());
 612       map()->replace_edge(parm, vt);
 613     } else if (UseTypeSpeculation && (i == (uint)(arg_size_sig - 1)) && !is_osr_parse() &&
 614                method()->has_vararg() && t->isa_aryptr() != NULL && !t->is_aryptr()->is_not_null_free()) {
 615       // Speculate on varargs Object array being not null-free (and therefore also not flattened)
 616       const TypePtr* spec_type = t->speculative();
 617       spec_type = (spec_type != NULL && spec_type->isa_aryptr() != NULL) ? spec_type : t->is_aryptr();
 618       spec_type = spec_type->remove_speculative()->is_aryptr()->cast_to_not_null_free();
 619       spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, spec_type);
 620       Node* cast = _gvn.transform(new CheckCastPPNode(control(), parm, t->join_speculative(spec_type)));
 621       replace_in_map(parm, cast);
 622     }
 623   }
 624 
 625   entry_map = map();  // capture any changes performed by method setup code
 626   assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
 627 
 628   // We begin parsing as if we have just encountered a jump to the
 629   // method entry.
 630   Block* entry_block = start_block();
 631   assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
 632   set_map_clone(entry_map);
 633   merge_common(entry_block, entry_block->next_path_num());
 634 
 635 #ifndef PRODUCT
 636   BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
 637   set_parse_histogram( parse_histogram_obj );
 638 #endif
 639 
 640   // Parse all the basic blocks.
 641   do_all_blocks();
 642 
 643   C->set_default_node_notes(caller_nn);
 644 

 787 void Parse::build_exits() {
 788   // make a clone of caller to prevent sharing of side-effects
 789   _exits.set_map(_exits.clone_map());
 790   _exits.clean_stack(_exits.sp());
 791   _exits.sync_jvms();
 792 
 793   RegionNode* region = new RegionNode(1);
 794   record_for_igvn(region);
 795   gvn().set_type_bottom(region);
 796   _exits.set_control(region);
 797 
 798   // Note:  iophi and memphi are not transformed until do_exits.
 799   Node* iophi  = new PhiNode(region, Type::ABIO);
 800   Node* memphi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM);
 801   gvn().set_type_bottom(iophi);
 802   gvn().set_type_bottom(memphi);
 803   _exits.set_i_o(iophi);
 804   _exits.set_all_memory(memphi);
 805 
 806   // Add a return value to the exit state.  (Do not push it yet.)
 807   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
 808     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
 809     if (ret_type->isa_int()) {
 810       BasicType ret_bt = method()->return_type()->basic_type();
 811       if (ret_bt == T_BOOLEAN ||
 812           ret_bt == T_CHAR ||
 813           ret_bt == T_BYTE ||
 814           ret_bt == T_SHORT) {
 815         ret_type = TypeInt::INT;
 816       }
 817     }
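         // Sub-word return values are kept as ints in the exit phi; they are masked back to the declared width in do_exits() via mask_int_value().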
 818 
 819     // Don't "bind" an unloaded return klass to the ret_phi. If the klass
 820     // becomes loaded during the subsequent parsing, the loaded and unloaded
 821     // types will not join when we transform and push in do_exits().
 822     const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
 823     if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
 824       ret_type = TypeOopPtr::BOTTOM;
 825     }
 826     // Scalarize inline type when returning as fields or inlining non-incrementally
 827     if ((tf()->returns_inline_type_as_fields() || (_caller->has_method() && !Compile::current()->inlining_incrementally())) &&
 828         ret_type->is_inlinetypeptr() && !ret_type->maybe_null()) {
 829       ret_type = TypeInlineType::make(ret_type->inline_klass());
 830     }
 831     int         ret_size = type2size[ret_type->basic_type()];
 832     Node*       ret_phi  = new PhiNode(region, ret_type);
 833     gvn().set_type_bottom(ret_phi);
 834     _exits.ensure_stack(ret_size);
 835     assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
 836     assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
 837     _exits.set_argument(0, ret_phi);  // here is where the parser finds it
 838     // Note:  ret_phi is not yet pushed, until do_exits.
 839   }
 840 }
 841 

 842 //----------------------------build_start_state-------------------------------
 843 // Construct a state which contains only the incoming arguments from an
 844 // unknown caller.  The method & bci will be NULL & InvocationEntryBci.
 845 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
 846   int        arg_size = tf->domain_sig()->cnt();
 847   int        max_size = MAX2(arg_size, (int)tf->range_cc()->cnt());
 848   JVMState*  jvms     = new (this) JVMState(max_size - TypeFunc::Parms);
 849   SafePointNode* map  = new SafePointNode(max_size, jvms);
 850   jvms->set_map(map);
 851   record_for_igvn(map);
 852   assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 1 : method()->arg_size()), "correct arg_size");
 853   Node_Notes* old_nn = default_node_notes();
 854   if (old_nn != NULL && has_method()) {
 855     Node_Notes* entry_nn = old_nn->clone(this);
 856     JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms());
 857     entry_jvms->set_offsets(0);
 858     entry_jvms->set_bci(entry_bci());
 859     entry_nn->set_jvms(entry_jvms);
 860     set_default_node_notes(entry_nn);
 861   }
 862   PhaseGVN& gvn = *initial_gvn();
 863   uint i = 0;
 864   for (uint j = 0; i < (uint)arg_size; i++) {
 865     const Type* t = tf->domain_sig()->field_at(i);
 866     Node* parm = NULL;
 867     if (has_scalarized_args() && t->is_inlinetypeptr() && !t->maybe_null() && t->inline_klass()->can_be_passed_as_fields()) {
 868       // Inline type arguments are not passed by reference: we get an argument per
 869       // field of the inline type. Build InlineTypeNodes from the inline type arguments.
 870       GraphKit kit(jvms, &gvn);
 871       kit.set_control(map->control());
 872       Node* old_mem = map->memory();
 873       // Use immutable memory for inline type loads and restore it below
 874       kit.set_all_memory(C->immutable_memory());
 875       parm = InlineTypeNode::make_from_multi(&kit, start, t->inline_klass(), j, true);
 876       map->set_control(kit.control());
 877       map->set_memory(old_mem);
 878     } else {
 879       parm = gvn.transform(new ParmNode(start, j++));
 880     }
 881     map->init_req(i, parm);
 882     // Record all these guys for later GVN.
 883     record_for_igvn(parm);
 884   }
 885   for (; i < map->req(); i++) {
 886     map->init_req(i, top());
 887   }
 888   assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
 889   set_default_node_notes(old_nn);

 890   return jvms;
 891 }
 892 
 893 //-----------------------------make_node_notes---------------------------------
 894 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
 895   if (caller_nn == NULL)  return NULL;
 896   Node_Notes* nn = caller_nn->clone(C);
 897   JVMState* caller_jvms = nn->jvms();
 898   JVMState* jvms = new (C) JVMState(method(), caller_jvms);
 899   jvms->set_offsets(0);
 900   jvms->set_bci(_entry_bci);
 901   nn->set_jvms(jvms);
 902   return nn;
 903 }
 904 
 905 
 906 //--------------------------return_values--------------------------------------
 907 void Compile::return_values(JVMState* jvms) {
 908   GraphKit kit(jvms);
 909   Node* ret = new ReturnNode(TypeFunc::Parms,
 910                              kit.control(),
 911                              kit.i_o(),
 912                              kit.reset_memory(),
 913                              kit.frameptr(),
 914                              kit.returnadr());
 915   // Add zero or one return value.
 916   int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
 917   if (ret_size > 0) {
 918     kit.inc_sp(-ret_size);  // pop the return value(s)
 919     kit.sync_jvms();
 920     Node* res = kit.argument(0);
 921     if (tf()->returns_inline_type_as_fields()) {
 922       // Multiple return values (inline type fields): add as many edges
 923       // to the Return node as returned values.
 924       InlineTypeBaseNode* vt = res->as_InlineTypeBase();
 925       ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms);
 926       if (vt->is_allocated(&kit.gvn()) && !StressInlineTypeReturnedAsFields) {
 927         ret->init_req(TypeFunc::Parms, vt->get_oop());
 928       } else {
 929         ret->init_req(TypeFunc::Parms, vt->tagged_klass(kit.gvn()));
 930       }
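           // The first return edge is either the buffered oop or a tagged klass pointer, signaling to the caller that only the scalarized field values (the following edges) are available.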
 931       uint idx = TypeFunc::Parms + 1;
 932       vt->pass_fields(&kit, ret, idx);
 933     } else {
 934       if (res->is_InlineType()) {
 935         assert(res->as_InlineType()->is_allocated(&kit.gvn()), "must be allocated");
 936         res = res->as_InlineType()->get_oop();
 937       }
 938       ret->add_req(res);
 939       // Note:  The second dummy edge is not needed by a ReturnNode.
 940     }
 941   }
 942   // bind it to root
 943   root()->add_req(ret);
 944   record_for_igvn(ret);
 945   initial_gvn()->transform_no_reclaim(ret);
 946 }
 947 
 948 //------------------------rethrow_exceptions-----------------------------------
 949 // Bind all exception states in the list into a single RethrowNode.
 950 void Compile::rethrow_exceptions(JVMState* jvms) {
 951   GraphKit kit(jvms);
 952   if (!kit.has_exceptions())  return;  // nothing to generate
 953   // Load my combined exception state into the kit, with all phis transformed:
 954   SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
 955   Node* ex_oop = kit.use_exception_state(ex_map);
 956   RethrowNode* exit = new RethrowNode(kit.control(),
 957                                       kit.i_o(), kit.reset_memory(),
 958                                       kit.frameptr(), kit.returnadr(),
 959                                       // like a return but with exception input
 960                                       ex_oop);

1044   //    to complete, we force all writes to complete.
1045   //
1046   // 2. Experimental VM option is used to force the barrier if any field
1047   //    was written out in the constructor.
1048   //
1049   // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
1050   //    support_IRIW_for_not_multiple_copy_atomic_cpu selects that
1051   //    MemBarVolatile is used before volatile load instead of after volatile
1052   //    store, so there's no barrier after the store.
1053   //    We want to guarantee the same behavior as on platforms with total store
1054   //    order, although this is not required by the Java memory model.
1055   //    In this case, we want to enforce visibility of volatile field
1056   //    initializations which are performed in constructors.
1057   //    So as with finals, we add a barrier here.
1058   //
1059   // "All bets are off" unless the first publication occurs after a
1060   // normal return from the constructor.  We do not attempt to detect
1061   // such unusual early publications.  But no barrier is needed on
1062   // exceptional returns, since they cannot publish normally.
1063   //
1064   if (method()->is_object_constructor_or_class_initializer() &&
1065        (wrote_final() ||
1066          (AlwaysSafeConstructors && wrote_fields()) ||
1067          (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
1068     _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
1069 
1070     // If the memory barrier is created for final field writes
1071     // and the allocation node does not escape the initializer method,
1072     // then the barrier introduced by the allocation node can be removed.
1073     if (DoEscapeAnalysis && alloc_with_final()) {
1074       AllocateNode *alloc = AllocateNode::Ideal_allocation(alloc_with_final(), &_gvn);
1075       alloc->compute_MemBar_redundancy(method());
1076     }
1077     if (PrintOpto && (Verbose || WizardMode)) {
1078       method()->print_name();
1079       tty->print_cr(" writes finals and needs a memory barrier");
1080     }
1081   }
1082 
1083   // Any method can write a @Stable field; insert memory barriers
1084   // after those also. Can't bind predecessor allocation node (if any)
1085   // with barrier because allocation doesn't always dominate
1086   // MemBarRelease.
1087   if (wrote_stable()) {
1088     _exits.insert_mem_bar(Op_MemBarRelease);
1089     if (PrintOpto && (Verbose || WizardMode)) {
1090       method()->print_name();
1091       tty->print_cr(" writes @Stable and needs a memory barrier");
1092     }
1093   }
1094 
1095   for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
1096     // transform each slice of the original memphi:
1097     mms.set_memory(_gvn.transform(mms.memory()));
1098   }
1099   // Clean up input MergeMems created by transforming the slices
1100   _gvn.transform(_exits.merged_memory());
1101 
1102   if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
1103     const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
1104     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
1105     if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
1106       // If the type we set for the ret_phi in build_exits() is too optimistic and
1107       // the ret_phi is top now, there's an extremely small chance that it may be due to class
1108       // loading.  It could also be due to an error, so mark this method as not compilable because
1109       // otherwise this could lead to an infinite compile loop.
1110       // In any case, this code path is rarely (and never in my testing) reached.
1111       C->record_method_not_compilable("Can't determine return type.");
1112       return;
1113     }
1114     if (ret_type->isa_int()) {
1115       BasicType ret_bt = method()->return_type()->basic_type();
1116       ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1117     }
1118     _exits.push_node(ret_type->basic_type(), ret_phi);
1119   }
1120 
1121   // Note:  Logic for creating and optimizing the ReturnNode is in Compile.
1122 
1123   // Unlock along the exceptional paths.

1176 }
1177 
1178 //-----------------------------create_entry_map-------------------------------
1179 // Initialize our parser map to contain the types at method entry.
1180 // For OSR, the map contains a single RawPtr parameter.
1181 // Initial monitor locking for sync. methods is performed by do_method_entry.
1182 SafePointNode* Parse::create_entry_map() {
1183   // Check for really stupid bail-out cases.
1184   uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack();
1185   if (len >= 32760) {
1186     C->record_method_not_compilable("too many local variables");
1187     return NULL;
1188   }
1189 
1190   // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
1191   _caller->map()->delete_replaced_nodes();
1192 
1193   // If this is an inlined method, we may have to do a receiver null check.
1194   if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
1195     GraphKit kit(_caller);
1196     kit.null_check_receiver_before_call(method(), false);
1197     _caller = kit.transfer_exceptions_into_jvms();
1198     if (kit.stopped()) {
1199       _exits.add_exception_states_from(_caller);
1200       _exits.set_jvms(_caller);
1201       return NULL;
1202     }
1203   }
1204 
1205   assert(method() != NULL, "parser must have a method");
1206 
1207   // Create an initial safepoint to hold JVM state during parsing
1208   JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : NULL);
1209   set_map(new SafePointNode(len, jvms));
1210   jvms->set_map(map());
1211   record_for_igvn(map());
1212   assert(jvms->endoff() == len, "correct jvms sizing");
1213 
1214   SafePointNode* inmap = _caller->map();
1215   assert(inmap != NULL, "must have inmap");
1216   // In case of null check on receiver above
1217   map()->transfer_replaced_nodes_from(inmap, _new_idx);
1218 
1219   uint i;
1220 
1221   // Pass thru the predefined input parameters.
1222   for (i = 0; i < TypeFunc::Parms; i++) {
1223     map()->init_req(i, inmap->in(i));
1224   }
1225 
1226   if (depth() == 1) {
1227     assert(map()->memory()->Opcode() == Op_Parm, "");
1228     // Insert the memory aliasing node
1229     set_all_memory(reset_memory());
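         // i.e. wrap the incoming memory edge in a MergeMem so each alias class gets its own memory slice.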
1230   }
1231   assert(merged_memory(), "");
1232 
1233   // Now add the locals which are initially bound to arguments:
1234   uint arg_size = tf()->domain_sig()->cnt();
1235   ensure_stack(arg_size - TypeFunc::Parms);  // OSR methods have funny args
1236   for (i = TypeFunc::Parms; i < arg_size; i++) {
1237     map()->init_req(i, inmap->argument(_caller, i - TypeFunc::Parms));
1238   }
1239 
1240   // Clear out the rest of the map (locals and stack)
1241   for (i = arg_size; i < len; i++) {
1242     map()->init_req(i, top());
1243   }
1244 
1245   SafePointNode* entry_map = stop();
1246   return entry_map;
1247 }
1248 
1249 //-----------------------------do_method_entry--------------------------------
1250 // Emit any code needed in the pseudo-block before BCI zero.
1251 // The main thing to do is lock the receiver of a synchronized method.
1252 void Parse::do_method_entry() {
1253   set_parse_bci(InvocationEntryBci); // Pseudo-BCP
1254   set_sp(0);                         // Java Stack Pointer

1288 
1289   // If the method is synchronized, we need to construct a lock node, attach
1290   // it to the Start node, and pin it there.
1291   if (method()->is_synchronized()) {
1292     // Insert a FastLockNode right after the Start which takes as arguments
1293     // the current thread pointer, the "this" pointer & the address of the
1294     // stack slot pair used for the lock.  The "this" pointer is a projection
1295     // off the start node, but the locking spot has to be constructed by
1296     // creating a ConLNode of 0, and boxing it with a BoxLockNode.  The BoxLockNode
1297     // becomes the second argument to the FastLockNode call.  The
1298     // FastLockNode becomes the new control parent to pin it to the start.
1299 
1300     // Setup Object Pointer
1301     Node *lock_obj = NULL;
1302     if (method()->is_static()) {
1303       ciInstance* mirror = _method->holder()->java_mirror();
1304       const TypeInstPtr *t_lock = TypeInstPtr::make(mirror);
1305       lock_obj = makecon(t_lock);
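           // A static method synchronizes on its holder's java.lang.Class mirror.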
1306     } else {                  // Else pass the "this" pointer,
1307       lock_obj = local(0);    // which is Parm0 from StartNode
1308       assert(!_gvn.type(lock_obj)->make_oopptr()->can_be_inline_type(), "can't be an inline type");
1309     }
1310     // Clear out dead values from the debug info.
1311     kill_dead_locals();
1312     // Build the FastLockNode
1313     _synch_lock = shared_lock(lock_obj);
1314   }
1315 
1316   // Feed profiling data for parameters to the type system so it can
1317   // propagate it as speculative types
1318   record_profiled_parameters_for_speculation();
1319 }
1320 
1321 //------------------------------init_blocks------------------------------------
1322 // Initialize our parser map to contain the types/monitors at method entry.
1323 void Parse::init_blocks() {
1324   // Create the blocks.
1325   _block_count = flow()->block_count();
1326   _blocks = NEW_RESOURCE_ARRAY(Block, _block_count);
1327 
1328   // Initialize the structs.

1700 //--------------------handle_missing_successor---------------------------------
1701 void Parse::handle_missing_successor(int target_bci) {
1702 #ifndef PRODUCT
1703   Block* b = block();
1704   int trap_bci = b->flow()->has_trap()? b->flow()->trap_bci(): -1;
1705   tty->print_cr("### Missing successor at bci:%d for block #%d (trap_bci:%d)", target_bci, b->rpo(), trap_bci);
1706 #endif
1707   ShouldNotReachHere();
1708 }
1709 
1710 //--------------------------merge_common---------------------------------------
1711 void Parse::merge_common(Parse::Block* target, int pnum) {
1712   if (TraceOptoParse) {
1713     tty->print("Merging state at block #%d bci:%d", target->rpo(), target->start());
1714   }
1715 
1716   // Zap extra stack slots to top
1717   assert(sp() == target->start_sp(), "");
1718   clean_stack(sp());
1719 
1720   // Check for merge conflicts involving inline types
1721   JVMState* old_jvms = map()->jvms();
1722   int old_bci = bci();
1723   JVMState* tmp_jvms = old_jvms->clone_shallow(C);
1724   tmp_jvms->set_should_reexecute(true);
1725   tmp_jvms->bind_map(map());
1726   // If buffering an inline type below triggers a deoptimization, execution
1727   // needs to restart at the next bytecode (the entry of the target block).
1728   if (target->is_merged() ||
1729       pnum > PhiNode::Input ||
1730       target->is_handler() ||
1731       target->is_loop_head()) {
1732     set_parse_bci(target->start());
1733     for (uint j = TypeFunc::Parms; j < map()->req(); j++) {
1734       Node* n = map()->in(j);                 // Incoming change to target state.
1735       const Type* t = NULL;
1736       if (tmp_jvms->is_loc(j)) {
1737         t = target->local_type_at(j - tmp_jvms->locoff());
1738       } else if (tmp_jvms->is_stk(j) && j < (uint)sp() + tmp_jvms->stkoff()) {
1739         t = target->stack_type_at(j - tmp_jvms->stkoff());
1740       }
1741       if (t != NULL && t != Type::BOTTOM) {
1742         if (n->is_InlineType() && !t->isa_inlinetype()) {
1743           // TODO Currently, the implementation relies on the assumption that InlineTypePtrNodes
1744           // are always buffered. We therefore need to allocate here.
1745           // Allocate inline type in src block to be able to merge it with oop in target block
1746           map()->set_req(j, n->as_InlineType()->buffer(this));
1747         } else if (!n->is_InlineTypeBase() && t->is_inlinetypeptr()) {
1748           // Scalarize null in src block to be able to merge it with inline type in target block
1749           assert(gvn().type(n)->is_zero_type(), "Should have been scalarized");
1750           map()->set_req(j, InlineTypePtrNode::make_null(gvn(), t->inline_klass()));
1751         }
1752       }
1753     }
1754   }
1755   old_jvms->bind_map(map());
1756   set_parse_bci(old_bci);
1757 
1758   if (!target->is_merged()) {   // No prior mapping at this bci
1759     if (TraceOptoParse) { tty->print(" with empty state");  }
1760 
1761     // If this path is dead, do not bother capturing it as a merge.
1762     // It is "as if" we had one fewer predecessor from the beginning.
1763     if (stopped()) {
1764       if (TraceOptoParse)  tty->print_cr(", but path is dead and doesn't count");
1765       return;
1766     }
1767 
1768     // Make a region if we know there are multiple or unpredictable inputs.
1769     // (Also, if this is a plain fall-through, we might see another region,
1770     // which must not be allowed into this block's map.)
1771     if (pnum > PhiNode::Input         // Known multiple inputs.
1772         || target->is_handler()       // These have unpredictable inputs.
1773         || target->is_loop_head()     // Known multiple inputs
1774         || control()->is_Region()) {  // We must hide this guy.
1775 
1776       int current_bci = bci();
1777       set_parse_bci(target->start()); // Set target bci

1791       gvn().set_type(r, Type::CONTROL);
1792       record_for_igvn(r);
1793       // zap all inputs to NULL for debugging (done in Node(uint) constructor)
1794       // for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); }
1795       r->init_req(pnum, control());
1796       set_control(r);
1797       set_parse_bci(current_bci); // Restore bci
1798     }
1799 
1800     // Convert the existing Parser mapping into a mapping at this bci.
1801     store_state_to(target);
1802     assert(target->is_merged(), "do not come here twice");
1803 
1804   } else {                      // Prior mapping at this bci
1805     if (TraceOptoParse) {  tty->print(" with previous state"); }
1806 #ifdef ASSERT
1807     if (target->is_SEL_head()) {
1808       target->mark_merged_backedge(block());
1809     }
1810 #endif
1811 
1812     // We must not manufacture more phis if the target is already parsed.
1813     bool nophi = target->is_parsed();
1814 
1815     SafePointNode* newin = map();// Hang on to incoming mapping
1816     Block* save_block = block(); // Hang on to incoming block;
1817     load_state_from(target);    // Get prior mapping
1818 
1819     assert(newin->jvms()->locoff() == jvms()->locoff(), "JVMS layouts agree");
1820     assert(newin->jvms()->stkoff() == jvms()->stkoff(), "JVMS layouts agree");
1821     assert(newin->jvms()->monoff() == jvms()->monoff(), "JVMS layouts agree");
1822     assert(newin->jvms()->endoff() == jvms()->endoff(), "JVMS layouts agree");
1823 
1824     // Iterate over my current mapping and the old mapping.
1825     // Where different, insert Phi functions.
1826     // Use any existing Phi functions.
1827     assert(control()->is_Region(), "must be merging to a region");
1828     RegionNode* r = control()->as_Region();
1829 
1830     // Compute where to merge into
1831     // Merge incoming control path
1832     r->init_req(pnum, newin->control());
1833 
1834     if (pnum == 1) {            // Last merge for this Region?
1835       if (!block()->flow()->is_irreducible_entry()) {
1836         Node* result = _gvn.transform_no_reclaim(r);
1837         if (r != result && TraceOptoParse) {
1838           tty->print_cr("Block #%d replace %d with %d", block()->rpo(), r->_idx, result->_idx);
1839         }
1840       }
1841       record_for_igvn(r);
1842     }
1843 
1844     // Update all the non-control inputs to map:
1845     assert(TypeFunc::Parms == newin->jvms()->locoff(), "parser map should contain only youngest jvms");
1846     bool check_elide_phi = target->is_SEL_backedge(save_block);
1847     bool last_merge = (pnum == PhiNode::Input);
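         // pnum == PhiNode::Input (path 1) is merged last; only then are the region and its phis transformed and recorded for IGVN.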
1848     for (uint j = 1; j < newin->req(); j++) {
1849       Node* m = map()->in(j);   // Current state of target.
1850       Node* n = newin->in(j);   // Incoming change to target state.
1851       PhiNode* phi;
1852       if (m->is_Phi() && m->as_Phi()->region() == r) {
1853         phi = m->as_Phi();
1854       } else if (m->is_InlineTypeBase() && m->as_InlineTypeBase()->has_phi_inputs(r)) {
1855         phi = m->as_InlineTypeBase()->get_oop()->as_Phi();
1856       } else {
1857         phi = NULL;
1858       }
1859       if (m != n) {             // Different; must merge
1860         switch (j) {
1861         // Frame pointer and Return Address never change
1862         case TypeFunc::FramePtr:// Drop m, use the original value
1863         case TypeFunc::ReturnAdr:
1864           break;
1865         case TypeFunc::Memory:  // Merge inputs to the MergeMem node
1866           assert(phi == NULL, "the merge contains phis, not vice versa");
1867           merge_memory_edges(n->as_MergeMem(), pnum, nophi);
1868           continue;
1869         default:                // All normal stuff
1870           if (phi == NULL) {
1871             const JVMState* jvms = map()->jvms();
1872             if (EliminateNestedLocks &&
1873                 jvms->is_mon(j) && jvms->is_monitor_box(j)) {
1874               // BoxLock nodes are never commoned.
1875               // Use the old BoxLock node as the merged box.
1876               assert(newin->jvms()->is_monitor_box(j), "sanity");
1877               // This assert also tests that nodes are BoxLock.
1878               assert(BoxLockNode::same_slot(n, m), "sanity");
1879               C->gvn_replace_by(n, m);
1880             } else if (!check_elide_phi || !target->can_elide_SEL_phi(j)) {
1881               phi = ensure_phi(j, nophi);
1882             }
1883           }
1884           break;
1885         }
1886       }
1887       // At this point, n might be top if:
1888       //  - there is no phi (because TypeFlow detected a conflict), or
1889       //  - the corresponding control edge is top (a dead incoming path)
1890       // It is a bug if we create a phi which sees a garbage value on a live path.
1891 
1892       // Merging two inline types?
1893       if (phi != NULL && phi->bottom_type()->is_inlinetypeptr()) {
1894         // Reload current state because it may have been updated by ensure_phi
1895         m = map()->in(j);
1896         InlineTypeBaseNode* vtm = m->as_InlineTypeBase(); // Current inline type
1897         InlineTypeBaseNode* vtn = n->as_InlineTypeBase(); // Incoming inline type
1898         assert(vtm->get_oop() == phi, "Inline type should have Phi input");
1899         if (TraceOptoParse) {
1900 #ifdef ASSERT
1901           tty->print_cr("\nMerging inline types");
1902           tty->print_cr("Current:");
1903           vtm->dump(2);
1904           tty->print_cr("Incoming:");
1905           vtn->dump(2);
1906           tty->cr();
1907 #endif
1908         }
1909         // Do the merge
1910         vtm->merge_with(&_gvn, vtn, pnum, last_merge);
1911         if (last_merge) {
1912           map()->set_req(j, _gvn.transform_no_reclaim(vtm));
1913           record_for_igvn(vtm);
1914         }
1915       } else if (phi != NULL) {
1916         assert(n != top() || r->in(pnum) == top(), "live value must not be garbage");
1917         assert(phi->region() == r, "");
1918         phi->set_req(pnum, n);  // Then add 'n' to the merge
1919         if (last_merge) {
1920           // Last merge for this Phi.
1921           // So far, Phis have had a reasonable type from ciTypeFlow.
1922           // Now _gvn will join that with the meet of current inputs.
1923           // BOTTOM is never permissible here, 'cause pessimistically
1924           // Phis of pointers cannot lose the basic pointer type.
1925           debug_only(const Type* bt1 = phi->bottom_type());
1926           assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
1927           map()->set_req(j, _gvn.transform_no_reclaim(phi));
1928           debug_only(const Type* bt2 = phi->bottom_type());
1929           assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
1930           record_for_igvn(phi);
1931         }
1932       }
1933     } // End of for all values to be merged
1934 
1935     if (last_merge && !r->in(0)) {         // The occasional useless Region

1936       assert(control() == r, "");
1937       set_control(r->nonnull_req());
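           // Bypass the useless Region by using its unique live predecessor as the control.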
1938     }
1939 
1940     map()->merge_replaced_nodes_with(newin);
1941 
1942     // newin has been subsumed into the lazy merge, and is now dead.
1943     set_block(save_block);
1944 
1945     stop();                     // done with this guy, for now
1946   }
1947 
1948   if (TraceOptoParse) {
1949     tty->print_cr(" on path %d", pnum);
1950   }
1951 
1952   // Done with this parser state.
1953   assert(stopped(), "");
1954 }
1955 

2067 
2068   // Add new path to the region.
2069   uint pnum = r->req();
2070   r->add_req(NULL);
2071 
2072   for (uint i = 1; i < map->req(); i++) {
2073     Node* n = map->in(i);
2074     if (i == TypeFunc::Memory) {
2075       // Ensure a phi on all currently known memories.
2076       for (MergeMemStream mms(n->as_MergeMem()); mms.next_non_empty(); ) {
2077         Node* phi = mms.memory();
2078         if (phi->is_Phi() && phi->as_Phi()->region() == r) {
2079           assert(phi->req() == pnum, "must be same size as region");
2080           phi->add_req(NULL);
2081         }
2082       }
2083     } else {
2084       if (n->is_Phi() && n->as_Phi()->region() == r) {
2085         assert(n->req() == pnum, "must be same size as region");
2086         n->add_req(NULL);
2087       } else if (n->is_InlineTypeBase() && n->as_InlineTypeBase()->has_phi_inputs(r)) {
2088         n->as_InlineTypeBase()->add_new_path(r);
2089       }
2090     }
2091   }
2092 
2093   return pnum;
2094 }
2095 
2096 //------------------------------ensure_phi-------------------------------------
2097 // Turn the idx'th entry of the current map into a Phi
2098 PhiNode *Parse::ensure_phi(int idx, bool nocreate) {
2099   SafePointNode* map = this->map();
2100   Node* region = map->control();
2101   assert(region->is_Region(), "");
2102 
2103   Node* o = map->in(idx);
2104   assert(o != NULL, "");
2105 
2106   if (o == top())  return NULL; // TOP always merges into TOP
2107 
2108   if (o->is_Phi() && o->as_Phi()->region() == region) {
2109     return o->as_Phi();
2110   }
2111   InlineTypeBaseNode* vt = o->isa_InlineTypeBase();
2112   if (vt != NULL && vt->has_phi_inputs(region)) {
2113     return vt->get_oop()->as_Phi();
2114   }
2115 
2116   // Now use a Phi here for merging
2117   assert(!nocreate, "Cannot build a phi for a block already parsed.");
2118   const JVMState* jvms = map->jvms();
2119   const Type* t = NULL;
2120   if (jvms->is_loc(idx)) {
2121     t = block()->local_type_at(idx - jvms->locoff());
2122   } else if (jvms->is_stk(idx)) {
2123     t = block()->stack_type_at(idx - jvms->stkoff());
2124   } else if (jvms->is_mon(idx)) {
2125     assert(!jvms->is_monitor_box(idx), "no phis for boxes");
2126     t = TypeInstPtr::BOTTOM; // this is sufficient for a lock object
2127   } else if ((uint)idx < TypeFunc::Parms) {
2128     t = o->bottom_type();  // Type::RETURN_ADDRESS or such-like.
2129   } else {
2130     assert(false, "no type information for this phi");
2131   }
2132 
2133   // If the type falls to bottom, then this must be a local that
2134   // is already dead or is mixing ints and oops or some such.
2135   // Forcing it to top makes it go dead.
2136   if (t == Type::BOTTOM) {
2137     map->set_req(idx, top());
2138     return NULL;
2139   }
2140 
2141   // Do not create phis for top either.
2142   // A top on a non-null control flow must be unused even after the phi.
2143   if (t == Type::TOP || t == Type::HALF) {
2144     map->set_req(idx, top());
2145     return NULL;
2146   }
2147 
2148   if (vt != NULL && (t->is_inlinetypeptr() || t->isa_inlinetype())) {
2149     // Inline types are merged by merging their field values.
2150     // Create a cloned InlineTypeNode with phi inputs that
2151     // represents the merged inline type and update the map.
2152     vt = vt->clone_with_phis(&_gvn, region);
2153     map->set_req(idx, vt);
2154     return vt->get_oop()->as_Phi();
2155   } else {
2156     PhiNode* phi = PhiNode::make(region, o, t);
2157     gvn().set_type(phi, t);
2158     if (C->do_escape_analysis()) record_for_igvn(phi);
2159     map->set_req(idx, phi);
2160     return phi;
2161   }
2162 }
2163 
2164 //--------------------------ensure_memory_phi----------------------------------
2165 // Turn the idx'th slice of the current memory into a Phi
2166 PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) {
2167   MergeMemNode* mem = merged_memory();
2168   Node* region = control();
2169   assert(region->is_Region(), "");
2170 
2171   Node *o = (idx == Compile::AliasIdxBot)? mem->base_memory(): mem->memory_at(idx);
2172   assert(o != NULL && o != top(), "");
2173 
2174   PhiNode* phi;
2175   if (o->is_Phi() && o->as_Phi()->region() == region) {
2176     phi = o->as_Phi();
2177     if (phi == mem->base_memory() && idx >= Compile::AliasIdxRaw) {
2178       // clone the shared base memory phi to make a new memory split
2179       assert(!nocreate, "Cannot build a phi for a block already parsed.");
2180       const Type* t = phi->bottom_type();
2181       const TypePtr* adr_type = C->get_adr_type(idx);

2334   Node* cnt = make_load(control(), cnt_adr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
2335   Node* decr = _gvn.transform(new SubINode(cnt, makecon(TypeInt::ONE)));
2336   store_to_memory(control(), cnt_adr, decr, T_INT, adr_type, MemNode::unordered);
2337   Node *chk   = _gvn.transform(new CmpINode(decr, makecon(TypeInt::ZERO)));
2338   Node* tst   = _gvn.transform(new BoolNode(chk, BoolTest::gt));
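       // The cutout below traps (Reason_tenured, Action_make_not_entrant) once the decremented counter is no longer positive.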
2339   { BuildCutout unless(this, tst, PROB_ALWAYS);
2340     uncommon_trap(Deoptimization::Reason_tenured,
2341                   Deoptimization::Action_make_not_entrant);
2342   }
2343 }
2344 
2345 //------------------------------return_current---------------------------------
2346 // Append current _map to _exit_return
2347 void Parse::return_current(Node* value) {
2348   if (RegisterFinalizersAtInit &&
2349       method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2350     call_register_finalizer();
2351   }
2352 
2353   // Do not set_parse_bci, so that return goo is credited to the return insn.
2354   // Because vreturn can trigger an allocation, it can also throw. Setting
2355   // the bci here breaks exception handling. Commenting this out
2356   // doesn't seem to break anything.
2357   //  set_bci(InvocationEntryBci);
2358   if (method()->is_synchronized() && GenerateSynchronizationCode) {
2359     shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2360   }
2361   if (C->env()->dtrace_method_probes()) {
2362     make_dtrace_method_exit(method());
2363   }

















2364   // The frame pointer is always the same and was already captured.
2365   if (value != NULL) {



2366     Node* phi = _exits.argument(0);
2367     const Type* return_type = phi->bottom_type();
2368     const TypeOopPtr* tr = return_type->isa_oopptr();
2369     // The return_type is set in Parse::build_exits().
2370     if (return_type->isa_inlinetype()) {
2371       // Inline type is returned as fields, make sure it is scalarized
2372       if (!value->is_InlineType()) {
2373         value = InlineTypeNode::make_from_oop(this, value, return_type->inline_klass());
2374       }
2375       if (!_caller->has_method() || Compile::current()->inlining_incrementally()) {
2376         // Returning from root or an incrementally inlined method. Make sure all non-flattened
2377         // fields are buffered and re-execute if allocation triggers deoptimization.
2378         PreserveReexecuteState preexecs(this);
2379         assert(tf()->returns_inline_type_as_fields(), "must be returned as fields");
2380         jvms()->set_should_reexecute(true);
2381         inc_sp(1);
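             // Keep the return value in the re-execution state so the interpreter can redo the return if the allocation deoptimizes.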
2382         value = value->as_InlineType()->allocate_fields(this);
2383       }
2384     } else if (value->is_InlineType()) {
2385       // Inline type is returned as oop, make sure it is buffered and re-execute
2386       // if allocation triggers deoptimization.
2387       PreserveReexecuteState preexecs(this);
2388       jvms()->set_should_reexecute(true);
2389       inc_sp(1);
2390       value = value->as_InlineType()->buffer(this);
2391     } else if (tr && tr->isa_instptr() && tr->klass()->is_loaded() && tr->klass()->is_interface()) {
2392       // If returning oops to an interface-return, there is a silent free
2393       // cast from oop to interface allowed by the Verifier. Make it explicit here.
2394       const TypeInstPtr* tp = value->bottom_type()->isa_instptr();
2395       if (tp && tp->klass()->is_loaded() && !tp->klass()->is_interface()) {
2396         // sharpen the type eagerly; this eases certain assert checking
2397         if (tp->higher_equal(TypeInstPtr::NOTNULL)) {
2398           tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
2399         }
2400         value = _gvn.transform(new CheckCastPPNode(0, value, tr));
2401       }
2402     } else {
2403       // Handle returns of oop-arrays to an arrays-of-interface return
2404       const TypeInstPtr* phi_tip;
2405       const TypeInstPtr* val_tip;
2406       Type::get_arrays_base_elements(return_type, value->bottom_type(), &phi_tip, &val_tip);
2407       if (phi_tip != NULL && phi_tip->is_loaded() && phi_tip->klass()->is_interface() &&
2408           val_tip != NULL && val_tip->is_loaded() && !val_tip->klass()->is_interface()) {
2409         value = _gvn.transform(new CheckCastPPNode(0, value, return_type));
2410       }
2411     }
2412     phi->add_req(value);
2413   }
2414 
2415   SafePointNode* exit_return = _exits.map();
2416   exit_return->in( TypeFunc::Control  )->add_req( control() );
2417   exit_return->in( TypeFunc::I_O      )->add_req( i_o    () );
2418   Node *mem = exit_return->in( TypeFunc::Memory   );
2419   for (MergeMemStream mms(mem->as_MergeMem(), merged_memory()); mms.next_non_empty2(); ) {
2420     if (mms.is_empty()) {
2421       // get a copy of the base memory, and patch just this one input
2422       const TypePtr* adr_type = mms.adr_type(C);
2423       Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
2424       assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
2425       gvn().set_type_bottom(phi);
2426       phi->del_req(phi->req()-1);  // prepare to re-patch
2427       mms.set_memory(phi);
2428     }
2429     mms.memory()->add_req(mms.memory2());
2430   }
2431 
2432   if (_first_return) {
2433     _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
2434     _first_return = false;
2435   } else {
2436     _exits.map()->merge_replaced_nodes_with(map());
2437   }
2438 
2439   stop_and_kill_map();          // This CFG path dies here
2440 }
2441 
2442 
2443 //------------------------------add_safepoint----------------------------------
2444 void Parse::add_safepoint() {
2445   uint parms = TypeFunc::Parms+1;
2446 
2447   // Clear out dead values from the debug info.
2448   kill_dead_locals();
2449 
2450   // Clone the JVM State
2451   SafePointNode *sfpnt = new SafePointNode(parms, NULL);