/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"

OptoReg::Name OptoReg::c_frame_pointer;

const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
RegMask Matcher::caller_save_regmask;
RegMask Matcher::caller_save_regmask_exclude_soe;
RegMask Matcher::mh_caller_save_regmask;
RegMask Matcher::mh_caller_save_regmask_exclude_soe;
RegMask Matcher::STACK_ONLY_mask;
RegMask Matcher::c_frame_ptr_mask;
const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;

//---------------------------Matcher-------------------------------------------
Matcher::Matcher()
: PhaseTransform( Phase::Ins_Select ),
  _states_arena(Chunk::medium_size, mtCompiler),
  _new_nodes(C->comp_arena()),
  _visited(&_states_arena),
  _shared(&_states_arena),
  _dontcare(&_states_arena),
  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  _swallowed(swallowed),
  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  _must_clone(must_clone),
  _shared_nodes(C->comp_arena()),
#ifndef PRODUCT
  _old2new_map(C->comp_arena()),
  _new2old_map(C->comp_arena()),
  _reused(C->comp_arena()),
#endif // !PRODUCT
  _allocation_started(false),
  _ruleName(ruleName),
  _register_save_policy(register_save_policy),
  _c_reg_save_policy(c_reg_save_policy),
  _register_save_type(register_save_type) {
  C->set_matcher(this);

  idealreg2spillmask  [Op_RegI] = nullptr;
  idealreg2spillmask  [Op_RegN] = nullptr;
  idealreg2spillmask  [Op_RegL] = nullptr;
  idealreg2spillmask  [Op_RegF] = nullptr;
  idealreg2spillmask  [Op_RegD] = nullptr;
  idealreg2spillmask  [Op_RegP] = nullptr;
  idealreg2spillmask  [Op_VecA] = nullptr;
  idealreg2spillmask  [Op_VecS] = nullptr;
  idealreg2spillmask  [Op_VecD] = nullptr;
  idealreg2spillmask  [Op_VecX] = nullptr;
  idealreg2spillmask  [Op_VecY] = nullptr;
  idealreg2spillmask  [Op_VecZ] = nullptr;
  idealreg2spillmask  [Op_RegFlags] = nullptr;
  idealreg2spillmask  [Op_RegVectMask] = nullptr;

  idealreg2debugmask  [Op_RegI] = nullptr;
  idealreg2debugmask  [Op_RegN] = nullptr;
  idealreg2debugmask  [Op_RegL] = nullptr;
  idealreg2debugmask  [Op_RegF] = nullptr;
  idealreg2debugmask  [Op_RegD] = nullptr;
  idealreg2debugmask  [Op_RegP] = nullptr;
  idealreg2debugmask  [Op_VecA] = nullptr;
  idealreg2debugmask  [Op_VecS] = nullptr;
  idealreg2debugmask  [Op_VecD] = nullptr;
  idealreg2debugmask  [Op_VecX] = nullptr;
  idealreg2debugmask  [Op_VecY] = nullptr;
  idealreg2debugmask  [Op_VecZ] = nullptr;
  idealreg2debugmask  [Op_RegFlags] = nullptr;
  idealreg2debugmask  [Op_RegVectMask] = nullptr;

  idealreg2mhdebugmask[Op_RegI] = nullptr;
  idealreg2mhdebugmask[Op_RegN] = nullptr;
  idealreg2mhdebugmask[Op_RegL] = nullptr;
  idealreg2mhdebugmask[Op_RegF] = nullptr;
  idealreg2mhdebugmask[Op_RegD] = nullptr;
  idealreg2mhdebugmask[Op_RegP] = nullptr;
  idealreg2mhdebugmask[Op_VecA] = nullptr;
  idealreg2mhdebugmask[Op_VecS] = nullptr;
  idealreg2mhdebugmask[Op_VecD] = nullptr;
  idealreg2mhdebugmask[Op_VecX] = nullptr;
  idealreg2mhdebugmask[Op_VecY] = nullptr;
  idealreg2mhdebugmask[Op_VecZ] = nullptr;
  idealreg2mhdebugmask[Op_RegFlags] = nullptr;
  idealreg2mhdebugmask[Op_RegVectMask] = nullptr;

  debug_only(_mem_node = nullptr;)   // Ideal memory node consumed by mach node
}

//------------------------------warp_incoming_stk_arg------------------------
// This warps a VMReg into an OptoReg::Name
OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
  OptoReg::Name warped;
  if( reg->is_stack() ) {  // Stack slot argument?
    warped = OptoReg::add(_old_SP, reg->reg2stack() );
    warped = OptoReg::add(warped, C->out_preserve_stack_slots());
    if( warped >= _in_arg_limit )
      _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
    if (!RegMask::can_represent_arg(warped)) {
      // the compiler cannot represent this method's calling sequence
      // Bailout. We do not have space to represent all arguments.
      C->record_method_not_compilable("unsupported incoming calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
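
// Worked example with hypothetical numbers: an incoming argument in VMReg
// stack slot 3, with two out-preserve slots, warps to the OptoReg::Name
// _old_SP + 3 + 2; register arguments pass through unchanged via
// OptoReg::as_OptoReg().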

//---------------------------compute_old_SP------------------------------------
OptoReg::Name Compile::compute_old_SP() {
  int fixed    = fixed_slots();
  int preserve = in_preserve_stack_slots();
  return OptoReg::stack2reg(align_up(fixed + preserve, (int)Matcher::stack_alignment_in_slots()));
}
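
// Worked example with hypothetical numbers: if fixed_slots() == 3,
// in_preserve_stack_slots() == 2, and stack_alignment_in_slots() == 2, then
// align_up(3 + 2, 2) == 6 and the old SP is the biased name stack2reg(6).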



#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  // Make sure that the new graph only references new nodes
  ResourceMark rm;
  Unique_Node_List worklist;
  VectorSet visited;
  worklist.push(xroot);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    visited.set(n->_idx);
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != nullptr) {
        assert(C->node_arena()->contains(in), "dead node");
        if (!visited.test(in->_idx)) {
          worklist.push(in);
        }
      }
    }
  }
}
#endif

// Array of RegMask, one per returned value (inline type instances can
// be returned as multiple return values, one per field)
RegMask* Matcher::return_values_mask(const TypeFunc* tf) {
  const TypeTuple* range = tf->range_cc();
  uint cnt = range->cnt() - TypeFunc::Parms;
  if (cnt == 0) {
    return nullptr;
  }
  RegMask* mask = NEW_RESOURCE_ARRAY(RegMask, cnt);
  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, cnt);
  VMRegPair* vm_parm_regs = NEW_RESOURCE_ARRAY(VMRegPair, cnt);
  for (uint i = 0; i < cnt; i++) {
    sig_bt[i] = range->field_at(i+TypeFunc::Parms)->basic_type();
  }

  int regs = SharedRuntime::java_return_convention(sig_bt, vm_parm_regs, cnt);
  if (regs <= 0) {
    // We ran out of registers to store the IsInit information for a nullable inline type return.
    // Since it is only set in the 'call_epilog', we can simply put it on the stack.
    assert(tf->returns_inline_type_as_fields(), "should have been tested during graph construction");
    // TODO 8284443 Can we teach the register allocator to reserve a stack slot instead?
    // mask[--cnt] = STACK_ONLY_mask does not work (test with -XX:+StressGCM)
    int slot = C->fixed_slots() - 2;
    if (C->needs_stack_repair()) {
      slot -= 2; // Account for stack increment value
    }
    mask[--cnt].Clear();
    mask[cnt].Insert(OptoReg::stack2reg(slot));
  }
  for (uint i = 0; i < cnt; i++) {
    mask[i].Clear();

    OptoReg::Name reg1 = OptoReg::as_OptoReg(vm_parm_regs[i].first());
    if (OptoReg::is_valid(reg1)) {
      mask[i].Insert(reg1);
    }
    OptoReg::Name reg2 = OptoReg::as_OptoReg(vm_parm_regs[i].second());
    if (OptoReg::is_valid(reg2)) {
      mask[i].Insert(reg2);
    }
  }

  return mask;
}
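
// Example (hypothetical shapes): a method returning a single long yields
// cnt == 1 and a mask holding both halves of the return register pair; a
// method returning an inline type as fields yields one mask per returned
// field, with the IsInit indicator moved to a fixed stack slot when
// java_return_convention() reports that it ran out of registers (regs <= 0).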

//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
  init_spill_mask( C->root()->in(1) );
  if (C->failing()) {
    return;
  }
  _return_addr_mask = return_addr();
#ifdef _LP64
  // Pointers take 2 slots in 64-bit land
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif

  // Map Java-signature return types into return register-value
  // machine registers.
  _return_values_mask = return_values_mask(C->tf());

  // ---------------
  // Frame Layout

  // Need the method signature to determine the incoming argument types,
  // because the types determine which registers the incoming arguments are
  // in, and this affects the matched code.
  const TypeTuple *domain = C->tf()->domain_cc();
  uint             argcnt = domain->cnt() - TypeFunc::Parms;
  BasicType *sig_bt        = NEW_RESOURCE_ARRAY( BasicType, argcnt );
  VMRegPair *vm_parm_regs  = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
  _parm_regs               = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
    sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
  }

  // Pass array of ideal registers and length to USER code (from the AD file)
  // that will convert this to an array of register numbers.
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  // Sanity check users' calling convention.  Real handy while trying to
  // get the initial port correct.
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves" );
        _parm_regs[i].set_bad();
        continue;
      }
      VMReg parm_reg = vm_parm_regs[i].first();
      assert(parm_reg->is_valid(), "invalid arg?");
      if (parm_reg->is_reg()) {
        OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
        assert(can_be_java_arg(opto_parm_reg) ||
               C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
               opto_parm_reg == inline_cache_reg(),
               "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
        assert(parm_reg != vm_parm_regs[j].first(),
               "calling conv. must produce distinct regs");
      }
    }
  }
#endif

  // Do some initial frame layout.

  // Compute the old incoming SP (may be called FP) as
  //   OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
  _old_SP = C->compute_old_SP();
  assert( is_even(_old_SP), "must be even" );

  // Compute highest incoming stack argument as
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    // Permit args to have no register
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
      _parm_regs[i].set_bad();
      continue;
    }
    // calling_convention returns stack arguments as a count of
    // slots beyond OptoReg::stack0()/VMRegImpl::stack0.  We need to convert this to
    // the allocator's point of view, taking into account all the
    // preserve area, locks & pad2.

    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if (C->failing()) {
      return;
    }
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);

    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if (C->failing()) {
      return;
    }
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);

    // Saved biased stack-slot register number
    _parm_regs[i].set_pair(reg2, reg1);
  }

  // Finally, make sure the incoming arguments take up an even number of
  // words, in case the arguments or locals need to contain doubleword stack
  // slots.  The rest of the system assumes that stack slot pairs (in
  // particular, in the spill area) which look aligned will in fact be
  // aligned relative to the stack pointer in the target machine.  Double
  // stack slots will always be allocated aligned.
  _new_SP = OptoReg::Name(align_up(_in_arg_limit, (int)RegMask::SlotsPerLong));
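  // Worked example with hypothetical numbers: if the incoming arguments end
  // at an odd slot, say _in_arg_limit == 7, and RegMask::SlotsPerLong == 2,
  // then align_up(7, 2) == 8 rounds _new_SP up so that doubleword spill
  // slots stay pair-aligned relative to the stack pointer.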

  // Compute highest outgoing stack argument as
  //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
  _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(_out_arg_limit), "out_preserve must be even" );

  if (!RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1))) {
    // the compiler cannot represent this method's calling sequence
    // Bailout. We do not have space to represent all arguments.
    C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
  }

  if (C->failing())  return;  // bailed out on incoming arg failure

  // ---------------
  // Collect roots of matcher trees.  Every node for which
  // _shared[_idx] is cleared is guaranteed to not be shared, and thus
  // can be a valid interior of some tree.
  find_shared( C->root() );
  find_shared( C->top() );

  C->print_method(PHASE_BEFORE_MATCHING, 1);

  // Create a new ideal node ConP #null even if it already exists in old space
  // to avoid false sharing if the corresponding mach node is not used.
  // The corresponding mach node is only used in rare cases for derived
  // pointers.
  Node* new_ideal_null = ConNode::make(TypePtr::NULL_PTR);

  // Swap out to old-space; emptying new-space
  Arena* old = C->swap_old_and_new();

  // Save debug and profile information for nodes in old space:
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != nullptr) {
    C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
                           (C->comp_arena(), _old_node_note_array->length(),
                            0, nullptr));
  }

  // Pre-size the new_node table to avoid the need for range checks.
  grow_new_node_array(C->unique());

  // Reset node counter so MachNodes start with _idx at 0
  int live_nodes = C->live_nodes();
  C->set_unique(0);
  C->reset_dead_node_list();

  // Recursively match trees from old space into new space.
  // Correct leaves of new-space Nodes; they point to old-space.
  _visited.clear();
  Node* const n = xform(C->top(), live_nodes);
  if (C->failing()) return;
  C->set_cached_top_node(n);
  if (!C->failing()) {
    Node* xroot =        xform( C->root(), 1 );
    if (C->failing()) return;
    if (xroot == nullptr) {
      Matcher::soft_match_failure();  // recursive matching process failed
      assert(false, "instruction match failed");
      C->record_method_not_compilable("instruction match failed");
    } else {
      // During matching shared constants were attached to C->root()
      // because xroot wasn't available yet, so transfer the uses to
      // the xroot.
      for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
        Node* n = C->root()->fast_out(j);
        if (C->node_arena()->contains(n)) {
          assert(n->in(0) == C->root(), "should be control user");
          n->set_req(0, xroot);
          --j;
          --jmax;
        }
      }

      // Generate new mach node for ConP #null
      assert(new_ideal_null != nullptr, "sanity");
      _mach_null = match_tree(new_ideal_null);
      // Don't set control, it will confuse GCM since there are no uses.
      // The control will be set when this node is used first time
      // in find_base_for_derived().
      assert(_mach_null != nullptr || C->failure_is_artificial(), ""); // bailouts are handled below.

      C->set_root(xroot->is_Root() ? xroot->as_Root() : nullptr);

#ifdef ASSERT
      verify_new_nodes_only(xroot);
#endif
    }
  }
  if (C->top() == nullptr || C->root() == nullptr) {
    // New graph lost. This is due to a compilation failure we encountered earlier.
    stringStream ss;
    if (C->failure_reason() != nullptr) {
      ss.print("graph lost: %s", C->failure_reason());
    } else {
      assert(C->failure_reason() != nullptr, "graph lost: reason unknown");
      ss.print("graph lost: reason unknown");
    }
    C->record_method_not_compilable(ss.as_string() DEBUG_ONLY(COMMA true));
  }
  if (C->failing()) {
    // delete old;
    old->destruct_contents();
    return;
  }
  assert( C->top(), "" );
  assert( C->root(), "" );
  validate_null_checks();

  // Now smoke old-space
  NOT_DEBUG( old->destruct_contents() );

  // ------------------------
  // Set up save-on-entry registers.
  Fixup_Save_On_Entry( );

  { // Cleanup mach IR after selection phase is over.
    Compile::TracePhase tp("postselect_cleanup", &timers[_t_postselect_cleanup]);
    do_postselect_cleanup();
    if (C->failing())  return;
    assert(verify_after_postselect_cleanup(), "");
  }
}

//------------------------------Fixup_Save_On_Entry----------------------------
// The stated purpose of this routine is to take care of save-on-entry
// registers.  However, the overall goal of the Match phase is to convert into
// machine-specific instructions which have RegMasks to guide allocation.
// So what this procedure really does is put a valid RegMask on each input
// to the machine-specific variations of all Return, TailCall and Halt
// instructions.  It also adds edges to define the save-on-entry values (and of
// course gives them a mask).

static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
  // Do all the pre-defined register masks
  rms[TypeFunc::Control  ] = RegMask::Empty;
  rms[TypeFunc::I_O      ] = RegMask::Empty;
  rms[TypeFunc::Memory   ] = RegMask::Empty;
  rms[TypeFunc::ReturnAdr] = ret_adr;
  rms[TypeFunc::FramePtr ] = fp;
  return rms;
}

int Matcher::scalable_predicate_reg_slots() {
  assert(Matcher::has_predicated_vectors() && Matcher::supports_scalable_vector(),
        "scalable predicate vector should be supported");
  int vector_reg_bit_size = Matcher::scalable_vector_reg_size(T_BYTE) << LogBitsPerByte;
  // We assume each predicate register is one-eighth of the size of
  // scalable vector register, one mask bit per vector byte.
  int predicate_reg_bit_size = vector_reg_bit_size >> 3;
  // Compute the number of slots required when a scalable predicate
  // register is spilled. E.g. if the scalable vector register is 640 bits,
  // the predicate register is 80 bits, which takes 2.5 slots.
  // We round the slot count up to a power of 2, as required
  // by find_first_set().
  int slots = predicate_reg_bit_size & (BitsPerInt - 1)
              ? (predicate_reg_bit_size >> LogBitsPerInt) + 1
              : predicate_reg_bit_size >> LogBitsPerInt;
  return round_up_power_of_2(slots);
}
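
// Worked example from the comment above: a 640-bit scalable vector gives
// predicate_reg_bit_size == 80. Since 80 is not a multiple of BitsPerInt
// (32), slots == (80 >> LogBitsPerInt) + 1 == 3, and round_up_power_of_2(3)
// reserves 4 slots per spilled predicate register.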

#define NOF_STACK_MASKS (3*13)

// Create the initial stack mask used by values spilling to the stack.
// Disallow any debug info in outgoing argument areas by setting the
// initial mask accordingly.
void Matcher::init_first_stack_mask() {

  // Allocate storage for spill masks as masks for the appropriate load type.
  RegMask *rms = (RegMask*)C->comp_arena()->AmallocWords(sizeof(RegMask) * NOF_STACK_MASKS);

  // Initialize empty placeholder masks into the newly allocated arena
  for (int i = 0; i < NOF_STACK_MASKS; i++) {
    new (rms + i) RegMask();
  }

  idealreg2spillmask  [Op_RegN] = &rms[0];
  idealreg2spillmask  [Op_RegI] = &rms[1];
  idealreg2spillmask  [Op_RegL] = &rms[2];
  idealreg2spillmask  [Op_RegF] = &rms[3];
  idealreg2spillmask  [Op_RegD] = &rms[4];
  idealreg2spillmask  [Op_RegP] = &rms[5];

  idealreg2debugmask  [Op_RegN] = &rms[6];
  idealreg2debugmask  [Op_RegI] = &rms[7];
  idealreg2debugmask  [Op_RegL] = &rms[8];
  idealreg2debugmask  [Op_RegF] = &rms[9];
  idealreg2debugmask  [Op_RegD] = &rms[10];
  idealreg2debugmask  [Op_RegP] = &rms[11];

  idealreg2mhdebugmask[Op_RegN] = &rms[12];
  idealreg2mhdebugmask[Op_RegI] = &rms[13];
  idealreg2mhdebugmask[Op_RegL] = &rms[14];
  idealreg2mhdebugmask[Op_RegF] = &rms[15];
  idealreg2mhdebugmask[Op_RegD] = &rms[16];
  idealreg2mhdebugmask[Op_RegP] = &rms[17];

  idealreg2spillmask  [Op_VecA] = &rms[18];
  idealreg2spillmask  [Op_VecS] = &rms[19];
  idealreg2spillmask  [Op_VecD] = &rms[20];
  idealreg2spillmask  [Op_VecX] = &rms[21];
  idealreg2spillmask  [Op_VecY] = &rms[22];
  idealreg2spillmask  [Op_VecZ] = &rms[23];

  idealreg2debugmask  [Op_VecA] = &rms[24];
  idealreg2debugmask  [Op_VecS] = &rms[25];
  idealreg2debugmask  [Op_VecD] = &rms[26];
  idealreg2debugmask  [Op_VecX] = &rms[27];
  idealreg2debugmask  [Op_VecY] = &rms[28];
  idealreg2debugmask  [Op_VecZ] = &rms[29];

  idealreg2mhdebugmask[Op_VecA] = &rms[30];
  idealreg2mhdebugmask[Op_VecS] = &rms[31];
  idealreg2mhdebugmask[Op_VecD] = &rms[32];
  idealreg2mhdebugmask[Op_VecX] = &rms[33];
  idealreg2mhdebugmask[Op_VecY] = &rms[34];
  idealreg2mhdebugmask[Op_VecZ] = &rms[35];

  idealreg2spillmask  [Op_RegVectMask] = &rms[36];
  idealreg2debugmask  [Op_RegVectMask] = &rms[37];
  idealreg2mhdebugmask[Op_RegVectMask] = &rms[38];

  OptoReg::Name i;

  // At first, start with the empty mask
  C->FIRST_STACK_mask().Clear();

  // Add in the incoming argument area
  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }

  // Add in all bits past the outgoing argument area
  guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
            "must be able to represent all call arguments in reg mask");
  OptoReg::Name init = _out_arg_limit;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Finally, set the "infinite stack" bit.
  C->FIRST_STACK_mask().set_AllStack();
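
  // Schematically, FIRST_STACK_mask now covers:
  //   [init_in, _in_arg_limit)          the incoming argument slots
  //   [_out_arg_limit, representable)   every slot past the outgoing args
  //   AllStack                          the "infinite stack" bit
  // leaving a hole over the outgoing argument area so that spills and debug
  // info never land where outgoing call arguments are stored.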

  // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
  RegMask aligned_stack_mask = C->FIRST_STACK_mask();
  // Keep spill masks aligned.
  aligned_stack_mask.clear_to_pairs();
  assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
  RegMask scalable_stack_mask = aligned_stack_mask;

  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
#ifdef _LP64
  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
   idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
   idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
#else
   idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
#endif
  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
   idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
   idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
   idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
   idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);

  if (Matcher::has_predicated_vectors()) {
    *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
     idealreg2spillmask[Op_RegVectMask]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_RegVectMask] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_BYTE,4)) {
    *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
     idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
  } else {
    *idealreg2spillmask[Op_VecS] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
    // RA guarantees such alignment since it is needed for Double and Long values.
    *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
     idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecD] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    // For VecX we need quad alignment and 16 bytes (4 slots) for spills.
    //
    // RA can use input argument stack slots for spills, but until RA runs
    // we don't know the frame size and the offsets of the input arg stack slots.
    //
    // Exclude the last input arg stack slots to avoid spilling vectors there,
    // otherwise vector spills could stomp over stack slots in the caller frame.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
     idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecX] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    // For VecY we need octa alignment and 32 bytes (8 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
     idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecY] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,16)) {
    // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
     idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecZ] = RegMask::Empty;
  }

  if (Matcher::supports_scalable_vector()) {
    int k = 1;
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    if (Matcher::has_predicated_vectors()) {
      // Exclude last input arg stack slots to avoid spilling vector register there,
      // otherwise RegVectMask spills could stomp over stack slots in caller frame.
      for (; (in >= init_in) && (k < scalable_predicate_reg_slots()); k++) {
        scalable_stack_mask.Remove(in);
        in = OptoReg::add(in, -1);
      }

      // For RegVectMask
      scalable_stack_mask.clear_to_sets(scalable_predicate_reg_slots());
      assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
      *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
      idealreg2spillmask[Op_RegVectMask]->OR(scalable_stack_mask);
    }

    // Exclude last input arg stack slots to avoid spilling vector register there,
    // otherwise vector spills could stomp over stack slots in caller frame.
    for (; (in >= init_in) && (k < scalable_vector_reg_size(T_FLOAT)); k++) {
      scalable_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }

    // For VecA
     scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA);
     assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecA] = *idealreg2regmask[Op_VecA];
     idealreg2spillmask[Op_VecA]->OR(scalable_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecA] = RegMask::Empty;
  }

  if (UseFPUForSpilling) {
    // This mask logic assumes that the spill operations are
    // symmetric and that the registers involved are the same size.
    // On SPARC, for instance, we may have to use 64-bit moves that will
    // kill 2 registers when used with F0-F31.
    idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
    idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
#else
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
#ifdef ARM
    // ARM has support for moving 64-bit values between a pair of
    // integer registers and a double register
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
#endif
#endif
  }

  // Make up debug masks.  Any spill slot plus callee-save (SOE) registers.
  // Caller-save (SOC, AS) registers are assumed to be trashable by the various
  // inline-cache fixup routines.
  *idealreg2debugmask  [Op_RegN] = *idealreg2spillmask[Op_RegN];
  *idealreg2debugmask  [Op_RegI] = *idealreg2spillmask[Op_RegI];
  *idealreg2debugmask  [Op_RegL] = *idealreg2spillmask[Op_RegL];
  *idealreg2debugmask  [Op_RegF] = *idealreg2spillmask[Op_RegF];
  *idealreg2debugmask  [Op_RegD] = *idealreg2spillmask[Op_RegD];
  *idealreg2debugmask  [Op_RegP] = *idealreg2spillmask[Op_RegP];
  *idealreg2debugmask  [Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask];

  *idealreg2debugmask  [Op_VecA] = *idealreg2spillmask[Op_VecA];
  *idealreg2debugmask  [Op_VecS] = *idealreg2spillmask[Op_VecS];
  *idealreg2debugmask  [Op_VecD] = *idealreg2spillmask[Op_VecD];
  *idealreg2debugmask  [Op_VecX] = *idealreg2spillmask[Op_VecX];
  *idealreg2debugmask  [Op_VecY] = *idealreg2spillmask[Op_VecY];
  *idealreg2debugmask  [Op_VecZ] = *idealreg2spillmask[Op_VecZ];

  *idealreg2mhdebugmask[Op_RegN] = *idealreg2spillmask[Op_RegN];
  *idealreg2mhdebugmask[Op_RegI] = *idealreg2spillmask[Op_RegI];
  *idealreg2mhdebugmask[Op_RegL] = *idealreg2spillmask[Op_RegL];
  *idealreg2mhdebugmask[Op_RegF] = *idealreg2spillmask[Op_RegF];
  *idealreg2mhdebugmask[Op_RegD] = *idealreg2spillmask[Op_RegD];
  *idealreg2mhdebugmask[Op_RegP] = *idealreg2spillmask[Op_RegP];
  *idealreg2mhdebugmask[Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask];

  *idealreg2mhdebugmask[Op_VecA] = *idealreg2spillmask[Op_VecA];
  *idealreg2mhdebugmask[Op_VecS] = *idealreg2spillmask[Op_VecS];
  *idealreg2mhdebugmask[Op_VecD] = *idealreg2spillmask[Op_VecD];
  *idealreg2mhdebugmask[Op_VecX] = *idealreg2spillmask[Op_VecX];
  *idealreg2mhdebugmask[Op_VecY] = *idealreg2spillmask[Op_VecY];
  *idealreg2mhdebugmask[Op_VecZ] = *idealreg2spillmask[Op_VecZ];

  // Prevent stub compilations from attempting to reference
  // callee-saved (SOE) registers from debug info
  bool exclude_soe = !Compile::current()->is_method_compilation();
  RegMask* caller_save_mask = exclude_soe ? &caller_save_regmask_exclude_soe : &caller_save_regmask;
  RegMask* mh_caller_save_mask = exclude_soe ? &mh_caller_save_regmask_exclude_soe : &mh_caller_save_regmask;

  idealreg2debugmask[Op_RegN]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegI]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegL]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegF]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegD]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegP]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegVectMask]->SUBTRACT(*caller_save_mask);

  idealreg2debugmask[Op_VecA]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecS]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecD]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecX]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecY]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecZ]->SUBTRACT(*caller_save_mask);

  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegVectMask]->SUBTRACT(*mh_caller_save_mask);

  idealreg2mhdebugmask[Op_VecA]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecS]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecD]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecX]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecY]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecZ]->SUBTRACT(*mh_caller_save_mask);
}

//---------------------------is_save_on_entry----------------------------------
bool Matcher::is_save_on_entry(int reg) {
  return
    _register_save_policy[reg] == 'E' ||
    _register_save_policy[reg] == 'A'; // Save-on-entry register?
}
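
// The save policies come from the AD file's register definitions. As a
// sketch of the conventional encoding (the exact sets are platform-defined
// in the .ad file): 'C' marks caller-save (SOC), 'E' marks save-on-entry
// (SOE, i.e. callee-save), 'N' marks no-save, and 'A' marks always-save
// (AS), which is treated as both caller- and callee-save here.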

//---------------------------Fixup_Save_On_Entry-------------------------------
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();

  Node *root = C->root();       // Short name for root
  // Count number of save-on-entry registers.
  uint soe_cnt = number_of_saved_registers();
  uint i;

  // Find the procedure Start Node
  StartNode *start = C->start();
  assert( start, "Expect a start node" );

  // Input RegMask array shared by all Returns.
  // The type for doubles and longs has a count of 2, but
  // there is only 1 returned value
  uint ret_edge_cnt = C->tf()->range_cc()->cnt();
  RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  for (i = TypeFunc::Parms; i < ret_edge_cnt; i++) {
    ret_rms[i] = _return_values_mask[i-TypeFunc::Parms];
  }

  // Input RegMask array shared by all ForwardExceptions
  uint forw_exc_edge_cnt = TypeFunc::Parms;
  RegMask* forw_exc_rms  = init_input_masks( forw_exc_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all Rethrows.
  uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes exception oop only, but in the argument 0 slot.
  OptoReg::Name reg = find_receiver();
  if (reg >= 0) {
    reth_rms[TypeFunc::Parms] = mreg2regmask[reg];
#ifdef _LP64
    // Need two slots for ptrs in 64-bit land
    reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(reg), 1));
#endif
  }

  // Input RegMask array shared by all TailCalls
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all TailJumps
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // TailCalls have 2 returned values (target & moop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailCall to extract these masks and put the correct masks into
  // the tail_call_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // TailJumps have 2 returned values (target & ex_oop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailJump to extract these masks and put the correct masks into
  // the tail_jump_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // Input RegMask array shared by all Halts
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Capture the return input masks into each exit flavor
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Op_Return   : exit->_in_rms = ret_rms;  break;
      case Op_Rethrow  : exit->_in_rms = reth_rms; break;
      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Op_ForwardException: exit->_in_rms = forw_exc_rms; break;
      case Op_Halt     : exit->_in_rms = halt_rms; break;
      default          : ShouldNotReachHere();
    }
  }

  // Next unused projection number from Start.
  int proj_cnt = C->tf()->domain_cc()->cnt();

  // Do all the save-on-entry registers.  Make projections from Start for
  // them, and give them a use at the exit points.  To the allocator, they
  // look like incoming register arguments.
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {

      // Add the save-on-entry to the mask array
      ret_rms      [      ret_edge_cnt] = mreg2regmask[i];
      reth_rms     [     reth_edge_cnt] = mreg2regmask[i];
      tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      forw_exc_rms [ forw_exc_edge_cnt] = mreg2regmask[i];
      // Halts need the SOE registers, but only in the stack as debug info.
      // A just-prior uncommon-trap or deoptimization will use the SOE regs.
      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];

      Node *mproj;

      // Is this a RegF low half of a RegD?  Double up 2 adjacent RegF's
      // into a single RegD.
      if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegF &&
          _register_save_type[i+1] == Op_RegF &&
          is_save_on_entry(i+1) ) {
        // Add other bit for double
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        forw_exc_rms [ forw_exc_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
        proj_cnt += 2;          // Skip 2 for doubles
      }
      else if( (i&1) == 1 &&    // Else check for high half of double
               _register_save_type[i-1] == Op_RegF &&
               _register_save_type[i  ] == Op_RegF &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        forw_exc_rms [ forw_exc_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      }
      // Is this a RegI low half of a RegL?  Double up 2 adjacent RegI's
      // into a single RegL.
      else if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegI &&
          _register_save_type[i+1] == Op_RegI &&
        is_save_on_entry(i+1) ) {
        // Add other bit for long
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        forw_exc_rms [ forw_exc_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
        proj_cnt += 2;          // Skip 2 for longs
      }
      else if( (i&1) == 1 &&    // Else check for high half of long
               _register_save_type[i-1] == Op_RegI &&
               _register_save_type[i  ] == Op_RegI &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        forw_exc_rms [ forw_exc_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      } else {
        // Make a projection for it off the Start
        mproj = new MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
      }

      ret_edge_cnt ++;
      reth_edge_cnt ++;
      tail_call_edge_cnt ++;
      tail_jump_edge_cnt ++;
      forw_exc_edge_cnt++;
      halt_edge_cnt ++;

      // Add a use of the SOE register to all exit paths
      for (uint j=1; j < root->req(); j++) {
        root->in(j)->add_req(mproj);
      }
    } // End of if a save-on-entry register
  } // End of for all machine registers
}

//------------------------------init_spill_mask--------------------------------
void Matcher::init_spill_mask( Node *ret ) {
  if( idealreg2regmask[Op_RegI] ) return; // One time only init

  OptoReg::c_frame_pointer = c_frame_pointer();
  c_frame_ptr_mask = c_frame_pointer();
#ifdef _LP64
  // pointers are twice as big
  c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif

  // Start at OptoReg::stack0()
  STACK_ONLY_mask.Clear();
  OptoReg::Name init = OptoReg::stack2reg(0);
  // STACK_ONLY_mask is all stack bits
  OptoReg::Name i;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    STACK_ONLY_mask.Insert(i);
  // Also set the "infinite stack" bit.
  STACK_ONLY_mask.set_AllStack();

  for (i = OptoReg::Name(0); i < OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i, 1)) {
    // Copy the register names over into the shared world.
    // SharedInfo::regName[i] = regName[i];
    // Handy RegMasks per machine register
    mreg2regmask[i].Insert(i);

    // Set up regmasks used to exclude save-on-call (and always-save) registers from debug masks.
    if (_register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A') {
      caller_save_regmask.Insert(i);
      mh_caller_save_regmask.Insert(i);
    }
    // Exclude save-on-entry registers from debug masks for stub compilations.
    if (_register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A' ||
        _register_save_policy[i] == 'E') {
      caller_save_regmask_exclude_soe.Insert(i);
      mh_caller_save_regmask_exclude_soe.Insert(i);
    }
  }

  // Also exclude the register we use to save the SP for MethodHandle
  // invokes from the corresponding MH debug masks.
  const RegMask sp_save_mask = method_handle_invoke_SP_save_mask();
  mh_caller_save_regmask.OR(sp_save_mask);
  mh_caller_save_regmask_exclude_soe.OR(sp_save_mask);

  // Grab the Frame Pointer
  Node *fp  = ret->in(TypeFunc::FramePtr);
  // Share frame pointer while making spill ops
  set_shared(fp);

// Get the ADLC notion of the right regmask, for each basic type.
#ifdef _LP64
  idealreg2regmask[Op_RegN] = regmask_for_ideal_register(Op_RegN, ret);
#endif
  idealreg2regmask[Op_RegI] = regmask_for_ideal_register(Op_RegI, ret);
  idealreg2regmask[Op_RegP] = regmask_for_ideal_register(Op_RegP, ret);
  idealreg2regmask[Op_RegF] = regmask_for_ideal_register(Op_RegF, ret);
  idealreg2regmask[Op_RegD] = regmask_for_ideal_register(Op_RegD, ret);
  idealreg2regmask[Op_RegL] = regmask_for_ideal_register(Op_RegL, ret);
  idealreg2regmask[Op_VecA] = regmask_for_ideal_register(Op_VecA, ret);
  idealreg2regmask[Op_VecS] = regmask_for_ideal_register(Op_VecS, ret);
  idealreg2regmask[Op_VecD] = regmask_for_ideal_register(Op_VecD, ret);
  idealreg2regmask[Op_VecX] = regmask_for_ideal_register(Op_VecX, ret);
  idealreg2regmask[Op_VecY] = regmask_for_ideal_register(Op_VecY, ret);
  idealreg2regmask[Op_VecZ] = regmask_for_ideal_register(Op_VecZ, ret);
  idealreg2regmask[Op_RegVectMask] = regmask_for_ideal_register(Op_RegVectMask, ret);
}
1083 
1084 #ifdef ASSERT
1085 static void match_alias_type(Compile* C, Node* n, Node* m) {
1086   if (!VerifyAliases)  return;  // do not go looking for trouble by default
1087   const TypePtr* nat = n->adr_type();
1088   const TypePtr* mat = m->adr_type();
1089   int nidx = C->get_alias_index(nat);
1090   int midx = C->get_alias_index(mat);
1091   // Detune the assert for cases like (AndI 0xFF (LoadB p)).
1092   if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
1093     for (uint i = 1; i < n->req(); i++) {
1094       Node* n1 = n->in(i);
1095       const TypePtr* n1at = n1->adr_type();
1096       if (n1at != nullptr) {
1097         nat = n1at;
1098         nidx = C->get_alias_index(n1at);
1099       }
1100     }
1101   }
1102   // %%% Kludgery.  Instead, fix ideal adr_type methods for all these cases:
1103   if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
1104     switch (n->Opcode()) {
1105     case Op_PrefetchAllocation:
1106       nidx = Compile::AliasIdxRaw;
1107       nat = TypeRawPtr::BOTTOM;
1108       break;
1109     }
1110   }
1111   if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
1112     switch (n->Opcode()) {
1113     case Op_ClearArray:
1114       midx = Compile::AliasIdxRaw;
1115       mat = TypeRawPtr::BOTTOM;
1116       break;
1117     }
1118   }
1119   if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
1120     switch (n->Opcode()) {
1121     case Op_Return:
1122     case Op_Rethrow:
1123     case Op_Halt:
1124     case Op_TailCall:
1125     case Op_TailJump:
1126     case Op_ForwardException:
1127       nidx = Compile::AliasIdxBot;
1128       nat = TypePtr::BOTTOM;
1129       break;
1130     }
1131   }
1132   if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
1133     switch (n->Opcode()) {
1134     case Op_StrComp:
1135     case Op_StrEquals:
1136     case Op_StrIndexOf:
1137     case Op_StrIndexOfChar:
1138     case Op_AryEq:
1139     case Op_VectorizedHashCode:
1140     case Op_CountPositives:
1141     case Op_MemBarVolatile:
1142     case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
1143     case Op_StrInflatedCopy:
1144     case Op_StrCompressedCopy:
1145     case Op_OnSpinWait:
1146     case Op_EncodeISOArray:
1147       nidx = Compile::AliasIdxTop;
1148       nat = nullptr;
1149       break;
1150     }
1151   }
1152   if (nidx != midx) {
1153     if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
1154       tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
1155       n->dump();
1156       m->dump();
1157     }
1158     assert(C->subsume_loads() && C->must_alias(nat, midx),
1159            "must not lose alias info when matching");
1160   }
1161 }
1162 #endif
1163 
1164 //------------------------------xform------------------------------------------
1165 // Given a Node in old-space, Match him (Label/Reduce) to produce a machine
1166 // Node in new-space.  Given a new-space Node, recursively walk his children.
1167 Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
1168 Node *Matcher::xform( Node *n, int max_stack ) {
1169   // Use one stack to keep both: child's node/state and parent's node/index
1170   MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
1171   mstack.push(n, Visit, nullptr, -1);  // set null as parent to indicate root
1172   while (mstack.is_nonempty()) {
1173     C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
1174     if (C->failing()) return nullptr;
1175     n = mstack.node();          // Leave node on stack
1176     Node_State nstate = mstack.state();
1177     if (nstate == Visit) {
1178       mstack.set_state(Post_Visit);
1179       Node *oldn = n;
1180       // Old-space or new-space check
1181       if (!C->node_arena()->contains(n)) {
1182         // Old space!
1183         Node* m;
1184         if (has_new_node(n)) {  // Not yet Label/Reduced
1185           m = new_node(n);
1186         } else {
1187           if (!is_dontcare(n)) { // Matcher can match this guy
1188             // Calls match special.  They match alone with no children.
1189             // Their children, the incoming arguments, match normally.
1190             m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
1191             if (C->failing())  return nullptr;
1192             if (m == nullptr) { Matcher::soft_match_failure(); return nullptr; }
1193             if (n->is_MemBar()) {
1194               m->as_MachMemBar()->set_adr_type(n->adr_type());
1195             }
1196           } else {                  // Nothing the matcher cares about
1197             if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Multi()) {       // Projections?
1198               // Convert to machine-dependent projection
1199               RegMask* mask = nullptr;
1200               if (n->in(0)->is_Call() && n->in(0)->as_Call()->tf()->returns_inline_type_as_fields()) {
1201                 mask = return_values_mask(n->in(0)->as_Call()->tf());
1202               }
1203               m = n->in(0)->as_Multi()->match(n->as_Proj(), this, mask);
1204               NOT_PRODUCT(record_new2old(m, n);)
1205               if (m->in(0) != nullptr) // m might be top
1206                 collect_null_checks(m, n);
1207             } else {                // Else just a regular 'ol guy
1208               m = n->clone();       // So just clone into new-space
1209               NOT_PRODUCT(record_new2old(m, n);)
1210               // Def-Use edges will be added incrementally as Uses
1211               // of this node are matched.
1212               assert(m->outcnt() == 0, "no Uses of this clone yet");
1213             }
1214           }
1215 
1216           set_new_node(n, m);       // Map old to new
1217           if (_old_node_note_array != nullptr) {
1218             Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
1219                                                   n->_idx);
1220             C->set_node_notes_at(m->_idx, nn);
1221           }
1222           debug_only(match_alias_type(C, n, m));
1223         }
1224         n = m;    // n is now a new-space node
1225         mstack.set_node(n);
1226       }
1227 
1228       // New space!
1229       if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())
1230 
1231       int i;
1232       // Put precedence edges on stack first (match them last).
1233       for (i = oldn->req(); (uint)i < oldn->len(); i++) {
1234         Node *m = oldn->in(i);
1235         if (m == nullptr) break;
1236         // set -1 to call add_prec() instead of set_req() during Step1
1237         mstack.push(m, Visit, n, -1);
1238       }
1239 
1240       // Handle precedence edges for interior nodes
1241       for (i = n->len()-1; (uint)i >= n->req(); i--) {
1242         Node *m = n->in(i);
1243         if (m == nullptr || C->node_arena()->contains(m)) continue;
1244         n->rm_prec(i);
1245         // set -1 to call add_prec() instead of set_req() during Step1
1246         mstack.push(m, Visit, n, -1);
1247       }
1248 
1249       // For constant debug info, I'd rather have unmatched constants.
1250       int cnt = n->req();
1251       JVMState* jvms = n->jvms();
1252       int debug_cnt = jvms ? jvms->debug_start() : cnt;
1253 
1254       // Now do only debug info.  Clone constants rather than matching.
1255       // Constants are represented directly in the debug info without
1256       // the need for executable machine instructions.
1257       // Monitor boxes are also represented directly.
1258       for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
1259         Node *m = n->in(i);          // Get input
1260         int op = m->Opcode();
1261         assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
1262         if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
1263             op == Op_ConF || op == Op_ConD || op == Op_ConL
1264             // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
1265             ) {
1266           m = m->clone();
1267           NOT_PRODUCT(record_new2old(m, n));
1268           mstack.push(m, Post_Visit, n, i); // Don't need to visit
1269           mstack.push(m->in(0), Visit, m, 0);
1270         } else {
1271           mstack.push(m, Visit, n, i);
1272         }
1273       }
1274 
1275       // And now walk his children, and convert his inputs to new-space.
1276       for( ; i >= 0; --i ) { // For all normal inputs do
1277         Node *m = n->in(i);  // Get input
1278         if(m != nullptr)
1279           mstack.push(m, Visit, n, i);
1280       }
1281 
1282     }
1283     else if (nstate == Post_Visit) {
1284       // Set xformed input
1285       Node *p = mstack.parent();
1286       if (p != nullptr) { // root doesn't have parent
1287         int i = (int)mstack.index();
1288         if (i >= 0)
1289           p->set_req(i, n); // required input
1290         else if (i == -1)
1291           p->add_prec(n);   // precedence input
1292         else
1293           ShouldNotReachHere();
1294       }
1295       mstack.pop(); // remove processed node from stack
1296     }
1297     else {
1298       ShouldNotReachHere();
1299     }
1300   } // while (mstack.is_nonempty())
1301   return n; // Return new-space Node
1302 }
1303 
1304 //------------------------------warp_outgoing_stk_arg------------------------
1305 OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
1306   // Convert outgoing argument location to a pre-biased stack offset
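  // Schematically: a stack argument in VMReg slot K becomes the OptoReg name
  // (begin_out_arg_area + K), so all outgoing-arg stack slots are numbered
  // relative to the start of the outgoing argument area.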
1307   if (reg->is_stack()) {
1308     OptoReg::Name warped = reg->reg2stack();
1309     // Adjust the stack slot offset to be the register number used
1310     // by the allocator.
1311     warped = OptoReg::add(begin_out_arg_area, warped);
1312     // Keep track of the largest numbered stack slot used for an arg.
1313     // Largest used slot per call-site indicates the amount of stack
1314     // that is killed by the call.
1315     if( warped >= out_arg_limit_per_call )
1316       out_arg_limit_per_call = OptoReg::add(warped,1);
1317     if (!RegMask::can_represent_arg(warped)) {
      // Bailout. For example, not enough space on the stack for all arguments. Happens for methods with too many arguments.
1319       C->record_method_not_compilable("unsupported calling sequence");
1320       return OptoReg::Bad;
1321     }
1322     return warped;
1323   }
1324   return OptoReg::as_OptoReg(reg);
1325 }
1326 
1327 
1328 //------------------------------match_sfpt-------------------------------------
1329 // Helper function to match call instructions.  Calls match special.
1330 // They match alone with no children.  Their children, the incoming
1331 // arguments, match normally.
1332 MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
1333   MachSafePointNode *msfpt = nullptr;
1334   MachCallNode      *mcall = nullptr;
1335   uint               cnt;
1336   // Split out case for SafePoint vs Call
1337   CallNode *call;
1338   const TypeTuple *domain;
1339   ciMethod*        method = nullptr;
1340   bool             is_method_handle_invoke = false;  // for special kill effects
1341   if( sfpt->is_Call() ) {
1342     call = sfpt->as_Call();
1343     domain = call->tf()->domain_cc();
1344     cnt = domain->cnt();
1345 
1346     // Match just the call, nothing else
1347     MachNode *m = match_tree(call);
1348     if (C->failing())  return nullptr;
1349     if( m == nullptr ) { Matcher::soft_match_failure(); return nullptr; }
1350 
1351     // Copy data from the Ideal SafePoint to the machine version
1352     mcall = m->as_MachCall();
1353 
1354     mcall->set_tf(                  call->tf());
1355     mcall->set_entry_point(         call->entry_point());
1356     mcall->set_cnt(                 call->cnt());
1357     mcall->set_guaranteed_safepoint(call->guaranteed_safepoint());
1358 
1359     if( mcall->is_MachCallJava() ) {
1360       MachCallJavaNode *mcall_java  = mcall->as_MachCallJava();
1361       const CallJavaNode *call_java =  call->as_CallJava();
1362       assert(call_java->validate_symbolic_info(), "inconsistent info");
1363       method = call_java->method();
1364       mcall_java->_method = method;
1365       mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
1366       is_method_handle_invoke = call_java->is_method_handle_invoke();
1367       mcall_java->_method_handle_invoke = is_method_handle_invoke;
1368       mcall_java->_override_symbolic_info = call_java->override_symbolic_info();
1369       mcall_java->_arg_escape = call_java->arg_escape();
1370       if (is_method_handle_invoke) {
1371         C->set_has_method_handle_invokes(true);
1372       }
1373       if( mcall_java->is_MachCallStaticJava() )
1374         mcall_java->as_MachCallStaticJava()->_name =
1375          call_java->as_CallStaticJava()->_name;
1376       if( mcall_java->is_MachCallDynamicJava() )
1377         mcall_java->as_MachCallDynamicJava()->_vtable_index =
1378          call_java->as_CallDynamicJava()->_vtable_index;
1379     }
1380     else if( mcall->is_MachCallRuntime() ) {
1381       MachCallRuntimeNode* mach_call_rt = mcall->as_MachCallRuntime();
1382       mach_call_rt->_name = call->as_CallRuntime()->_name;
1383       mach_call_rt->_leaf_no_fp = call->is_CallLeafNoFP();
1384     }
1385     msfpt = mcall;
1386   }
1387   // This is a non-call safepoint
1388   else {
1389     call = nullptr;
1390     domain = nullptr;
1391     MachNode *mn = match_tree(sfpt);
1392     if (C->failing())  return nullptr;
1393     msfpt = mn->as_MachSafePoint();
1394     cnt = TypeFunc::Parms;
1395   }
1396   msfpt->_has_ea_local_in_scope = sfpt->has_ea_local_in_scope();
1397 
1398   // Advertise the correct memory effects (for anti-dependence computation).
1399   msfpt->set_adr_type(sfpt->adr_type());
1400 
1401   // Allocate a private array of RegMasks.  These RegMasks are not shared.
1402   msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
1403   // Empty them all.
1404   for (uint i = 0; i < cnt; i++) ::new (&(msfpt->_in_rms[i])) RegMask();
1405 
1406   // Do all the pre-defined non-Empty register masks
1407   msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
1408   msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;
1409 
  // Place where the first outgoing argument can possibly be put.
1411   OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
1412   assert( is_even(begin_out_arg_area), "" );
1413   // Compute max outgoing register number per call site.
1414   OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
1415   // Calls to C may hammer extra stack slots above and beyond any arguments.
1416   // These are usually backing store for register arguments for varargs.
1417   if( call != nullptr && call->is_CallRuntime() )
1418     out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());
1419 
1420 
1421   // Do the normal argument list (parameters) register masks
  // Null entry point is a special case where the target of the call
1423   // is in a register.
1424   int adj = (call != nullptr && call->entry_point() == nullptr) ? 1 : 0;
1425   int argcnt = cnt - TypeFunc::Parms - adj;
1426   if( argcnt > 0 ) {          // Skip it all if we have no args
1427     BasicType *sig_bt  = NEW_RESOURCE_ARRAY( BasicType, argcnt );
1428     VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
1429     int i;
1430     for( i = 0; i < argcnt; i++ ) {
1431       sig_bt[i] = domain->field_at(i+TypeFunc::Parms+adj)->basic_type();
1432     }
1433     // V-call to pick proper calling convention
1434     call->calling_convention( sig_bt, parm_regs, argcnt );
1435 
1436 #ifdef ASSERT
1437     // Sanity check users' calling convention.  Really handy during
1438     // the initial porting effort.  Fairly expensive otherwise.
1439     { for (int i = 0; i<argcnt; i++) {
1440       if( !parm_regs[i].first()->is_valid() &&
1441           !parm_regs[i].second()->is_valid() ) continue;
1442       VMReg reg1 = parm_regs[i].first();
1443       VMReg reg2 = parm_regs[i].second();
1444       for (int j = 0; j < i; j++) {
1445         if( !parm_regs[j].first()->is_valid() &&
1446             !parm_regs[j].second()->is_valid() ) continue;
1447         VMReg reg3 = parm_regs[j].first();
1448         VMReg reg4 = parm_regs[j].second();
1449         if( !reg1->is_valid() ) {
1450           assert( !reg2->is_valid(), "valid halvsies" );
1451         } else if( !reg3->is_valid() ) {
1452           assert( !reg4->is_valid(), "valid halvsies" );
1453         } else {
1454           assert( reg1 != reg2, "calling conv. must produce distinct regs");
1455           assert( reg1 != reg3, "calling conv. must produce distinct regs");
1456           assert( reg1 != reg4, "calling conv. must produce distinct regs");
1457           assert( reg2 != reg3, "calling conv. must produce distinct regs");
1458           assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
1459           assert( reg3 != reg4, "calling conv. must produce distinct regs");
1460         }
1461       }
1462     }
1463     }
1464 #endif
1465 
1466     // Visit each argument.  Compute its outgoing register mask.
    // Return results can now have 2 bits returned.
1468     // Compute max over all outgoing arguments both per call-site
1469     // and over the entire method.
1470     for( i = 0; i < argcnt; i++ ) {
1471       // Address of incoming argument mask to fill in
1472       RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms+adj];
1473       VMReg first = parm_regs[i].first();
1474       VMReg second = parm_regs[i].second();
1475       if(!first->is_valid() &&
1476          !second->is_valid()) {
1477         continue;               // Avoid Halves
1478       }
1479       // Handle case where arguments are in vector registers.
1480       if(call->in(TypeFunc::Parms + i)->bottom_type()->isa_vect()) {
1481         OptoReg::Name reg_fst = OptoReg::as_OptoReg(first);
1482         OptoReg::Name reg_snd = OptoReg::as_OptoReg(second);
1483         assert (reg_fst <= reg_snd, "fst=%d snd=%d", reg_fst, reg_snd);
1484         for (OptoReg::Name r = reg_fst; r <= reg_snd; r++) {
1485           rm->Insert(r);
1486         }
1487       }
1488       // Grab first register, adjust stack slots and insert in mask.
1489       OptoReg::Name reg1 = warp_outgoing_stk_arg(first, begin_out_arg_area, out_arg_limit_per_call );
1490       if (C->failing()) {
1491         return nullptr;
1492       }
1493       if (OptoReg::is_valid(reg1)) {
1494         rm->Insert( reg1 );
1495       }
1496       // Grab second register (if any), adjust stack slots and insert in mask.
1497       OptoReg::Name reg2 = warp_outgoing_stk_arg(second, begin_out_arg_area, out_arg_limit_per_call );
1498       if (C->failing()) {
1499         return nullptr;
1500       }
1501       if (OptoReg::is_valid(reg2)) {
1502         rm->Insert( reg2 );
1503       }
1504     } // End of for all arguments
1505   }
1506 
1507   // Compute the max stack slot killed by any call.  These will not be
1508   // available for debug info, and will be used to adjust FIRST_STACK_mask
1509   // after all call sites have been visited.
1510   if( _out_arg_limit < out_arg_limit_per_call)
1511     _out_arg_limit = out_arg_limit_per_call;
1512 
1513   if (mcall) {
1514     // Kill the outgoing argument area, including any non-argument holes and
1515     // any legacy C-killed slots.  Use Fat-Projections to do the killing.
1516     // Since the max-per-method covers the max-per-call-site and debug info
1517     // is excluded on the max-per-method basis, debug info cannot land in
1518     // this killed area.
1519     uint r_cnt = mcall->tf()->range_sig()->cnt();
1520     MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
1521     if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) {
1522       // Bailout. We do not have space to represent all arguments.
1523       C->record_method_not_compilable("unsupported outgoing calling sequence");
1524     } else {
1525       for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
1526         proj->_rout.Insert(OptoReg::Name(i));
1527     }
1528     if (proj->_rout.is_NotEmpty()) {
1529       push_projection(proj);
1530     }
1531   }
1532   // Transfer the safepoint information from the call to the mcall
1533   // Move the JVMState list
1534   msfpt->set_jvms(sfpt->jvms());
1535   for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
1536     jvms->set_map(sfpt);
1537   }
1538 
1539   // Debug inputs begin just after the last incoming parameter
1540   assert((mcall == nullptr) || (mcall->jvms() == nullptr) ||
1541          (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain_cc()->cnt()), "");
1542 
1543   // Add additional edges.
1544   if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
    // For these calls we cannot add MachConstantBase in expand(), as the
1546     // ins are not complete then.
1547     msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
1548     if (msfpt->jvms() &&
1549         msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
1550       // We added an edge before jvms, so we must adapt the position of the ins.
1551       msfpt->jvms()->adapt_position(+1);
1552     }
1553   }
1554 
1555   // Registers killed by the call are set in the local scheduling pass
1556   // of Global Code Motion.
1557   return msfpt;
1558 }
1559 
1560 //---------------------------match_tree----------------------------------------
// Match an Ideal Node DAG - turn it into a tree; Label & Reduce.  Used as part
// of the wholesale conversion from Ideal to Mach Nodes.  Also used for
// making GotoNodes while building the CFG and in init_spill_mask() to identify
// a Load's result RegMask for memoization in idealreg2regmask[].
1565 MachNode *Matcher::match_tree( const Node *n ) {
1566   assert( n->Opcode() != Op_Phi, "cannot match" );
1567   assert( !n->is_block_start(), "cannot match" );
1568   // Set the mark for all locally allocated State objects.
1569   // When this call returns, the _states_arena arena will be reset
1570   // freeing all State objects.
1571   ResourceMark rm( &_states_arena );
1572 
1573   LabelRootDepth = 0;
1574 
1575   // StoreNodes require their Memory input to match any LoadNodes
1576   Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
1577 #ifdef ASSERT
1578   Node* save_mem_node = _mem_node;
1579   _mem_node = n->is_Store() ? (Node*)n : nullptr;
1580 #endif
1581   // State object for root node of match tree
1582   // Allocate it on _states_arena - stack allocation can cause stack overflow.
1583   State *s = new (&_states_arena) State;
1584   s->_kids[0] = nullptr;
1585   s->_kids[1] = nullptr;
1586   s->_leaf = (Node*)n;
1587   // Label the input tree, allocating labels from top-level arena
1588   Node* root_mem = mem;
1589   Label_Root(n, s, n->in(0), root_mem);
1590   if (C->failing())  return nullptr;
1591 
1592   // The minimum cost match for the whole tree is found at the root State
1593   uint mincost = max_juint;
1594   uint cost = max_juint;
1595   uint i;
1596   for (i = 0; i < NUM_OPERANDS; i++) {
1597     if (s->valid(i) &&               // valid entry and
1598         s->cost(i) < cost &&         // low cost and
1599         s->rule(i) >= NUM_OPERANDS) {// not an operand
1600       mincost = i;
1601       cost = s->cost(i);
1602     }
1603   }
1604   if (mincost == max_juint) {
1605 #ifndef PRODUCT
1606     tty->print("No matching rule for:");
1607     s->dump();
1608 #endif
1609     Matcher::soft_match_failure();
1610     return nullptr;
1611   }
1612   // Reduce input tree based upon the state labels to machine Nodes
1613   MachNode *m = ReduceInst(s, s->rule(mincost), mem);
1614   // New-to-old mapping is done in ReduceInst, to cover complex instructions.
1615   NOT_PRODUCT(_old2new_map.map(n->_idx, m);)
1616 
1617   // Add any Matcher-ignored edges
1618   uint cnt = n->req();
1619   uint start = 1;
1620   if( mem != (Node*)1 ) start = MemNode::Memory+1;
1621   if( n->is_AddP() ) {
1622     assert( mem == (Node*)1, "" );
1623     start = AddPNode::Base+1;
1624   }
1625   for( i = start; i < cnt; i++ ) {
1626     if( !n->match_edge(i) ) {
1627       if( i < m->req() )
1628         m->ins_req( i, n->in(i) );
1629       else
1630         m->add_req( n->in(i) );
1631     }
1632   }
1633 
1634   debug_only( _mem_node = save_mem_node; )
1635   return m;
1636 }
1637 
1638 
1639 //------------------------------match_into_reg---------------------------------
1640 // Choose to either match this Node in a register or part of the current
1641 // match tree.  Return true for requiring a register and false for matching
1642 // as part of the current match tree.
1643 static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {
1644 
1645   const Type *t = m->bottom_type();
1646 
1647   if (t->singleton()) {
1648     // Never force constants into registers.  Allow them to match as
1649     // constants or registers.  Copies of the same value will share
1650     // the same register.  See find_shared_node.
1651     return false;
1652   } else {                      // Not a constant
1653     if (!shared && Matcher::is_encode_and_store_pattern(n, m)) {
1654       // Make it possible to match "encode and store" patterns with non-shared
1655       // encode operations that are pinned to a control node (e.g. by CastPP
1656       // node removal in final graph reshaping). The matched instruction cannot
1657       // float above the encode's control node because it is pinned to the
1658       // store's control node.
1659       return false;
1660     }
1661     // Stop recursion if they have different Controls.
1662     Node* m_control = m->in(0);
    // The control of the load's memory can post-dominate the load's control.
    // So use it, since the load can't float above its memory.
1665     Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : nullptr;
1666     if (control && m_control && control != m_control && control != mem_control) {
1667 
1668       // Actually, we can live with the most conservative control we
1669       // find, if it post-dominates the others.  This allows us to
1670       // pick up load/op/store trees where the load can float a little
1671       // above the store.
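      // E.g. in a load/op/store tree like
      //   (StoreI store_ctl mem adr (AddI (LoadI load_ctl mem adr) v))
      // 'control' is the store's control and m_control the load's; the scan
      // below walks a few in(0) steps up from 'control' and accepts the match
      // if it reaches m_control (or mem_control) along the way.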
1672       Node *x = control;
1673       const uint max_scan = 6;  // Arbitrary scan cutoff
1674       uint j;
1675       for (j=0; j<max_scan; j++) {
1676         if (x->is_Region())     // Bail out at merge points
1677           return true;
1678         x = x->in(0);
1679         if (x == m_control)     // Does 'control' post-dominate
1680           break;                // m->in(0)?  If so, we can use it
1681         if (x == mem_control)   // Does 'control' post-dominate
1682           break;                // mem_control?  If so, we can use it
1683       }
1684       if (j == max_scan)        // No post-domination before scan end?
1685         return true;            // Then break the match tree up
1686     }
1687     if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) ||
1688         (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) {
1689       // These are commonly used in address expressions and can
1690       // efficiently fold into them on X64 in some cases.
1691       return false;
1692     }
1693   }
1694 
1695   // Not forceable cloning.  If shared, put it into a register.
1696   return shared;
1697 }
1698 
1699 
1700 //------------------------------Instruction Selection--------------------------
1701 // Label method walks a "tree" of nodes, using the ADLC generated DFA to match
1702 // ideal nodes to machine instructions.  Trees are delimited by shared Nodes,
1703 // things the Matcher does not match (e.g., Memory), and things with different
1704 // Controls (hence forced into different blocks).  We pass in the Control
1705 // selected for this entire State tree.
1706 
1707 // The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
1708 // Store and the Load must have identical Memories (as well as identical
1709 // pointers).  Since the Matcher does not have anything for Memory (and
1710 // does not handle DAGs), I have to match the Memory input myself.  If the
1711 // Tree root is a Store or if there are multiple Loads in the tree, I require
1712 // all Loads to have the identical memory.
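//
// Schematically, such an add-to-memory pattern is
//   (StoreI mem adr (AddI (LoadI mem adr) val))
// and it is only a legal single-instruction match because the Store and the
// Load observe the very same memory state 'mem'.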
1713 Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem) {
  // Since Label_Root is a recursive function, it's possible that we might run
1715   // out of stack space.  See bugs 6272980 & 6227033 for more info.
1716   LabelRootDepth++;
1717   if (LabelRootDepth > MaxLabelRootDepth) {
1718     // Bailout. Can for example be hit with a deep chain of operations.
1719     C->record_method_not_compilable("Out of stack space, increase MaxLabelRootDepth");
1720     return nullptr;
1721   }
1722   uint care = 0;                // Edges matcher cares about
1723   uint cnt = n->req();
1724   uint i = 0;
1725 
1726   // Examine children for memory state
1727   // Can only subsume a child into your match-tree if that child's memory state
1728   // is not modified along the path to another input.
1729   // It is unsafe even if the other inputs are separate roots.
1730   Node *input_mem = nullptr;
1731   for( i = 1; i < cnt; i++ ) {
1732     if( !n->match_edge(i) ) continue;
1733     Node *m = n->in(i);         // Get ith input
1734     assert( m, "expect non-null children" );
1735     if( m->is_Load() ) {
1736       if( input_mem == nullptr ) {
1737         input_mem = m->in(MemNode::Memory);
1738         if (mem == (Node*)1) {
1739           // Save this memory to bail out if there's another memory access
1740           // to a different memory location in the same tree.
1741           mem = input_mem;
1742         }
1743       } else if( input_mem != m->in(MemNode::Memory) ) {
1744         input_mem = NodeSentinel;
1745       }
1746     }
1747   }
1748 
1749   for( i = 1; i < cnt; i++ ){// For my children
1750     if( !n->match_edge(i) ) continue;
1751     Node *m = n->in(i);         // Get ith input
1752     // Allocate states out of a private arena
1753     State *s = new (&_states_arena) State;
1754     svec->_kids[care++] = s;
1755     assert( care <= 2, "binary only for now" );
1756 
1757     // Recursively label the State tree.
1758     s->_kids[0] = nullptr;
1759     s->_kids[1] = nullptr;
1760     s->_leaf = m;
1761 
1762     // Check for leaves of the State Tree; things that cannot be a part of
1763     // the current tree.  If it finds any, that value is matched as a
1764     // register operand.  If not, then the normal matching is used.
1765     if( match_into_reg(n, m, control, i, is_shared(m)) ||
1766         // Stop recursion if this is a LoadNode and there is another memory access
1767         // to a different memory location in the same tree (for example, a StoreNode
1768         // at the root of this tree or another LoadNode in one of the children).
1769         ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
1770         // Can NOT include the match of a subtree when its memory state
1771         // is used by any of the other subtrees
1772         (input_mem == NodeSentinel) ) {
1773       // Print when we exclude matching due to different memory states at input-loads
1774       if (PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
1775           && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem)) {
1776         tty->print_cr("invalid input_mem");
1777       }
1778       // Switch to a register-only opcode; this value must be in a register
1779       // and cannot be subsumed as part of a larger instruction.
1780       s->DFA( m->ideal_reg(), m );
1781 
1782     } else {
1783       // If match tree has no control and we do, adopt it for entire tree
1784       if( control == nullptr && m->in(0) != nullptr && m->req() > 1 )
1785         control = m->in(0);         // Pick up control
1786       // Else match as a normal part of the match tree.
1787       control = Label_Root(m, s, control, mem);
1788       if (C->failing()) return nullptr;
1789     }
1790   }
1791 
1792   // Call DFA to match this node, and return
1793   svec->DFA( n->Opcode(), n );
1794 
1795 #ifdef ASSERT
1796   uint x;
1797   for( x = 0; x < _LAST_MACH_OPER; x++ )
1798     if( svec->valid(x) )
1799       break;
1800 
1801   if (x >= _LAST_MACH_OPER) {
1802     n->dump();
1803     svec->dump();
1804     assert( false, "bad AD file" );
1805   }
1806 #endif
1807   return control;
1808 }
1809 
1810 
1811 // Con nodes reduced using the same rule can share their MachNode
1812 // which reduces the number of copies of a constant in the final
1813 // program.  The register allocator is free to split uses later to
1814 // split live ranges.
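//
// E.g. two separate uses of (ConI 42) that reduce via the same rule end up
// pointing at one shared MachNode; the allocator may split that live range
// later if sharing turns out to be unprofitable.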
1815 MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
1816   if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return nullptr;
1817 
1818   // See if this Con has already been reduced using this rule.
1819   if (_shared_nodes.max() <= leaf->_idx) return nullptr;
1820   MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
1821   if (last != nullptr && rule == last->rule()) {
1822     // Don't expect control change for DecodeN
1823     if (leaf->is_DecodeNarrowPtr())
1824       return last;
1825     // Get the new space root.
1826     Node* xroot = new_node(C->root());
1827     if (xroot == nullptr) {
      // This shouldn't happen given the order of matching.
1829       return nullptr;
1830     }
1831 
1832     // Shared constants need to have their control be root so they
1833     // can be scheduled properly.
1834     Node* control = last->in(0);
1835     if (control != xroot) {
1836       if (control == nullptr || control == C->root()) {
1837         last->set_req(0, xroot);
1838       } else {
1839         assert(false, "unexpected control");
1840         return nullptr;
1841       }
1842     }
1843     return last;
1844   }
1845   return nullptr;
1846 }
1847 
1848 
1849 //------------------------------ReduceInst-------------------------------------
1850 // Reduce a State tree (with given Control) into a tree of MachNodes.
// This routine (and its cohort ReduceOper) converts Ideal Nodes into
1852 // complicated machine Nodes.  Each MachNode covers some tree of Ideal Nodes.
1853 // Each MachNode has a number of complicated MachOper operands; each
1854 // MachOper also covers a further tree of Ideal Nodes.
1855 
1856 // The root of the Ideal match tree is always an instruction, so we enter
1857 // the recursion here.  After building the MachNode, we need to recurse
1858 // the tree checking for these cases:
1859 // (1) Child is an instruction -
1860 //     Build the instruction (recursively), add it as an edge.
1861 //     Build a simple operand (register) to hold the result of the instruction.
1862 // (2) Child is an interior part of an instruction -
1863 //     Skip over it (do nothing)
// (3) Child is the start of an operand -
1865 //     Build the operand, place it inside the instruction
1866 //     Call ReduceOper.
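//
// For example, with a hypothetical x86-style add-from-memory rule, the tree
//   (AddI (LoadI mem adr) (ConI 4))
// reduces to a single MachNode whose memory operand covers the LoadI subtree
// and whose immediate operand covers the ConI; neither child becomes a
// separate instruction (cases (2) and (3) above rather than case (1)).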
1867 MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
1868   assert( rule >= NUM_OPERANDS, "called with operand rule" );
1869 
1870   MachNode* shared_node = find_shared_node(s->_leaf, rule);
1871   if (shared_node != nullptr) {
1872     return shared_node;
1873   }
1874 
1875   // Build the object to represent this state & prepare for recursive calls
1876   MachNode *mach = s->MachNodeGenerator(rule);
1877   guarantee(mach != nullptr, "Missing MachNode");
1878   mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]);
1879   assert( mach->_opnds[0] != nullptr, "Missing result operand" );
1880   Node *leaf = s->_leaf;
1881   NOT_PRODUCT(record_new2old(mach, leaf);)
1882   // Check for instruction or instruction chain rule
1883   if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
1884     assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
1885            "duplicating node that's already been matched");
1886     // Instruction
1887     mach->add_req( leaf->in(0) ); // Set initial control
1888     // Reduce interior of complex instruction
1889     ReduceInst_Interior( s, rule, mem, mach, 1 );
1890   } else {
1891     // Instruction chain rules are data-dependent on their inputs
1892     mach->add_req(nullptr);     // Set initial control to none
1893     ReduceInst_Chain_Rule( s, rule, mem, mach );
1894   }
1895 
1896   // If a Memory was used, insert a Memory edge
1897   if( mem != (Node*)1 ) {
1898     mach->ins_req(MemNode::Memory,mem);
1899 #ifdef ASSERT
1900     // Verify adr type after matching memory operation
1901     const MachOper* oper = mach->memory_operand();
1902     if (oper != nullptr && oper != (MachOper*)-1) {
1903       // It has a unique memory operand.  Find corresponding ideal mem node.
1904       Node* m = nullptr;
1905       if (leaf->is_Mem()) {
1906         m = leaf;
1907       } else {
1908         m = _mem_node;
1909         assert(m != nullptr && m->is_Mem(), "expecting memory node");
1910       }
1911       const Type* mach_at = mach->adr_type();
1912       // DecodeN node consumed by an address may have different type
1913       // than its input. Don't compare types for such case.
1914       if (m->adr_type() != mach_at &&
1915           (m->in(MemNode::Address)->is_DecodeNarrowPtr() ||
1916            (m->in(MemNode::Address)->is_AddP() &&
1917             m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()) ||
1918            (m->in(MemNode::Address)->is_AddP() &&
1919             m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
1920             m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()))) {
1921         mach_at = m->adr_type();
1922       }
1923       if (m->adr_type() != mach_at) {
1924         m->dump();
1925         tty->print_cr("mach:");
1926         mach->dump(1);
1927       }
1928       assert(m->adr_type() == mach_at, "matcher should not change adr type");
1929     }
1930 #endif
1931   }
1932 
1933   // If the _leaf is an AddP, insert the base edge
1934   if (leaf->is_AddP()) {
1935     mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
1936   }
1937 
1938   uint number_of_projections_prior = number_of_projections();
1939 
1940   // Perform any 1-to-many expansions required
1941   MachNode *ex = mach->Expand(s, _projection_list, mem);
1942   if (ex != mach) {
1943     assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
1944     if( ex->in(1)->is_Con() )
1945       ex->in(1)->set_req(0, C->root());
1946     // Remove old node from the graph
1947     for( uint i=0; i<mach->req(); i++ ) {
1948       mach->set_req(i,nullptr);
1949     }
1950     NOT_PRODUCT(record_new2old(ex, s->_leaf);)
1951   }
1952 
1953   // PhaseChaitin::fixup_spills will sometimes generate spill code
  // via the matcher.  By that time, nodes have been wired into the CFG,
1955   // and any further nodes generated by expand rules will be left hanging
1956   // in space, and will not get emitted as output code.  Catch this.
1957   // Also, catch any new register allocation constraints ("projections")
1958   // generated belatedly during spill code generation.
1959   if (_allocation_started) {
1960     guarantee(ex == mach, "no expand rules during spill generation");
1961     guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1962   }
1963 
1964   if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1965     // Record the con for sharing
1966     _shared_nodes.map(leaf->_idx, ex);
1967   }
1968 
1969   // Have mach nodes inherit GC barrier data
1970   mach->set_barrier_data(MemNode::barrier_data(leaf));
1971 
1972   return ex;
1973 }
1974 
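// Copy the precedence (non-required) edges of an ideal node over to its
// new MachNode.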
1975 void Matcher::handle_precedence_edges(Node* n, MachNode *mach) {
1976   for (uint i = n->req(); i < n->len(); i++) {
1977     if (n->in(i) != nullptr) {
1978       mach->add_prec(n->in(i));
1979     }
1980   }
1981 }
1982 
1983 void Matcher::ReduceInst_Chain_Rule(State* s, int rule, Node* &mem, MachNode* mach) {
1984   // 'op' is what I am expecting to receive
1985   int op = _leftOp[rule];
  // Operand type to catch the child's result.
1987   // This is what my child will give me.
1988   unsigned int opnd_class_instance = s->rule(op);
1989   // Choose between operand class or not.
1990   // This is what I will receive.
1991   int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1992   // New rule for child.  Chase operand classes to get the actual rule.
1993   unsigned int newrule = s->rule(catch_op);
1994 
1995   if (newrule < NUM_OPERANDS) {
1996     // Chain from operand or operand class, may be output of shared node
1997     assert(opnd_class_instance < NUM_OPERANDS, "Bad AD file: Instruction chain rule must chain from operand");
1998     // Insert operand into array of operands for this instruction
1999     mach->_opnds[1] = s->MachOperGenerator(opnd_class_instance);
2000 
2001     ReduceOper(s, newrule, mem, mach);
2002   } else {
2003     // Chain from the result of an instruction
2004     assert(newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
2005     mach->_opnds[1] = s->MachOperGenerator(_reduceOp[catch_op]);
2006     Node *mem1 = (Node*)1;
2007     debug_only(Node *save_mem_node = _mem_node;)
2008     mach->add_req( ReduceInst(s, newrule, mem1) );
2009     debug_only(_mem_node = save_mem_node;)
2010   }
2011   return;
2012 }
2013 
2014 
2015 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
2016   handle_precedence_edges(s->_leaf, mach);
2017 
2018   if( s->_leaf->is_Load() ) {
2019     Node *mem2 = s->_leaf->in(MemNode::Memory);
2020     assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
2021     debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
2022     mem = mem2;
2023   }
2024   if( s->_leaf->in(0) != nullptr && s->_leaf->req() > 1) {
2025     if( mach->in(0) == nullptr )
2026       mach->set_req(0, s->_leaf->in(0));
2027   }
2028 
2029   // Now recursively walk the state tree & add operand list.
2030   for( uint i=0; i<2; i++ ) {   // binary tree
2031     State *newstate = s->_kids[i];
2032     if( newstate == nullptr ) break;      // Might only have 1 child
2033     // 'op' is what I am expecting to receive
2034     int op;
2035     if( i == 0 ) {
2036       op = _leftOp[rule];
2037     } else {
2038       op = _rightOp[rule];
2039     }
    // Operand type to catch the child's result.
2041     // This is what my child will give me.
2042     int opnd_class_instance = newstate->rule(op);
2043     // Choose between operand class or not.
2044     // This is what I will receive.
2045     int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
2046     // New rule for child.  Chase operand classes to get the actual rule.
2047     int newrule = newstate->rule(catch_op);
2048 
2049     if (newrule < NUM_OPERANDS) { // Operand/operandClass or internalOp/instruction?
2050       // Operand/operandClass
2051       // Insert operand into array of operands for this instruction
2052       mach->_opnds[num_opnds++] = newstate->MachOperGenerator(opnd_class_instance);
2053       ReduceOper(newstate, newrule, mem, mach);
2054 
2055     } else {                    // Child is internal operand or new instruction
2056       if (newrule < _LAST_MACH_OPER) { // internal operand or instruction?
2057         // internal operand --> call ReduceInst_Interior
2058         // Interior of complex instruction.  Do nothing but recurse.
2059         num_opnds = ReduceInst_Interior(newstate, newrule, mem, mach, num_opnds);
2060       } else {
2061         // instruction --> call build operand(  ) to catch result
2062         //             --> ReduceInst( newrule )
2063         mach->_opnds[num_opnds++] = s->MachOperGenerator(_reduceOp[catch_op]);
2064         Node *mem1 = (Node*)1;
2065         debug_only(Node *save_mem_node = _mem_node;)
2066         mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
2067         debug_only(_mem_node = save_mem_node;)
2068       }
2069     }
2070     assert( mach->_opnds[num_opnds-1], "" );
2071   }
2072   return num_opnds;
2073 }
2074 
2075 // This routine walks the interior of possible complex operands.
2076 // At each point we check our children in the match tree:
2077 // (1) No children -
2078 //     We are a leaf; add _leaf field as an input to the MachNode
2079 // (2) Child is an internal operand -
2080 //     Skip over it ( do nothing )
2081 // (3) Child is an instruction -
//     Call ReduceInst recursively and add the
//     instruction as an input to the MachNode
2084 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
2085   assert( rule < _LAST_MACH_OPER, "called with operand rule" );
2086   State *kid = s->_kids[0];
2087   assert( kid == nullptr || s->_leaf->in(0) == nullptr, "internal operands have no control" );
2088 
2089   // Leaf?  And not subsumed?
2090   if( kid == nullptr && !_swallowed[rule] ) {
2091     mach->add_req( s->_leaf );  // Add leaf pointer
2092     return;                     // Bail out
2093   }
2094 
2095   if( s->_leaf->is_Load() ) {
2096     assert( mem == (Node*)1, "multiple Memories being matched at once?" );
2097     mem = s->_leaf->in(MemNode::Memory);
2098     debug_only(_mem_node = s->_leaf;)
2099   }
2100 
2101   handle_precedence_edges(s->_leaf, mach);
2102 
2103   if( s->_leaf->in(0) && s->_leaf->req() > 1) {
2104     if( !mach->in(0) )
2105       mach->set_req(0,s->_leaf->in(0));
2106     else {
2107       assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
2108     }
2109   }
2110 
2111   for (uint i = 0; kid != nullptr && i < 2; kid = s->_kids[1], i++) {   // binary tree
2112     int newrule;
2113     if( i == 0) {
2114       newrule = kid->rule(_leftOp[rule]);
2115     } else {
2116       newrule = kid->rule(_rightOp[rule]);
2117     }
2118 
2119     if (newrule < _LAST_MACH_OPER) { // Operand or instruction?
2120       // Internal operand; recurse but do nothing else
2121       ReduceOper(kid, newrule, mem, mach);
2122 
2123     } else {                    // Child is a new instruction
2124       // Reduce the instruction, and add a direct pointer from this
2125       // machine instruction to the newly reduced one.
2126       Node *mem1 = (Node*)1;
2127       debug_only(Node *save_mem_node = _mem_node;)
2128       mach->add_req( ReduceInst( kid, newrule, mem1 ) );
2129       debug_only(_mem_node = save_mem_node;)
2130     }
2131   }
2132 }
2133 
2134 
2135 // -------------------------------------------------------------------------
2136 // Java-Java calling convention
2137 // (what you use when Java calls Java)
2138 
2139 //------------------------------find_receiver----------------------------------
2140 // For a given signature, return the OptoReg for parameter 0.
2141 OptoReg::Name Matcher::find_receiver() {
2142   VMRegPair regs;
2143   BasicType sig_bt = T_OBJECT;
2144   SharedRuntime::java_calling_convention(&sig_bt, &regs, 1);
2145   // Return argument 0 register.  In the LP64 build pointers
2146   // take 2 registers, but the VM wants only the 'main' name.
2147   return OptoReg::as_OptoReg(regs.first());
2148 }
2149 
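// Recognize a vector shift 'n' whose shift-count input 'm' wraps a constant.
// Keeping the pair together lets the constant count fold into the shift
// instruction rather than being forced into a register.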
2150 bool Matcher::is_vshift_con_pattern(Node* n, Node* m) {
2151   if (n != nullptr && m != nullptr) {
2152     return VectorNode::is_vector_shift(n) &&
2153            VectorNode::is_vector_shift_count(m) && m->in(1)->is_Con();
2154   }
2155   return false;
2156 }
2157 
2158 bool Matcher::clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
2159   // Must clone all producers of flags, or we will not match correctly.
2160   // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
2161   // then it will match into an ideal Op_RegFlags.  Alas, the fp-flags
2162   // are also there, so we may match a float-branch to int-flags and
2163   // expect the allocator to haul the flags from the int-side to the
2164   // fp-side.  No can do.
2165   if (_must_clone[m->Opcode()]) {
2166     mstack.push(m, Visit);
2167     return true;
2168   }
2169   return pd_clone_node(n, m, mstack);
2170 }
2171 
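// Clone an AddP of the form (base + constant offset) at each use rather than
// forcing it into a register: X+offset folds into most addressing modes.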
2172 bool Matcher::clone_base_plus_offset_address(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
2173   Node *off = m->in(AddPNode::Offset);
2174   if (off->is_Con()) {
2175     address_visited.test_set(m->_idx); // Flag as address_visited
2176     mstack.push(m->in(AddPNode::Address), Pre_Visit);
2177     // Clone X+offset as it also folds into most addressing expressions
2178     mstack.push(off, Visit);
2179     mstack.push(m->in(AddPNode::Base), Pre_Visit);
2180     return true;
2181   }
2182   return false;
2183 }
2184 
2185 // A method-klass-holder may be passed in the inline_cache_reg
2186 // and then expanded into the inline_cache_reg and a method_ptr register
2187 //   defined in ad_<arch>.cpp
2188 
2189 //------------------------------find_shared------------------------------------
2190 // Set bits if Node is shared or otherwise a root
2191 void Matcher::find_shared(Node* n) {
2192   // Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc
2193   MStack mstack(C->live_nodes() * 2);
2194   // Mark nodes as address_visited if they are inputs to an address expression
2195   VectorSet address_visited;
2196   mstack.push(n, Visit);     // Don't need to pre-visit root node
2197   while (mstack.is_nonempty()) {
2198     n = mstack.node();       // Leave node on stack
2199     Node_State nstate = mstack.state();
2200     uint nop = n->Opcode();
2201     if (nstate == Pre_Visit) {
2202       if (address_visited.test(n->_idx)) { // Visited in address already?
2203         // Flag as visited and shared now.
2204         set_visited(n);
2205       }
2206       if (is_visited(n)) {   // Visited already?
2207         // Node is shared and has no reason to clone.  Flag it as shared.
2208         // This causes it to match into a register for the sharing.
2209         set_shared(n);       // Flag as shared and
2210         if (n->is_DecodeNarrowPtr()) {
2211           // Oop field/array element loads must be shared but since
2212           // they are shared through a DecodeN they may appear to have
2213           // a single use so force sharing here.
2214           set_shared(n->in(1));
2215         }
2216         mstack.pop();        // remove node from stack
2217         continue;
2218       }
2219       nstate = Visit; // Not already visited; so visit now
2220     }
2221     if (nstate == Visit) {
2222       mstack.set_state(Post_Visit);
2223       set_visited(n);   // Flag as visited now
2224       bool mem_op = false;
2225       int mem_addr_idx = MemNode::Address;
2226       if (find_shared_visit(mstack, n, nop, mem_op, mem_addr_idx)) {
2227         continue;
2228       }
2229       for (int i = n->len() - 1; i >= 0; --i) { // For my children
2230         Node* m = n->in(i); // Get ith input
2231         if (m == nullptr) {
2232           continue;  // Ignore nulls
2233         }
2234         if (clone_node(n, m, mstack)) {
2235           continue;
2236         }
2237 
2238         // Clone addressing expressions as they are "free" in memory access instructions
2239         if (mem_op && i == mem_addr_idx && m->is_AddP() &&
2240             // When there are other uses besides address expressions
2241             // put it on stack and mark as shared.
2242             !is_visited(m)) {
2243           // Some inputs for address expression are not put on stack
2244           // to avoid marking them as shared and forcing them into register
2245           // if they are used only in address expressions.
2246           // But they should be marked as shared if there are other uses
2247           // besides address expressions.
2248 
2249           if (pd_clone_address_expressions(m->as_AddP(), mstack, address_visited)) {
2250             continue;
2251           }
2252         }   // if( mem_op &&
2253         mstack.push(m, Pre_Visit);
2254       }     // for(int i = ...)
2255     }
2256     else if (nstate == Alt_Post_Visit) {
2257       mstack.pop(); // Remove node from stack
2258       // We cannot remove the Cmp input from the Bool here, as the Bool may be
2259       // shared and all users of the Bool need to move the Cmp in parallel.
2260       // This leaves both the Bool and the If pointing at the Cmp.  To
2261       // prevent the Matcher from trying to Match the Cmp along both paths
      // BoolNode::match_edge always returns zero.
2263 
2264       // We reorder the Op_If in a pre-order manner, so we can visit without
2265       // accidentally sharing the Cmp (the Bool and the If make 2 users).
2266       n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
2267     }
2268     else if (nstate == Post_Visit) {
2269       mstack.pop(); // Remove node from stack
2270 
2271       // Now hack a few special opcodes
2272       uint opcode = n->Opcode();
2273       bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->matcher_find_shared_post_visit(this, n, opcode);
2274       if (!gc_handled) {
2275         find_shared_post_visit(n, opcode);
2276       }
2277     }
2278     else {
2279       ShouldNotReachHere();
2280     }
2281   } // end of while (mstack.is_nonempty())
2282 }
2283 
2284 bool Matcher::find_shared_visit(MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) {
2285   switch(opcode) {  // Handle some opcodes special
2286     case Op_Phi:             // Treat Phis as shared roots
2287     case Op_Parm:
2288     case Op_Proj:            // All handled specially during matching
2289     case Op_SafePointScalarObject:
2290       set_shared(n);
2291       set_dontcare(n);
2292       break;
2293     case Op_If:
2294     case Op_CountedLoopEnd:
2295       mstack.set_state(Alt_Post_Visit); // Alternative way
2296       // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)).  Helps
2297       // with matching cmp/branch in 1 instruction.  The Matcher needs the
2298       // Bool and CmpX side-by-side, because it can only get at constants
2299       // that are at the leaves of Match trees, and the Bool's condition acts
2300       // as a constant here.
2301       mstack.push(n->in(1), Visit);         // Clone the Bool
2302       mstack.push(n->in(0), Pre_Visit);     // Visit control input
2303       return true; // while (mstack.is_nonempty())
2304     case Op_ConvI2D:         // These forms efficiently match with a prior
2305     case Op_ConvI2F:         //   Load but not a following Store
2306       if( n->in(1)->is_Load() &&        // Prior load
2307           n->outcnt() == 1 &&           // Not already shared
2308           n->unique_out()->is_Store() ) // Following store
2309         set_shared(n);       // Force it to be a root
2310       break;
2311     case Op_ReverseBytesI:
2312     case Op_ReverseBytesL:
2313       if( n->in(1)->is_Load() &&        // Prior load
2314           n->outcnt() == 1 )            // Not already shared
2315         set_shared(n);                  // Force it to be a root
2316       break;
2317     case Op_BoxLock:         // Can't match until we get stack-regs in ADLC
2318     case Op_IfFalse:
2319     case Op_IfTrue:
2320     case Op_MachProj:
2321     case Op_MergeMem:
2322     case Op_Catch:
2323     case Op_CatchProj:
2324     case Op_CProj:
2325     case Op_JumpProj:
2326     case Op_JProj:
2327     case Op_NeverBranch:
2328       set_dontcare(n);
2329       break;
2330     case Op_Jump:
2331       mstack.push(n->in(1), Pre_Visit);     // Switch Value (could be shared)
2332       mstack.push(n->in(0), Pre_Visit);     // Visit Control input
2333       return true;                             // while (mstack.is_nonempty())
2334     case Op_StrComp:
2335     case Op_StrEquals:
2336     case Op_StrIndexOf:
2337     case Op_StrIndexOfChar:
2338     case Op_AryEq:
2339     case Op_VectorizedHashCode:
2340     case Op_CountPositives:
2341     case Op_StrInflatedCopy:
2342     case Op_StrCompressedCopy:
2343     case Op_EncodeISOArray:
2344     case Op_FmaD:
2345     case Op_FmaF:
2346     case Op_FmaVD:
2347     case Op_FmaVF:
2348     case Op_MacroLogicV:
2349     case Op_VectorCmpMasked:
2350     case Op_CompressV:
2351     case Op_CompressM:
2352     case Op_ExpandV:
2353     case Op_VectorLoadMask:
2354       set_shared(n); // Force result into register (it will be anyways)
2355       break;
    case Op_ConP: {  // Convert pointers above the centerline to NULL
2357       TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2358       const TypePtr* tp = tn->type()->is_ptr();
2359       if (tp->_ptr == TypePtr::AnyNull) {
2360         tn->set_type(TypePtr::NULL_PTR);
2361       }
2362       break;
2363     }
    case Op_ConN: {  // Convert narrow pointers above the centerline to NULL
2365       TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2366       const TypePtr* tp = tn->type()->make_ptr();
2367       if (tp && tp->_ptr == TypePtr::AnyNull) {
2368         tn->set_type(TypeNarrowOop::NULL_PTR);
2369       }
2370       break;
2371     }
2372     case Op_Binary:         // These are introduced in the Post_Visit state.
2373       ShouldNotReachHere();
2374       break;
2375     case Op_ClearArray:
2376     case Op_SafePoint:
2377       mem_op = true;
2378       break;
2379     default:
2380       if( n->is_Store() ) {
2381         // Do match stores, despite no ideal reg
2382         mem_op = true;
2383         break;
2384       }
2385       if( n->is_Mem() ) { // Loads and LoadStores
2386         mem_op = true;
2387         // Loads must be root of match tree due to prior load conflict
2388         if( C->subsume_loads() == false )
2389           set_shared(n);
2390       }
2391       // Fall into default case
2392       if( !n->ideal_reg() )
2393         set_dontcare(n);  // Unmatchable Nodes
2394   } // end_switch
2395   return false;
2396 }
2397 
2398 void Matcher::find_shared_post_visit(Node* n, uint opcode) {
2399   if (n->is_predicated_vector()) {
2400     // Restructure into binary trees for Matching.
2401     if (n->req() == 4) {
2402       n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2403       n->set_req(2, n->in(3));
2404       n->del_req(3);
2405     } else if (n->req() == 5) {
2406       n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2407       n->set_req(2, new BinaryNode(n->in(3), n->in(4)));
2408       n->del_req(4);
2409       n->del_req(3);
2410     } else if (n->req() == 6) {
2411       Node* b3 = new BinaryNode(n->in(4), n->in(5));
2412       Node* b2 = new BinaryNode(n->in(3), b3);
2413       Node* b1 = new BinaryNode(n->in(2), b2);
2414       n->set_req(2, b1);
2415       n->del_req(5);
2416       n->del_req(4);
2417       n->del_req(3);
2418     }
2419     return;
2420   }
2421 
2422   switch(opcode) {       // Handle some opcodes special
2423     case Op_CompareAndExchangeB:
2424     case Op_CompareAndExchangeS:
2425     case Op_CompareAndExchangeI:
2426     case Op_CompareAndExchangeL:
2427     case Op_CompareAndExchangeP:
2428     case Op_CompareAndExchangeN:
2429     case Op_WeakCompareAndSwapB:
2430     case Op_WeakCompareAndSwapS:
2431     case Op_WeakCompareAndSwapI:
2432     case Op_WeakCompareAndSwapL:
2433     case Op_WeakCompareAndSwapP:
2434     case Op_WeakCompareAndSwapN:
2435     case Op_CompareAndSwapB:
2436     case Op_CompareAndSwapS:
2437     case Op_CompareAndSwapI:
2438     case Op_CompareAndSwapL:
2439     case Op_CompareAndSwapP:
2440     case Op_CompareAndSwapN: {   // Convert trinary to binary-tree
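      // E.g. the oldval/newval inputs of a CompareAndSwapI are replaced by a
      // single (Binary oldval newval) input, keeping the match tree binary.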
2441       Node* newval = n->in(MemNode::ValueIn);
2442       Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
2443       Node* pair = new BinaryNode(oldval, newval);
2444       n->set_req(MemNode::ValueIn, pair);
2445       n->del_req(LoadStoreConditionalNode::ExpectedIn);
2446       break;
2447     }
2448     case Op_CMoveD:              // Convert trinary to binary-tree
2449     case Op_CMoveF:
2450     case Op_CMoveI:
2451     case Op_CMoveL:
2452     case Op_CMoveN:
2453     case Op_CMoveP: {
2454       // Restructure into a binary tree for Matching.  It's possible that
2455       // we could move this code up next to the graph reshaping for IfNodes
2456       // or vice-versa, but I do not want to debug this for Ladybird.
2457       // 10/2/2000 CNC.
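      // E.g. a CMoveI with inputs (Bool (CmpI a b)), v1, v2 becomes
      //   (CMoveI (Binary Bool (CmpI a b)) (Binary v1 v2)),
      // pairing the Bool with its Cmp and the two data values with each other.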
2458       Node* pair1 = new BinaryNode(n->in(1), n->in(1)->in(1));
2459       n->set_req(1, pair1);
2460       Node* pair2 = new BinaryNode(n->in(2), n->in(3));
2461       n->set_req(2, pair2);
2462       n->del_req(3);
2463       break;
2464     }
2465     case Op_MacroLogicV: {
2466       Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2467       Node* pair2 = new BinaryNode(n->in(3), n->in(4));
2468       n->set_req(1, pair1);
2469       n->set_req(2, pair2);
2470       n->del_req(4);
2471       n->del_req(3);
2472       break;
2473     }
2474     case Op_StoreVectorMasked: {
2475       Node* pair = new BinaryNode(n->in(3), n->in(4));
2476       n->set_req(3, pair);
2477       n->del_req(4);
2478       break;
2479     }
2480     case Op_SelectFromTwoVector:
2481     case Op_LoopLimit: {
2482       Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2483       n->set_req(1, pair1);
2484       n->set_req(2, n->in(3));
2485       n->del_req(3);
2486       break;
2487     }
2488     case Op_StrEquals:
2489     case Op_StrIndexOfChar: {
2490       Node* pair1 = new BinaryNode(n->in(2), n->in(3));
2491       n->set_req(2, pair1);
2492       n->set_req(3, n->in(4));
2493       n->del_req(4);
2494       break;
2495     }
2496     case Op_StrComp:
2497     case Op_StrIndexOf:
2498     case Op_VectorizedHashCode: {
2499       Node* pair1 = new BinaryNode(n->in(2), n->in(3));
2500       n->set_req(2, pair1);
2501       Node* pair2 = new BinaryNode(n->in(4),n->in(5));
2502       n->set_req(3, pair2);
2503       n->del_req(5);
2504       n->del_req(4);
2505       break;
2506     }
2507     case Op_EncodeISOArray:
2508     case Op_StrCompressedCopy:
2509     case Op_StrInflatedCopy: {
2510       // Restructure into a binary tree for Matching.
2511       Node* pair = new BinaryNode(n->in(3), n->in(4));
2512       n->set_req(3, pair);
2513       n->del_req(4);
2514       break;
2515     }
2516     case Op_FmaD:
2517     case Op_FmaF:
2518     case Op_FmaVD:
2519     case Op_FmaVF: {
2520       // Restructure into a binary tree for Matching.
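      // Note the rotation: Fma(a, b, c), computing a * b + c, becomes
      //   Fma(c, Binary(a, b))
      // i.e. the addend moves to input 1 and the two multiplicands are
      // packed into input 2.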
2521       Node* pair = new BinaryNode(n->in(1), n->in(2));
2522       n->set_req(2, pair);
2523       n->set_req(1, n->in(3));
2524       n->del_req(3);
2525       break;
2526     }
2527     case Op_MulAddS2I: {
2528       Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2529       Node* pair2 = new BinaryNode(n->in(3), n->in(4));
2530       n->set_req(1, pair1);
2531       n->set_req(2, pair2);
2532       n->del_req(4);
2533       n->del_req(3);
2534       break;
2535     }
2536     case Op_ClearArray: {
2537       Node* pair = new BinaryNode(n->in(2), n->in(3));
2538       n->set_req(2, pair);
2539       n->set_req(3, n->in(4));
2540       n->del_req(4);
2541       break;
2542     }
2543     case Op_VectorCmpMasked:
2544     case Op_CopySignD:
2545     case Op_SignumVF:
2546     case Op_SignumVD:
2547     case Op_SignumF:
2548     case Op_SignumD: {
2549       Node* pair = new BinaryNode(n->in(2), n->in(3));
2550       n->set_req(2, pair);
2551       n->del_req(3);
2552       break;
2553     }
2554     case Op_VectorBlend:
2555     case Op_VectorInsert: {
2556       Node* pair = new BinaryNode(n->in(1), n->in(2));
2557       n->set_req(1, pair);
2558       n->set_req(2, n->in(3));
2559       n->del_req(3);
2560       break;
2561     }
2562     case Op_LoadVectorGather:
2563       if (is_subword_type(n->bottom_type()->is_vect()->element_basic_type())) {
2564         Node* pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2565         n->set_req(MemNode::ValueIn, pair);
2566         n->del_req(MemNode::ValueIn+1);
2567       }
2568       break;
2569     case Op_LoadVectorGatherMasked:
2570       if (is_subword_type(n->bottom_type()->is_vect()->element_basic_type())) {
2571         Node* pair2 = new BinaryNode(n->in(MemNode::ValueIn + 1), n->in(MemNode::ValueIn + 2));
2572         Node* pair1 = new BinaryNode(n->in(MemNode::ValueIn), pair2);
2573         n->set_req(MemNode::ValueIn, pair1);
2574         n->del_req(MemNode::ValueIn+2);
2575         n->del_req(MemNode::ValueIn+1);
2576         break;
      } // fall-through: non-subword masked gathers pair ValueIn with ValueIn+1 like a scatter below
2578     case Op_StoreVectorScatter: {
2579       Node* pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2580       n->set_req(MemNode::ValueIn, pair);
2581       n->del_req(MemNode::ValueIn+1);
2582       break;
2583     }
2584     case Op_StoreVectorScatterMasked: {
2585       Node* pair = new BinaryNode(n->in(MemNode::ValueIn+1), n->in(MemNode::ValueIn+2));
2586       n->set_req(MemNode::ValueIn+1, pair);
2587       n->del_req(MemNode::ValueIn+2);
2588       pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2589       n->set_req(MemNode::ValueIn, pair);
2590       n->del_req(MemNode::ValueIn+1);
2591       break;
2592     }
2593     case Op_VectorMaskCmp: {
2594       n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2595       n->set_req(2, n->in(3));
2596       n->del_req(3);
2597       break;
2598     }
    case Op_PartialSubtypeCheck: {
      if (UseSecondarySupersTable && n->in(2)->is_Con()) {
        // PartialSubtypeCheck uses both constant and register operands for superclass input.
        n->set_req(2, new BinaryNode(n->in(2), n->in(2)));
      }
      break;
    }
2607     default:
2608       break;
2609   }
2610 }
2611 
2612 #ifndef PRODUCT
2613 void Matcher::record_new2old(Node* newn, Node* old) {
2614   _new2old_map.map(newn->_idx, old);
2615   if (!_reused.test_set(old->_igv_idx)) {
2616     // Reuse the Ideal-level IGV identifier so that the node can be tracked
2617     // across matching. If there are multiple machine nodes expanded from the
2618     // same Ideal node, only one will reuse its IGV identifier.
2619     newn->_igv_idx = old->_igv_idx;
2620   }
2621 }
2622 
// Dump the map from machine-independent (ideal) roots to machine-dependent (mach) roots.
2624 void Matcher::dump_old2new_map() {
2625   _old2new_map.dump();
2626 }
2627 #endif // !PRODUCT
2628 
2629 //---------------------------collect_null_checks-------------------------------
// Find null checks in the ideal graph and record them for the later
// implicit-null-check transformation.  Actually collects either an IfTrue
// or IfFalse projection for the common NOT-null path, AND the ideal
// value being tested.
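// The shapes recognized here are, e.g.:
//   IfTrue(If(Bool(ne), CmpP(val, null)))   -- not-null path on the true side
//   IfFalse(If(Bool(eq), CmpP(val, null)))  -- not-null path on the false side
// In both cases the projection on the not-null side is pushed onto
// _null_check_tests, followed by the ideal value being tested.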
2634 void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
2635   Node *iff = proj->in(0);
2636   if( iff->Opcode() == Op_If ) {
2637     // During matching If's have Bool & Cmp side-by-side
2638     BoolNode *b = iff->in(1)->as_Bool();
2639     Node *cmp = iff->in(2);
2640     int opc = cmp->Opcode();
2641     if (opc != Op_CmpP && opc != Op_CmpN) return;
2642 
2643     const Type* ct = cmp->in(2)->bottom_type();
2644     if (ct == TypePtr::NULL_PTR ||
2645         (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
2646 
2647       bool push_it = false;
2648       if( proj->Opcode() == Op_IfTrue ) {
2649 #ifndef PRODUCT
2650         extern uint all_null_checks_found;
2651         all_null_checks_found++;
2652 #endif
2653         if( b->_test._test == BoolTest::ne ) {
2654           push_it = true;
2655         }
2656       } else {
2657         assert( proj->Opcode() == Op_IfFalse, "" );
2658         if( b->_test._test == BoolTest::eq ) {
2659           push_it = true;
2660         }
2661       }
2662       if( push_it ) {
2663         _null_check_tests.push(proj);
2664         Node* val = cmp->in(1);
2665 #ifdef _LP64
2666         if (val->bottom_type()->isa_narrowoop() &&
2667             !Matcher::narrow_oop_use_complex_address()) {
2668           //
2669           // Look for DecodeN node which should be pinned to orig_proj.
2670           // On platforms (Sparc) which can not handle 2 adds
2671           // in addressing mode we have to keep a DecodeN node and
2672           // use it to do implicit null check in address.
2673           //
2674           // DecodeN node was pinned to non-null path (orig_proj) during
2675           // CastPP transformation in final_graph_reshaping_impl().
2676           //
          uint cnt = orig_proj->outcnt();
          for (uint i = 0; i < cnt; i++) {
2679             Node* d = orig_proj->raw_out(i);
2680             if (d->is_DecodeN() && d->in(1) == val) {
2681               val = d;
2682               val->set_req(0, nullptr); // Unpin now.
2683               // Mark this as special case to distinguish from
2684               // a regular case: CmpP(DecodeN, null).
2685               val = (Node*)(((intptr_t)val) | 1);
2686               break;
2687             }
2688           }
2689         }
2690 #endif
2691         _null_check_tests.push(val);
2692       }
2693     }
2694   }
2695 }
2696 
2697 //---------------------------validate_null_checks------------------------------
// It's possible that the value being null-checked is not the root of a
// match tree.  If so, we cannot use the value in an implicit null check.
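// _null_check_tests is laid out as (proj, value) pairs. Pairs whose value
// did not become the root of a match tree are compacted away below by
// moving the last pair into their slot and shrinking the list.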
2700 void Matcher::validate_null_checks( ) {
2701   uint cnt = _null_check_tests.size();
2702   for( uint i=0; i < cnt; i+=2 ) {
2703     Node *test = _null_check_tests[i];
2704     Node *val = _null_check_tests[i+1];
2705     bool is_decoden = ((intptr_t)val) & 1;
2706     val = (Node*)(((intptr_t)val) & ~1);
2707     if (has_new_node(val)) {
2708       Node* new_val = new_node(val);
2709       if (is_decoden) {
2710         assert(val->is_DecodeNarrowPtr() && val->in(0) == nullptr, "sanity");
2711         // Note: new_val may have a control edge if
2712         // the original ideal node DecodeN was matched before
2713         // it was unpinned in Matcher::collect_null_checks().
2714         // Unpin the mach node and mark it.
2715         new_val->set_req(0, nullptr);
2716         new_val = (Node*)(((intptr_t)new_val) | 1);
2717       }
2718       // Is a match-tree root, so replace with the matched value
2719       _null_check_tests.map(i+1, new_val);
2720     } else {
2721       // Yank from candidate list
2722       _null_check_tests.map(i+1,_null_check_tests[--cnt]);
2723       _null_check_tests.map(i,_null_check_tests[--cnt]);
2724       _null_check_tests.pop();
2725       _null_check_tests.pop();
2726       i-=2;
2727     }
2728   }
2729 }
2730 
2731 bool Matcher::gen_narrow_oop_implicit_null_checks() {
  // Advise the matcher to perform null checks on the narrow oop side.
  // Implicit checks are not possible on the uncompressed oop side anyway
  // (at least not for read accesses).
  // This performs significantly better (especially on Power6).
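  // A sketch of why the narrow side works: a compressed-oop access decodes
  // to [base + (narrow_oop << shift) + offset]. When the page at the heap
  // base is read-protected (CompressedOops::use_implicit_null_checks()),
  // a null narrow oop faults there, so the null check can be folded into
  // the access even on systems where address 0 itself is readable.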
2736   if (!os::zero_page_read_protected()) {
2737     return true;
2738   }
2739   return CompressedOops::use_implicit_null_checks() &&
2740          (narrow_oop_use_complex_address() ||
2741           CompressedOops::base() != nullptr);
2742 }
2743 
2744 // Compute RegMask for an ideal register.
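// The mask is not tabulated directly: a synthetic spill-like load of the
// requested ideal type (addressed off the frame pointer) is built, run
// through match_tree(), and the mask is read off the resulting mach
// node's output register mask.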
2745 const RegMask* Matcher::regmask_for_ideal_register(uint ideal_reg, Node* ret) {
2746   assert(!C->failing_internal() || C->failure_is_artificial(), "already failing.");
2747   if (C->failing()) {
2748     return nullptr;
2749   }
2750   const Type* t = Type::mreg2type[ideal_reg];
2751   if (t == nullptr) {
2752     assert(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ, "not a vector: %d", ideal_reg);
2753     return nullptr; // not supported
2754   }
2755   Node* fp  = ret->in(TypeFunc::FramePtr);
2756   Node* mem = ret->in(TypeFunc::Memory);
2757   const TypePtr* atp = TypePtr::BOTTOM;
2758   MemNode::MemOrd mo = MemNode::unordered;
2759 
2760   Node* spill;
2761   switch (ideal_reg) {
2762     case Op_RegN: spill = new LoadNNode(nullptr, mem, fp, atp, t->is_narrowoop(), mo); break;
2763     case Op_RegI: spill = new LoadINode(nullptr, mem, fp, atp, t->is_int(),       mo); break;
2764     case Op_RegP: spill = new LoadPNode(nullptr, mem, fp, atp, t->is_ptr(),       mo); break;
2765     case Op_RegF: spill = new LoadFNode(nullptr, mem, fp, atp, t,                 mo); break;
2766     case Op_RegD: spill = new LoadDNode(nullptr, mem, fp, atp, t,                 mo); break;
2767     case Op_RegL: spill = new LoadLNode(nullptr, mem, fp, atp, t->is_long(),      mo); break;
2768 
2769     case Op_VecA: // fall-through
2770     case Op_VecS: // fall-through
2771     case Op_VecD: // fall-through
2772     case Op_VecX: // fall-through
2773     case Op_VecY: // fall-through
2774     case Op_VecZ: spill = new LoadVectorNode(nullptr, mem, fp, atp, t->is_vect()); break;
2775     case Op_RegVectMask: return Matcher::predicate_reg_mask();
2776 
2777     default: ShouldNotReachHere();
2778   }
2779   MachNode* mspill = match_tree(spill);
2780   assert(mspill != nullptr || C->failure_is_artificial(), "matching failed: %d", ideal_reg);
2781   if (C->failing()) {
2782     return nullptr;
2783   }
2784   // Handle generic vector operand case
2785   if (Matcher::supports_generic_vector_operands && t->isa_vect()) {
2786     specialize_mach_node(mspill);
2787   }
2788   return &mspill->out_RegMask();
2789 }
2790 
2791 // Process Mach IR right after selection phase is over.
2792 void Matcher::do_postselect_cleanup() {
2793   if (supports_generic_vector_operands) {
2794     specialize_generic_vector_operands();
2795     if (C->failing())  return;
2796   }
2797 }
2798 
2799 //----------------------------------------------------------------------
// Generic machine operand elision.
2801 //----------------------------------------------------------------------
2802 
2803 // Compute concrete vector operand for a generic TEMP vector mach node based on its user info.
2804 void Matcher::specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx) {
2805   assert(use->in(idx) == tmp, "not a user");
2806   assert(!Matcher::is_generic_vector(use->_opnds[0]), "use not processed yet");
2807 
  if (idx == use->two_adr()) { // DEF_TEMP case
2809     tmp->_opnds[0] = use->_opnds[0]->clone();
2810   } else {
2811     uint ideal_vreg = vector_ideal_reg(C->max_vector_size());
2812     tmp->_opnds[0] = Matcher::pd_specialize_generic_vector_operand(tmp->_opnds[0], ideal_vreg, true /*is_temp*/);
2813   }
2814 }
2815 
2816 // Compute concrete vector operand for a generic DEF/USE vector operand (of mach node m at index idx).
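// For example (operand names as in the x86 AD files; other platforms use
// their own), a generic 'vec' operand whose defining node produces a
// 512-bit vector (ideal_reg() == Op_VecZ) is rewritten to the concrete
// 'vecZ' operand.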
2817 MachOper* Matcher::specialize_vector_operand(MachNode* m, uint opnd_idx) {
2818   assert(Matcher::is_generic_vector(m->_opnds[opnd_idx]), "repeated updates");
2819   Node* def = nullptr;
2820   if (opnd_idx == 0) { // DEF
2821     def = m; // use mach node itself to compute vector operand type
2822   } else {
2823     int base_idx = m->operand_index(opnd_idx);
2824     def = m->in(base_idx);
2825     if (def->is_Mach()) {
2826       if (def->is_MachTemp() && Matcher::is_generic_vector(def->as_Mach()->_opnds[0])) {
2827         specialize_temp_node(def->as_MachTemp(), m, base_idx); // MachTemp node use site
2828       } else if (is_reg2reg_move(def->as_Mach())) {
2829         def = def->in(1); // skip over generic reg-to-reg moves
2830       }
2831     }
2832   }
2833   assert(def->bottom_type()->isa_vect(), "not a vector");
2834   uint ideal_vreg = def->bottom_type()->ideal_reg();
2835   return Matcher::pd_specialize_generic_vector_operand(m->_opnds[opnd_idx], ideal_vreg, false /*is_temp*/);
2836 }
2837 
2838 void Matcher::specialize_mach_node(MachNode* m) {
2839   assert(!m->is_MachTemp(), "processed along with its user");
  // For generic use operands, pull the specific register class operand from
  // the def instruction's output operand (def operand).
2842   for (uint i = 0; i < m->num_opnds(); i++) {
2843     if (Matcher::is_generic_vector(m->_opnds[i])) {
2844       m->_opnds[i] = specialize_vector_operand(m, i);
2845     }
2846   }
2847 }
2848 
2849 // Replace generic vector operands with concrete vector operands and eliminate generic reg-to-reg moves from the graph.
2850 void Matcher::specialize_generic_vector_operands() {
2851   assert(supports_generic_vector_operands, "sanity");
2852   ResourceMark rm;
2853 
2854   // Replace generic vector operands (vec/legVec) with concrete ones (vec[SDXYZ]/legVec[SDXYZ])
2855   // and remove reg-to-reg vector moves (MoveVec2Leg and MoveLeg2Vec).
2856   Unique_Node_List live_nodes;
2857   C->identify_useful_nodes(live_nodes);
2858 
2859   while (live_nodes.size() > 0) {
2860     MachNode* m = live_nodes.pop()->isa_Mach();
2861     if (m != nullptr) {
2862       if (Matcher::is_reg2reg_move(m)) {
2863         // Register allocator properly handles vec <=> leg moves using register masks.
2864         int opnd_idx = m->operand_index(1);
2865         Node* def = m->in(opnd_idx);
2866         m->subsume_by(def, C);
2867       } else if (m->is_MachTemp()) {
2868         // process MachTemp nodes at use site (see Matcher::specialize_vector_operand)
2869       } else {
2870         specialize_mach_node(m);
2871       }
2872     }
2873   }
2874 }
2875 
2876 uint Matcher::vector_length(const Node* n) {
2877   const TypeVect* vt = n->bottom_type()->is_vect();
2878   return vt->length();
2879 }
2880 
2881 uint Matcher::vector_length(const MachNode* use, const MachOper* opnd) {
2882   int def_idx = use->operand_index(opnd);
2883   Node* def = use->in(def_idx);
2884   return def->bottom_type()->is_vect()->length();
2885 }
2886 
2887 uint Matcher::vector_length_in_bytes(const Node* n) {
2888   const TypeVect* vt = n->bottom_type()->is_vect();
2889   return vt->length_in_bytes();
2890 }
2891 
2892 uint Matcher::vector_length_in_bytes(const MachNode* use, const MachOper* opnd) {
2893   uint def_idx = use->operand_index(opnd);
2894   Node* def = use->in(def_idx);
2895   return def->bottom_type()->is_vect()->length_in_bytes();
2896 }
2897 
2898 BasicType Matcher::vector_element_basic_type(const Node* n) {
2899   const TypeVect* vt = n->bottom_type()->is_vect();
2900   return vt->element_basic_type();
2901 }
2902 
2903 BasicType Matcher::vector_element_basic_type(const MachNode* use, const MachOper* opnd) {
2904   int def_idx = use->operand_index(opnd);
2905   Node* def = use->in(def_idx);
2906   return def->bottom_type()->is_vect()->element_basic_type();
2907 }
2908 
2909 bool Matcher::is_non_long_integral_vector(const Node* n) {
2910   BasicType bt = vector_element_basic_type(n);
2911   assert(bt != T_CHAR, "char is not allowed in vector");
2912   return is_subword_type(bt) || bt == T_INT;
2913 }
2914 
2915 bool Matcher::is_encode_and_store_pattern(const Node* n, const Node* m) {
2916   if (n == nullptr ||
2917       m == nullptr ||
2918       n->Opcode() != Op_StoreN ||
2919       !m->is_EncodeP() ||
2920       n->as_Store()->barrier_data() == 0) {
2921     return false;
2922   }
2923   assert(m == n->in(MemNode::ValueIn), "m should be input to n");
2924   return true;
2925 }
2926 
2927 #ifdef ASSERT
2928 bool Matcher::verify_after_postselect_cleanup() {
2929   assert(!C->failing_internal() || C->failure_is_artificial(), "sanity");
2930   if (supports_generic_vector_operands) {
2931     Unique_Node_List useful;
2932     C->identify_useful_nodes(useful);
2933     for (uint i = 0; i < useful.size(); i++) {
2934       MachNode* m = useful.at(i)->isa_Mach();
2935       if (m != nullptr) {
2936         assert(!Matcher::is_reg2reg_move(m), "no MoveVec nodes allowed");
2937         for (uint j = 0; j < m->num_opnds(); j++) {
2938           assert(!Matcher::is_generic_vector(m->_opnds[j]), "no generic vector operands allowed");
2939         }
2940       }
2941     }
2942   }
2943   return true;
2944 }
2945 #endif // ASSERT
2946 
// Used by the DFA in dfa_xxx.cpp.  Checks for a following barrier or
// atomic instruction acting as a store-load barrier without any
// intervening volatile load, in which case we don't need a barrier here.
// We retain the Node to act as a compiler ordering barrier.
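// For example, in the sequence
//   StoreI ...; MemBarVolatile; CompareAndSwapI ...;
// the trailing atomic already provides the required store-load ordering,
// so the MemBarVolatile may be matched to an encoding that emits no fence.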
2951 bool Matcher::post_store_load_barrier(const Node* vmb) {
2952   Compile* C = Compile::current();
2953   assert(vmb->is_MemBar(), "");
2954   assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
2955   const MemBarNode* membar = vmb->as_MemBar();
2956 
2957   // Get the Ideal Proj node, ctrl, that can be used to iterate forward
2958   Node* ctrl = nullptr;
2959   for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
2960     Node* p = membar->fast_out(i);
2961     assert(p->is_Proj(), "only projections here");
2962     if ((p->as_Proj()->_con == TypeFunc::Control) &&
2963         !C->node_arena()->contains(p)) { // Unmatched old-space only
2964       ctrl = p;
2965       break;
2966     }
2967   }
2968   assert((ctrl != nullptr), "missing control projection");
2969 
2970   for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
2971     Node *x = ctrl->fast_out(j);
2972     int xop = x->Opcode();
2973 
    // We don't need the current barrier if we see another barrier or a
    // lock before seeing a volatile load.
2976     //
2977     // Op_Fastunlock previously appeared in the Op_* list below.
2978     // With the advent of 1-0 lock operations we're no longer guaranteed
2979     // that a monitor exit operation contains a serializing instruction.
2980 
2981     if (xop == Op_MemBarVolatile ||
2982         xop == Op_CompareAndExchangeB ||
2983         xop == Op_CompareAndExchangeS ||
2984         xop == Op_CompareAndExchangeI ||
2985         xop == Op_CompareAndExchangeL ||
2986         xop == Op_CompareAndExchangeP ||
2987         xop == Op_CompareAndExchangeN ||
2988         xop == Op_WeakCompareAndSwapB ||
2989         xop == Op_WeakCompareAndSwapS ||
2990         xop == Op_WeakCompareAndSwapL ||
2991         xop == Op_WeakCompareAndSwapP ||
2992         xop == Op_WeakCompareAndSwapN ||
2993         xop == Op_WeakCompareAndSwapI ||
2994         xop == Op_CompareAndSwapB ||
2995         xop == Op_CompareAndSwapS ||
2996         xop == Op_CompareAndSwapL ||
2997         xop == Op_CompareAndSwapP ||
2998         xop == Op_CompareAndSwapN ||
2999         xop == Op_CompareAndSwapI ||
3000         BarrierSet::barrier_set()->barrier_set_c2()->matcher_is_store_load_barrier(x, xop)) {
3001       return true;
3002     }
3003 
3004     // Op_FastLock previously appeared in the Op_* list above.
3005     if (xop == Op_FastLock) {
3006       return true;
3007     }
3008 
3009     if (x->is_MemBar()) {
      // We must retain this membar if there is an upcoming volatile
      // load, which will be followed by an acquire membar.
3012       if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
3013         return false;
3014       } else {
3015         // For other kinds of barriers, check by pretending we
3016         // are them, and seeing if we can be removed.
3017         return post_store_load_barrier(x->as_MemBar());
3018       }
3019     }
3020 
3021     // probably not necessary to check for these
3022     if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
3023       return false;
3024     }
3025   }
3026   return false;
3027 }
3028 
// Check whether node n is a branch to an uncommon trap that we could
// optimize as a test with very high branch cost in case the uncommon
// trap is taken.  The code must be able to be recompiled to use a
// cheaper test.
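// The expected shape below the If is roughly
//   If -> IfFalse -> (Region ->)* Call(uncommon trap blob)
// with the trap request constant as the call's first parameter.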
3033 bool Matcher::branches_to_uncommon_trap(const Node *n) {
3034   // Don't do it for natives, adapters, or runtime stubs
3035   Compile *C = Compile::current();
3036   if (!C->is_method_compilation()) return false;
3037 
3038   assert(n->is_If(), "You should only call this on if nodes.");
3039   IfNode *ifn = n->as_If();
3040 
3041   Node *ifFalse = nullptr;
3042   for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
3043     if (ifn->fast_out(i)->is_IfFalse()) {
3044       ifFalse = ifn->fast_out(i);
3045       break;
3046     }
3047   }
3048   assert(ifFalse, "An If should have an ifFalse. Graph is broken.");
3049 
3050   Node *reg = ifFalse;
3051   int cnt = 4; // We must protect against cycles.  Limit to 4 iterations.
3052                // Alternatively use visited set?  Seems too expensive.
3053   while (reg != nullptr && cnt > 0) {
3054     CallNode *call = nullptr;
3055     RegionNode *nxt_reg = nullptr;
3056     for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
3057       Node *o = reg->fast_out(i);
3058       if (o->is_Call()) {
3059         call = o->as_Call();
3060       }
3061       if (o->is_Region()) {
3062         nxt_reg = o->as_Region();
3063       }
3064     }
3065 
3066     if (call &&
3067         call->entry_point() == OptoRuntime::uncommon_trap_blob()->entry_point()) {
3068       const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
3069       if (trtype->isa_int() && trtype->is_int()->is_con()) {
3070         jint tr_con = trtype->is_int()->get_con();
3071         Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
3072         Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
3073         assert((int)reason < (int)BitsPerInt, "recode bit map");
3074 
3075         if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
3076             && action != Deoptimization::Action_none) {
3077           // This uncommon trap is sure to recompile, eventually.
3078           // When that happens, C->too_many_traps will prevent
3079           // this transformation from happening again.
3080           return true;
3081         }
3082       }
3083     }
3084 
3085     reg = nxt_reg;
3086     cnt--;
3087   }
3088 
3089   return false;
3090 }
3091 
3092 //=============================================================================
3093 //---------------------------State---------------------------------------------
3094 State::State(void) : _rule() {
3095 #ifdef ASSERT
3096   _id = 0;
3097   _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
3098   _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
3099 #endif
3100 }
3101 
3102 #ifdef ASSERT
3103 State::~State() {
3104   _id = 99;
3105   _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
3106   _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
3107   memset(_cost, -3, sizeof(_cost));
3108   memset(_rule, -3, sizeof(_rule));
3109 }
3110 #endif
3111 
3112 #ifndef PRODUCT
3113 //---------------------------dump----------------------------------------------
3114 void State::dump() {
3115   tty->print("\n");
3116   dump(0);
3117 }
3118 
3119 void State::dump(int depth) {
3120   for (int j = 0; j < depth; j++) {
3121     tty->print("   ");
3122   }
3123   tty->print("--N: ");
3124   _leaf->dump();
3125   uint i;
3126   for (i = 0; i < _LAST_MACH_OPER; i++) {
3127     // Check for valid entry
3128     if (valid(i)) {
3129       for (int j = 0; j < depth; j++) {
3130         tty->print("   ");
3131       }
3132       assert(cost(i) != max_juint, "cost must be a valid value");
3133       assert(rule(i) < _last_Mach_Node, "rule[i] must be valid rule");
3134       tty->print_cr("%s  %d  %s",
3135                     ruleName[i], cost(i), ruleName[rule(i)] );
3136     }
3137   }
3138   tty->cr();
3139 
3140   for (i = 0; i < 2; i++) {
3141     if (_kids[i]) {
3142       _kids[i]->dump(depth + 1);
3143     }
3144   }
3145 }
3146 #endif