/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"

OptoReg::Name OptoReg::c_frame_pointer;

const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
RegMask Matcher::caller_save_regmask;
RegMask Matcher::caller_save_regmask_exclude_soe;
RegMask Matcher::mh_caller_save_regmask;
RegMask Matcher::mh_caller_save_regmask_exclude_soe;
RegMask Matcher::STACK_ONLY_mask;
RegMask Matcher::c_frame_ptr_mask;
const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;

//---------------------------Matcher-------------------------------------------
Matcher::Matcher()
: PhaseTransform( Phase::Ins_Select ),
  _states_arena(Chunk::medium_size, mtCompiler),
  _visited(&_states_arena),
  _shared(&_states_arena),
  _dontcare(&_states_arena),
  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  _swallowed(swallowed),
  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  _must_clone(must_clone),
  _shared_nodes(C->comp_arena()),
#ifndef PRODUCT
  _old2new_map(C->comp_arena()),
  _new2old_map(C->comp_arena()),
  _reused(C->comp_arena()),
#endif // !PRODUCT
  _allocation_started(false),
  _ruleName(ruleName),
  _register_save_policy(register_save_policy),
  _c_reg_save_policy(c_reg_save_policy),
  _register_save_type(register_save_type) {
  C->set_matcher(this);

  idealreg2spillmask  [Op_RegI] = NULL;
  idealreg2spillmask  [Op_RegN] = NULL;
  idealreg2spillmask  [Op_RegL] = NULL;
  idealreg2spillmask  [Op_RegF] = NULL;
  idealreg2spillmask  [Op_RegD] = NULL;
  idealreg2spillmask  [Op_RegP] = NULL;
  idealreg2spillmask  [Op_VecA] = NULL;
  idealreg2spillmask  [Op_VecS] = NULL;
  idealreg2spillmask  [Op_VecD] = NULL;
  idealreg2spillmask  [Op_VecX] = NULL;
  idealreg2spillmask  [Op_VecY] = NULL;
  idealreg2spillmask  [Op_VecZ] = NULL;
  idealreg2spillmask  [Op_RegFlags] = NULL;
  idealreg2spillmask  [Op_RegVectMask] = NULL;

  idealreg2debugmask  [Op_RegI] = NULL;
  idealreg2debugmask  [Op_RegN] = NULL;
  idealreg2debugmask  [Op_RegL] = NULL;
  idealreg2debugmask  [Op_RegF] = NULL;
  idealreg2debugmask  [Op_RegD] = NULL;
  idealreg2debugmask  [Op_RegP] = NULL;
  idealreg2debugmask  [Op_VecA] = NULL;
  idealreg2debugmask  [Op_VecS] = NULL;
  idealreg2debugmask  [Op_VecD] = NULL;
  idealreg2debugmask  [Op_VecX] = NULL;
  idealreg2debugmask  [Op_VecY] = NULL;
  idealreg2debugmask  [Op_VecZ] = NULL;
  idealreg2debugmask  [Op_RegFlags] = NULL;
  idealreg2debugmask  [Op_RegVectMask] = NULL;

  idealreg2mhdebugmask[Op_RegI] = NULL;
  idealreg2mhdebugmask[Op_RegN] = NULL;
  idealreg2mhdebugmask[Op_RegL] = NULL;
  idealreg2mhdebugmask[Op_RegF] = NULL;
  idealreg2mhdebugmask[Op_RegD] = NULL;
  idealreg2mhdebugmask[Op_RegP] = NULL;
  idealreg2mhdebugmask[Op_VecA] = NULL;
  idealreg2mhdebugmask[Op_VecS] = NULL;
  idealreg2mhdebugmask[Op_VecD] = NULL;
  idealreg2mhdebugmask[Op_VecX] = NULL;
  idealreg2mhdebugmask[Op_VecY] = NULL;
  idealreg2mhdebugmask[Op_VecZ] = NULL;
  idealreg2mhdebugmask[Op_RegFlags] = NULL;
  idealreg2mhdebugmask[Op_RegVectMask] = NULL;

  debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
}

//------------------------------warp_incoming_stk_arg------------------------
// This warps a VMReg into an OptoReg::Name
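// Incoming stack slots are biased by _old_SP plus the out-preserve area so
// that they are expressed in the register allocator's frame-relative
// numbering (see the note on OptoReg::stack0() biasing in match()).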
OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
  OptoReg::Name warped;
  if( reg->is_stack() ) {  // Stack slot argument?
    warped = OptoReg::add(_old_SP, reg->reg2stack() );
    warped = OptoReg::add(warped, C->out_preserve_stack_slots());
    if( warped >= _in_arg_limit )
      _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
    if (!RegMask::can_represent_arg(warped)) {
      // the compiler cannot represent this method's calling sequence
      C->record_method_not_compilable("unsupported incoming calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}

//---------------------------compute_old_SP------------------------------------
OptoReg::Name Compile::compute_old_SP() {
  int fixed    = fixed_slots();
  int preserve = in_preserve_stack_slots();
  return OptoReg::stack2reg(align_up(fixed + preserve, (int)Matcher::stack_alignment_in_slots()));
}

#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  // Make sure that the new graph only references new nodes
  ResourceMark rm;
  Unique_Node_List worklist;
  VectorSet visited;
  worklist.push(xroot);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    visited.set(n->_idx);
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != NULL) {
        assert(C->node_arena()->contains(in), "dead node");
        if (!visited.test(in->_idx)) {
          worklist.push(in);
        }
      }
    }
  }
}
#endif

// Array of RegMask, one per returned value (inline type instances can
// be returned as multiple return values, one per field)
RegMask* Matcher::return_values_mask(const TypeFunc* tf) {
  const TypeTuple* range = tf->range_cc();
  uint cnt = range->cnt() - TypeFunc::Parms;
  if (cnt == 0) {
    return NULL;
  }
  RegMask* mask = NEW_RESOURCE_ARRAY(RegMask, cnt);
  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, cnt);
  VMRegPair* vm_parm_regs = NEW_RESOURCE_ARRAY(VMRegPair, cnt);
  for (uint i = 0; i < cnt; i++) {
    sig_bt[i] = range->field_at(i+TypeFunc::Parms)->basic_type();
  }

  int regs = SharedRuntime::java_return_convention(sig_bt, vm_parm_regs, cnt);
  if (regs <= 0) {
    // We ran out of registers to store the IsInit information for a nullable inline type return.
    // Since it is only set in the 'call_epilog', we can simply put it on the stack.
    assert(tf->returns_inline_type_as_fields(), "should have been tested during graph construction");
    // TODO 8284443 Can we teach the register allocator to reserve a stack slot instead?
    // mask[--cnt] = STACK_ONLY_mask does not work (test with -XX:+StressGCM)
    int slot = C->fixed_slots() - 2;
    if (C->needs_stack_repair()) {
      slot -= 2; // Account for stack increment value
    }
    mask[--cnt].Clear();
    mask[cnt].Insert(OptoReg::stack2reg(slot));
  }
  for (uint i = 0; i < cnt; i++) {
    mask[i].Clear();

    OptoReg::Name reg1 = OptoReg::as_OptoReg(vm_parm_regs[i].first());
    if (OptoReg::is_valid(reg1)) {
      mask[i].Insert(reg1);
    }
    OptoReg::Name reg2 = OptoReg::as_OptoReg(vm_parm_regs[i].second());
    if (OptoReg::is_valid(reg2)) {
      mask[i].Insert(reg2);
    }
  }

  return mask;
}

//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
  init_spill_mask( C->root()->in(1) );
  _return_addr_mask = return_addr();
#ifdef _LP64
  // Pointers take 2 slots in 64-bit land
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif

  // Map Java-signature return types into return register-value
  // machine registers.
  _return_values_mask = return_values_mask(C->tf());

  // ---------------
  // Frame Layout

  // Need the method signature to determine the incoming argument types,
  // because the types determine which registers the incoming arguments are
  // in, and this affects the matched code.
  const TypeTuple *domain = C->tf()->domain_cc();
  uint             argcnt = domain->cnt() - TypeFunc::Parms;
  BasicType *sig_bt        = NEW_RESOURCE_ARRAY( BasicType, argcnt );
  VMRegPair *vm_parm_regs  = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
  _parm_regs               = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
    sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
  }

  // Pass array of ideal registers and length to USER code (from the AD file)
  // that will convert this to an array of register numbers.
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  // Sanity check users' calling convention.  Real handy while trying to
  // get the initial port correct.
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves" );
        _parm_regs[i].set_bad();
        continue;
      }
      VMReg parm_reg = vm_parm_regs[i].first();
      assert(parm_reg->is_valid(), "invalid arg?");
      if (parm_reg->is_reg()) {
        OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
        assert(can_be_java_arg(opto_parm_reg) ||
               C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
               opto_parm_reg == inline_cache_reg(),
               "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
        assert(parm_reg != vm_parm_regs[j].first(),
               "calling conv. must produce distinct regs");
      }
    }
  }
#endif

  // Do some initial frame layout.

  // Compute the old incoming SP (may be called FP) as
  //   OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
  _old_SP = C->compute_old_SP();
  assert( is_even(_old_SP), "must be even" );

  // Compute highest incoming stack argument as
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    // Permit args to have no register
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
      _parm_regs[i].set_bad();
      continue;
    }
    // calling_convention returns stack arguments as a count of
    // slots beyond OptoReg::stack0()/VMRegImpl::stack0.  We need to convert this to
    // the allocator's point of view, taking into account all the
    // preserve area, locks & pad2.

    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);

    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);

    // Saved biased stack-slot register number
    _parm_regs[i].set_pair(reg2, reg1);
  }

  // Finally, make sure the incoming arguments take up an even number of
  // words, in case the arguments or locals need to contain doubleword stack
  // slots.  The rest of the system assumes that stack slot pairs (in
  // particular, in the spill area) which look aligned will in fact be
  // aligned relative to the stack pointer in the target machine.  Double
  // stack slots will always be allocated aligned.
  _new_SP = OptoReg::Name(align_up(_in_arg_limit, (int)RegMask::SlotsPerLong));

  // Compute highest outgoing stack argument as
  //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
  _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(_out_arg_limit), "out_preserve must be even" );

  if (!RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1))) {
    // the compiler cannot represent this method's calling sequence
    C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
  }

  if (C->failing())  return;  // bailed out on incoming arg failure

  // ---------------
  // Collect roots of matcher trees.  Every node for which
  // _shared[_idx] is cleared is guaranteed to not be shared, and thus
  // can be a valid interior of some tree.
  find_shared( C->root() );
  find_shared( C->top() );

  C->print_method(PHASE_BEFORE_MATCHING, 1);

  // Create a new ideal node ConP #NULL even if it already exists in old space
  // to avoid false sharing if the corresponding mach node is not used.
  // The corresponding mach node is only used in rare cases for derived
  // pointers.
  Node* new_ideal_null = ConNode::make(TypePtr::NULL_PTR);

  // Swap out to old-space; emptying new-space
  Arena *old = C->node_arena()->move_contents(C->old_arena());

  // Save debug and profile information for nodes in old space:
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != NULL) {
    C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
                           (C->comp_arena(), _old_node_note_array->length(),
                            0, NULL));
  }

  // Pre-size the new_node table to avoid the need for range checks.
  grow_new_node_array(C->unique());

  // Reset node counter so MachNodes start with _idx at 0
  int live_nodes = C->live_nodes();
  C->set_unique(0);
  C->reset_dead_node_list();

  // Recursively match trees from old space into new space.
  // Correct leaves of new-space Nodes; they point to old-space.
  _visited.clear();
  C->set_cached_top_node(xform( C->top(), live_nodes ));
  if (!C->failing()) {
    Node* xroot =        xform( C->root(), 1 );
    if (xroot == NULL) {
      Matcher::soft_match_failure();  // recursive matching process failed
      C->record_method_not_compilable("instruction match failed");
    } else {
      // During matching shared constants were attached to C->root()
      // because xroot wasn't available yet, so transfer the uses to
      // the xroot.
      for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
        Node* n = C->root()->fast_out(j);
        if (C->node_arena()->contains(n)) {
          assert(n->in(0) == C->root(), "should be control user");
          n->set_req(0, xroot);
          --j;
          --jmax;
        }
      }

      // Generate new mach node for ConP #NULL
      assert(new_ideal_null != NULL, "sanity");
      _mach_null = match_tree(new_ideal_null);
      // Don't set control, it will confuse GCM since there are no uses.
      // The control will be set when this node is used first time
      // in find_base_for_derived().
      assert(_mach_null != NULL, "");

      C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL);

#ifdef ASSERT
      verify_new_nodes_only(xroot);
#endif
    }
  }
  if (C->top() == NULL || C->root() == NULL) {
    C->record_method_not_compilable("graph lost"); // %%% cannot happen?
  }
  if (C->failing()) {
    // delete old;
    old->destruct_contents();
    return;
  }
  assert( C->top(), "" );
  assert( C->root(), "" );
  validate_null_checks();

  // Now smoke old-space
  NOT_DEBUG( old->destruct_contents() );

  // ------------------------
  // Set up save-on-entry registers.
  Fixup_Save_On_Entry( );

  { // Cleanup mach IR after selection phase is over.
    Compile::TracePhase tp("postselect_cleanup", &timers[_t_postselect_cleanup]);
    do_postselect_cleanup();
    if (C->failing())  return;
    assert(verify_after_postselect_cleanup(), "");
  }
}

//------------------------------Fixup_Save_On_Entry----------------------------
// The stated purpose of this routine is to take care of save-on-entry
// registers.  However, the overall goal of the Match phase is to convert into
// machine-specific instructions which have RegMasks to guide allocation.
// So what this procedure really does is put a valid RegMask on each input
// to the machine-specific variations of all Return, TailCall and Halt
// instructions.  It also adds edges to define the save-on-entry values (and of
// course gives them a mask).

static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
  // Do all the pre-defined register masks
  rms[TypeFunc::Control  ] = RegMask::Empty;
  rms[TypeFunc::I_O      ] = RegMask::Empty;
  rms[TypeFunc::Memory   ] = RegMask::Empty;
  rms[TypeFunc::ReturnAdr] = ret_adr;
  rms[TypeFunc::FramePtr ] = fp;
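  // Entries [TypeFunc::Parms, size) are left for the caller to fill in
  // (return values and save-on-entry registers; see Fixup_Save_On_Entry).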
  return rms;
}

const int Matcher::scalable_predicate_reg_slots() {
  assert(Matcher::has_predicated_vectors() && Matcher::supports_scalable_vector(),
        "scalable predicate vector should be supported");
  int vector_reg_bit_size = Matcher::scalable_vector_reg_size(T_BYTE) << LogBitsPerByte;
  // We assume each predicate register is one-eighth the size of a
  // scalable vector register, i.e. one mask bit per vector byte.
  int predicate_reg_bit_size = vector_reg_bit_size >> 3;
  // Compute number of slots which is required when scalable predicate
  // register is spilled. E.g. if scalable vector register is 640 bits,
  // predicate register is 80 bits, which takes 2.5 slots.
  // We will round up the slot number to power of 2, which is required
  // by find_first_set().
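  // E.g. 80 bits: (80 >> LogBitsPerInt) + 1 = 3 slots, rounded up to 4.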
  int slots = predicate_reg_bit_size & (BitsPerInt - 1)
              ? (predicate_reg_bit_size >> LogBitsPerInt) + 1
              : predicate_reg_bit_size >> LogBitsPerInt;
  return round_up_power_of_2(slots);
}

#define NOF_STACK_MASKS (3*13)
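// 3 mask tables (spill, debug, mhdebug) x 13 ideal register classes
// (6 scalar, 6 vector, plus RegVectMask).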

// Create the initial stack mask used by values spilling to the stack.
// Disallow any debug info in outgoing argument areas by setting the
// initial mask accordingly.
void Matcher::init_first_stack_mask() {

  // Allocate storage for spill masks as masks for the appropriate load type.
  RegMask *rms = (RegMask*)C->comp_arena()->AmallocWords(sizeof(RegMask) * NOF_STACK_MASKS);

  // Initialize empty placeholder masks into the newly allocated arena
  for (int i = 0; i < NOF_STACK_MASKS; i++) {
    new (rms + i) RegMask();
  }

  idealreg2spillmask  [Op_RegN] = &rms[0];
  idealreg2spillmask  [Op_RegI] = &rms[1];
  idealreg2spillmask  [Op_RegL] = &rms[2];
  idealreg2spillmask  [Op_RegF] = &rms[3];
  idealreg2spillmask  [Op_RegD] = &rms[4];
  idealreg2spillmask  [Op_RegP] = &rms[5];

  idealreg2debugmask  [Op_RegN] = &rms[6];
  idealreg2debugmask  [Op_RegI] = &rms[7];
  idealreg2debugmask  [Op_RegL] = &rms[8];
  idealreg2debugmask  [Op_RegF] = &rms[9];
  idealreg2debugmask  [Op_RegD] = &rms[10];
  idealreg2debugmask  [Op_RegP] = &rms[11];

  idealreg2mhdebugmask[Op_RegN] = &rms[12];
  idealreg2mhdebugmask[Op_RegI] = &rms[13];
  idealreg2mhdebugmask[Op_RegL] = &rms[14];
  idealreg2mhdebugmask[Op_RegF] = &rms[15];
  idealreg2mhdebugmask[Op_RegD] = &rms[16];
  idealreg2mhdebugmask[Op_RegP] = &rms[17];

  idealreg2spillmask  [Op_VecA] = &rms[18];
  idealreg2spillmask  [Op_VecS] = &rms[19];
  idealreg2spillmask  [Op_VecD] = &rms[20];
  idealreg2spillmask  [Op_VecX] = &rms[21];
  idealreg2spillmask  [Op_VecY] = &rms[22];
  idealreg2spillmask  [Op_VecZ] = &rms[23];

  idealreg2debugmask  [Op_VecA] = &rms[24];
  idealreg2debugmask  [Op_VecS] = &rms[25];
  idealreg2debugmask  [Op_VecD] = &rms[26];
  idealreg2debugmask  [Op_VecX] = &rms[27];
  idealreg2debugmask  [Op_VecY] = &rms[28];
  idealreg2debugmask  [Op_VecZ] = &rms[29];

  idealreg2mhdebugmask[Op_VecA] = &rms[30];
  idealreg2mhdebugmask[Op_VecS] = &rms[31];
  idealreg2mhdebugmask[Op_VecD] = &rms[32];
  idealreg2mhdebugmask[Op_VecX] = &rms[33];
  idealreg2mhdebugmask[Op_VecY] = &rms[34];
  idealreg2mhdebugmask[Op_VecZ] = &rms[35];

  idealreg2spillmask  [Op_RegVectMask] = &rms[36];
  idealreg2debugmask  [Op_RegVectMask] = &rms[37];
  idealreg2mhdebugmask[Op_RegVectMask] = &rms[38];

  OptoReg::Name i;

  // At first, start with the empty mask
  C->FIRST_STACK_mask().Clear();

  // Add in the incoming argument area
  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }

  // Add in all bits past the outgoing argument area
  guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
            "must be able to represent all call arguments in reg mask");
  OptoReg::Name init = _out_arg_limit;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Finally, set the "infinite stack" bit.
  C->FIRST_STACK_mask().set_AllStack();

  // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
  RegMask aligned_stack_mask = C->FIRST_STACK_mask();
  // Keep spill masks aligned.
  aligned_stack_mask.clear_to_pairs();
  assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
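  // clear_to_pairs drops any stack slot whose adjacent partner is not in the
  // mask, so pair-sized (long/double) spills always start on aligned slots.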
  RegMask scalable_stack_mask = aligned_stack_mask;

  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
#ifdef _LP64
  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
   idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
   idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
#else
   idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
#endif
  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
   idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
   idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
   idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
   idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);

  if (Matcher::has_predicated_vectors()) {
    *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
     idealreg2spillmask[Op_RegVectMask]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_RegVectMask] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_BYTE,4)) {
    *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
     idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
  } else {
    *idealreg2spillmask[Op_VecS] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
    // RA guarantees such alignment since it is needed for Double and Long values.
    *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
     idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecD] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    // For VecX we need quad alignment and 16 bytes (4 slots) for spills.
    //
    // RA can use input arguments stack slots for spills but until RA
    // we don't know frame size and offset of input arg stack slots.
    //
    // Exclude last input arg stack slots to avoid spilling vectors there
    // otherwise vector spills could stomp over stack slots in caller frame.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
     idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecX] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    // For VecY we need octuple alignment and 32 bytes (8 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
     idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecY] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,16)) {
    // For VecZ we need 16-slot alignment and 64 bytes (16 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
     idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecZ] = RegMask::Empty;
  }

  if (Matcher::supports_scalable_vector()) {
    int k = 1;
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    if (Matcher::has_predicated_vectors()) {
      // Exclude last input arg stack slots to avoid spilling vector register there,
      // otherwise RegVectMask spills could stomp over stack slots in caller frame.
      for (; (in >= init_in) && (k < scalable_predicate_reg_slots()); k++) {
        scalable_stack_mask.Remove(in);
        in = OptoReg::add(in, -1);
      }

      // For RegVectMask
      scalable_stack_mask.clear_to_sets(scalable_predicate_reg_slots());
      assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
      *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
      idealreg2spillmask[Op_RegVectMask]->OR(scalable_stack_mask);
    }

    // Exclude last input arg stack slots to avoid spilling vector register there,
    // otherwise vector spills could stomp over stack slots in caller frame.
    for (; (in >= init_in) && (k < scalable_vector_reg_size(T_FLOAT)); k++) {
      scalable_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }

    // For VecA
     scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA);
     assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecA] = *idealreg2regmask[Op_VecA];
     idealreg2spillmask[Op_VecA]->OR(scalable_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecA] = RegMask::Empty;
  }

  if (UseFPUForSpilling) {
    // This mask logic assumes that the spill operations are
    // symmetric and that the registers involved are the same size.
    // On SPARC, for instance, we may have to use 64-bit moves, which
    // will kill 2 registers when used with F0-F31.
    idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
    idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
#else
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
#ifdef ARM
    // ARM has support for moving 64bit values between a pair of
    // integer registers and a double register
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
#endif
#endif
  }

  // Make up debug masks.  Any spill slot plus callee-save (SOE) registers.
  // Caller-save (SOC, AS) registers are assumed to be trashable by the various
  // inline-cache fixup routines.
  *idealreg2debugmask  [Op_RegN] = *idealreg2spillmask[Op_RegN];
  *idealreg2debugmask  [Op_RegI] = *idealreg2spillmask[Op_RegI];
  *idealreg2debugmask  [Op_RegL] = *idealreg2spillmask[Op_RegL];
  *idealreg2debugmask  [Op_RegF] = *idealreg2spillmask[Op_RegF];
  *idealreg2debugmask  [Op_RegD] = *idealreg2spillmask[Op_RegD];
  *idealreg2debugmask  [Op_RegP] = *idealreg2spillmask[Op_RegP];
  *idealreg2debugmask  [Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask];

  *idealreg2debugmask  [Op_VecA] = *idealreg2spillmask[Op_VecA];
  *idealreg2debugmask  [Op_VecS] = *idealreg2spillmask[Op_VecS];
  *idealreg2debugmask  [Op_VecD] = *idealreg2spillmask[Op_VecD];
  *idealreg2debugmask  [Op_VecX] = *idealreg2spillmask[Op_VecX];
  *idealreg2debugmask  [Op_VecY] = *idealreg2spillmask[Op_VecY];
  *idealreg2debugmask  [Op_VecZ] = *idealreg2spillmask[Op_VecZ];

  *idealreg2mhdebugmask[Op_RegN] = *idealreg2spillmask[Op_RegN];
  *idealreg2mhdebugmask[Op_RegI] = *idealreg2spillmask[Op_RegI];
  *idealreg2mhdebugmask[Op_RegL] = *idealreg2spillmask[Op_RegL];
  *idealreg2mhdebugmask[Op_RegF] = *idealreg2spillmask[Op_RegF];
  *idealreg2mhdebugmask[Op_RegD] = *idealreg2spillmask[Op_RegD];
  *idealreg2mhdebugmask[Op_RegP] = *idealreg2spillmask[Op_RegP];
  *idealreg2mhdebugmask[Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask];

  *idealreg2mhdebugmask[Op_VecA] = *idealreg2spillmask[Op_VecA];
  *idealreg2mhdebugmask[Op_VecS] = *idealreg2spillmask[Op_VecS];
  *idealreg2mhdebugmask[Op_VecD] = *idealreg2spillmask[Op_VecD];
  *idealreg2mhdebugmask[Op_VecX] = *idealreg2spillmask[Op_VecX];
  *idealreg2mhdebugmask[Op_VecY] = *idealreg2spillmask[Op_VecY];
  *idealreg2mhdebugmask[Op_VecZ] = *idealreg2spillmask[Op_VecZ];

  // Prevent stub compilations from attempting to reference
  // callee-saved (SOE) registers from debug info
  bool exclude_soe = !Compile::current()->is_method_compilation();
  RegMask* caller_save_mask = exclude_soe ? &caller_save_regmask_exclude_soe : &caller_save_regmask;
  RegMask* mh_caller_save_mask = exclude_soe ? &mh_caller_save_regmask_exclude_soe : &mh_caller_save_regmask;

  idealreg2debugmask[Op_RegN]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegI]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegL]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegF]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegD]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegP]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegVectMask]->SUBTRACT(*caller_save_mask);

  idealreg2debugmask[Op_VecA]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecS]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecD]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecX]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecY]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecZ]->SUBTRACT(*caller_save_mask);

  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegVectMask]->SUBTRACT(*mh_caller_save_mask);

  idealreg2mhdebugmask[Op_VecA]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecS]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecD]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecX]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecY]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecZ]->SUBTRACT(*mh_caller_save_mask);
}

//---------------------------is_save_on_entry----------------------------------
bool Matcher::is_save_on_entry(int reg) {
  return
    _register_save_policy[reg] == 'E' ||
    _register_save_policy[reg] == 'A'; // Save-on-entry register?
}

//---------------------------Fixup_Save_On_Entry-------------------------------
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();

  Node *root = C->root();       // Short name for root
  // Count number of save-on-entry registers.
  uint soe_cnt = number_of_saved_registers();
  uint i;

  // Find the procedure Start Node
  StartNode *start = C->start();
  assert( start, "Expect a start node" );

  // Input RegMask array shared by all Returns.
  // The type for doubles and longs has a count of 2, but
  // there is only 1 returned value
  uint ret_edge_cnt = C->tf()->range_cc()->cnt();
  RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  for (i = TypeFunc::Parms; i < ret_edge_cnt; i++) {
    ret_rms[i] = _return_values_mask[i-TypeFunc::Parms];
  }

  // Input RegMask array shared by all Rethrows.
  uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes exception oop only, but in the argument 0 slot.
  OptoReg::Name reg = find_receiver();
  if (reg >= 0) {
    reth_rms[TypeFunc::Parms] = mreg2regmask[reg];
#ifdef _LP64
    // Need two slots for ptrs in 64-bit land
    reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(reg), 1));
#endif
  }

  // Input RegMask array shared by all TailCalls
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all TailJumps
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // TailCalls have 2 returned values (target & moop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailCall to extract these masks and put the correct masks into
  // the tail_call_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // TailJumps have 2 returned values (target & ex_oop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailJump to extract these masks and put the correct masks into
  // the tail_jump_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // Input RegMask array shared by all Halts
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Capture the return input masks into each exit flavor
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Op_Return   : exit->_in_rms = ret_rms;  break;
      case Op_Rethrow  : exit->_in_rms = reth_rms; break;
      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Op_Halt     : exit->_in_rms = halt_rms; break;
      default          : ShouldNotReachHere();
    }
  }

  // Next unused projection number from Start.
  int proj_cnt = C->tf()->domain_cc()->cnt();

  // Do all the save-on-entry registers.  Make projections from Start for
  // them, and give them a use at the exit points.  To the allocator, they
  // look like incoming register arguments.
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {

      // Add the save-on-entry to the mask array
      ret_rms      [      ret_edge_cnt] = mreg2regmask[i];
      reth_rms     [     reth_edge_cnt] = mreg2regmask[i];
      tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      // Halts need the SOE registers, but only in the stack as debug info.
      // A just-prior uncommon-trap or deoptimization will use the SOE regs.
      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];

      Node *mproj;

      // Is this a RegF low half of a RegD?  Double up 2 adjacent RegF's
      // into a single RegD.
      if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegF &&
          _register_save_type[i+1] == Op_RegF &&
          is_save_on_entry(i+1) ) {
        // Add other bit for double
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
        proj_cnt += 2;          // Skip 2 for doubles
      }
      else if( (i&1) == 1 &&    // Else check for high half of double
               _register_save_type[i-1] == Op_RegF &&
               _register_save_type[i  ] == Op_RegF &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      }
      // Is this a RegI low half of a RegL?  Double up 2 adjacent RegI's
      // into a single RegL.
      else if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegI &&
          _register_save_type[i+1] == Op_RegI &&
        is_save_on_entry(i+1) ) {
        // Add other bit for long
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
        proj_cnt += 2;          // Skip 2 for longs
      }
      else if( (i&1) == 1 &&    // Else check for high half of long
               _register_save_type[i-1] == Op_RegI &&
               _register_save_type[i  ] == Op_RegI &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      } else {
        // Make a projection for it off the Start
        mproj = new MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
      }

      ret_edge_cnt ++;
      reth_edge_cnt ++;
      tail_call_edge_cnt ++;
      tail_jump_edge_cnt ++;
      halt_edge_cnt ++;

      // Add a use of the SOE register to all exit paths
      for( uint j=1; j < root->req(); j++ )
        root->in(j)->add_req(mproj);
    } // End of if a save-on-entry register
  } // End of for all machine registers
}

//------------------------------init_spill_mask--------------------------------
void Matcher::init_spill_mask( Node *ret ) {
  if( idealreg2regmask[Op_RegI] ) return; // One time only init

  OptoReg::c_frame_pointer = c_frame_pointer();
  c_frame_ptr_mask = c_frame_pointer();
#ifdef _LP64
  // pointers are twice as big
  c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif

  // Start at OptoReg::stack0()
  STACK_ONLY_mask.Clear();
  OptoReg::Name init = OptoReg::stack2reg(0);
  // STACK_ONLY_mask is all stack bits
  OptoReg::Name i;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    STACK_ONLY_mask.Insert(i);
  // Also set the "infinite stack" bit.
  STACK_ONLY_mask.set_AllStack();

  for (i = OptoReg::Name(0); i < OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i, 1)) {
    // Copy the register names over into the shared world.
    // SharedInfo::regName[i] = regName[i];
    // Handy RegMasks per machine register
    mreg2regmask[i].Insert(i);

    // Set up regmasks used to exclude save-on-call (and always-save) registers from debug masks.
    if (_register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A') {
      caller_save_regmask.Insert(i);
      mh_caller_save_regmask.Insert(i);
    }
    // Exclude save-on-entry registers from debug masks for stub compilations.
    if (_register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A' ||
        _register_save_policy[i] == 'E') {
      caller_save_regmask_exclude_soe.Insert(i);
      mh_caller_save_regmask_exclude_soe.Insert(i);
    }
  }

  // Also exclude the register we use to save the SP for MethodHandle
  // invokes from the corresponding MH debug masks
  const RegMask sp_save_mask = method_handle_invoke_SP_save_mask();
  mh_caller_save_regmask.OR(sp_save_mask);
  mh_caller_save_regmask_exclude_soe.OR(sp_save_mask);

  // Grab the Frame Pointer
  Node *fp  = ret->in(TypeFunc::FramePtr);
  // Share frame pointer while making spill ops
  set_shared(fp);

// Get the ADLC notion of the right regmask, for each basic type.
#ifdef _LP64
  idealreg2regmask[Op_RegN] = regmask_for_ideal_register(Op_RegN, ret);
#endif
  idealreg2regmask[Op_RegI] = regmask_for_ideal_register(Op_RegI, ret);
  idealreg2regmask[Op_RegP] = regmask_for_ideal_register(Op_RegP, ret);
  idealreg2regmask[Op_RegF] = regmask_for_ideal_register(Op_RegF, ret);
  idealreg2regmask[Op_RegD] = regmask_for_ideal_register(Op_RegD, ret);
  idealreg2regmask[Op_RegL] = regmask_for_ideal_register(Op_RegL, ret);
  idealreg2regmask[Op_VecA] = regmask_for_ideal_register(Op_VecA, ret);
  idealreg2regmask[Op_VecS] = regmask_for_ideal_register(Op_VecS, ret);
  idealreg2regmask[Op_VecD] = regmask_for_ideal_register(Op_VecD, ret);
  idealreg2regmask[Op_VecX] = regmask_for_ideal_register(Op_VecX, ret);
  idealreg2regmask[Op_VecY] = regmask_for_ideal_register(Op_VecY, ret);
  idealreg2regmask[Op_VecZ] = regmask_for_ideal_register(Op_VecZ, ret);
  idealreg2regmask[Op_RegVectMask] = regmask_for_ideal_register(Op_RegVectMask, ret);
}

#ifdef ASSERT
static void match_alias_type(Compile* C, Node* n, Node* m) {
  if (!VerifyAliases)  return;  // do not go looking for trouble by default
  const TypePtr* nat = n->adr_type();
  const TypePtr* mat = m->adr_type();
  int nidx = C->get_alias_index(nat);
  int midx = C->get_alias_index(mat);
  // Detune the assert for cases like (AndI 0xFF (LoadB p)).
  if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
    for (uint i = 1; i < n->req(); i++) {
      Node* n1 = n->in(i);
      const TypePtr* n1at = n1->adr_type();
      if (n1at != NULL) {
        nat = n1at;
        nidx = C->get_alias_index(n1at);
      }
    }
  }
  // %%% Kludgery.  Instead, fix ideal adr_type methods for all these cases:
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
    switch (n->Opcode()) {
    case Op_PrefetchAllocation:
      nidx = Compile::AliasIdxRaw;
      nat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_ClearArray:
      midx = Compile::AliasIdxRaw;
      mat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
    switch (n->Opcode()) {
    case Op_Return:
    case Op_Rethrow:
    case Op_Halt:
    case Op_TailCall:
    case Op_TailJump:
      nidx = Compile::AliasIdxBot;
      nat = TypePtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_AryEq:
    case Op_CountPositives:
    case Op_MemBarVolatile:
    case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_OnSpinWait:
    case Op_EncodeISOArray:
      nidx = Compile::AliasIdxTop;
      nat = NULL;
      break;
    }
  }
  if (nidx != midx) {
    if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
      tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
      n->dump();
      m->dump();
    }
    assert(C->subsume_loads() && C->must_alias(nat, midx),
           "must not lose alias info when matching");
  }
}
#endif

//------------------------------xform------------------------------------------
// Given a Node in old-space, Match him (Label/Reduce) to produce a machine
// Node in new-space.  Given a new-space Node, recursively walk his children.
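// The walk below is an explicit two-state DFS: Visit matches or clones a
// node and pushes its inputs; Post_Visit wires the transformed input back
// into its (already transformed) parent.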
1129 Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
1130 Node *Matcher::xform( Node *n, int max_stack ) {
1131   // Use one stack to keep both: child's node/state and parent's node/index
1132   MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
1133   mstack.push(n, Visit, NULL, -1);  // set NULL as parent to indicate root
1134   while (mstack.is_nonempty()) {
1135     C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
1136     if (C->failing()) return NULL;
1137     n = mstack.node();          // Leave node on stack
1138     Node_State nstate = mstack.state();
1139     if (nstate == Visit) {
1140       mstack.set_state(Post_Visit);
1141       Node *oldn = n;
1142       // Old-space or new-space check
1143       if (!C->node_arena()->contains(n)) {
1144         // Old space!
1145         Node* m;
1146         if (has_new_node(n)) {  // Not yet Label/Reduced
1147           m = new_node(n);
1148         } else {
1149           if (!is_dontcare(n)) { // Matcher can match this guy
1150             // Calls match special.  They match alone with no children.
1151             // Their children, the incoming arguments, match normally.
1152             m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
1153             if (C->failing())  return NULL;
1154             if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
1155             if (n->is_MemBar()) {
1156               m->as_MachMemBar()->set_adr_type(n->adr_type());
1157             }
1158           } else {                  // Nothing the matcher cares about
1159             if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) {       // Projections?
1160               // Convert to machine-dependent projection
1161               RegMask* mask = NULL;
1162               if (n->in(0)->is_Call() && n->in(0)->as_Call()->tf()->returns_inline_type_as_fields()) {
1163                 mask = return_values_mask(n->in(0)->as_Call()->tf());
1164               }
1165               m = n->in(0)->as_Multi()->match(n->as_Proj(), this, mask);
1166               NOT_PRODUCT(record_new2old(m, n);)
1167               if (m->in(0) != NULL) // m might be top
1168                 collect_null_checks(m, n);
1169             } else {                // Else just a regular 'ol guy
1170               m = n->clone();       // So just clone into new-space
1171               NOT_PRODUCT(record_new2old(m, n);)
1172               // Def-Use edges will be added incrementally as Uses
1173               // of this node are matched.
1174               assert(m->outcnt() == 0, "no Uses of this clone yet");
1175             }
1176           }
1177 
1178           set_new_node(n, m);       // Map old to new
1179           if (_old_node_note_array != NULL) {
1180             Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
1181                                                   n->_idx);
1182             C->set_node_notes_at(m->_idx, nn);
1183           }
1184           debug_only(match_alias_type(C, n, m));
1185         }
1186         n = m;    // n is now a new-space node
1187         mstack.set_node(n);
1188       }
1189 
1190       // New space!
1191       if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())
1192 
1193       int i;
1194       // Put precedence edges on stack first (match them last).
1195       for (i = oldn->req(); (uint)i < oldn->len(); i++) {
1196         Node *m = oldn->in(i);
1197         if (m == NULL) break;
1198         // set -1 to call add_prec() instead of set_req() during Step1
1199         mstack.push(m, Visit, n, -1);
1200       }
1201 
1202       // Handle precedence edges for interior nodes
1203       for (i = n->len()-1; (uint)i >= n->req(); i--) {
1204         Node *m = n->in(i);
1205         if (m == NULL || C->node_arena()->contains(m)) continue;
1206         n->rm_prec(i);
1207         // set -1 to call add_prec() instead of set_req() during Step1
1208         mstack.push(m, Visit, n, -1);
1209       }
1210 
1211       // For constant debug info, I'd rather have unmatched constants.
1212       int cnt = n->req();
1213       JVMState* jvms = n->jvms();
1214       int debug_cnt = jvms ? jvms->debug_start() : cnt;
1215 
1216       // Now do only debug info.  Clone constants rather than matching.
1217       // Constants are represented directly in the debug info without
1218       // the need for executable machine instructions.
1219       // Monitor boxes are also represented directly.
1220       for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
1221         Node *m = n->in(i);          // Get input
1222         int op = m->Opcode();
1223         assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
1224         if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
1225             op == Op_ConF || op == Op_ConD || op == Op_ConL
1226             // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
1227             ) {
1228           m = m->clone();
1229           NOT_PRODUCT(record_new2old(m, n));
1230           mstack.push(m, Post_Visit, n, i); // Don't need to visit
1231           mstack.push(m->in(0), Visit, m, 0);
1232         } else {
1233           mstack.push(m, Visit, n, i);
1234         }
1235       }
1236 
1237       // And now walk his children, and convert his inputs to new-space.
1238       for( ; i >= 0; --i ) { // For all normal inputs do
1239         Node *m = n->in(i);  // Get input
1240         if(m != NULL)
1241           mstack.push(m, Visit, n, i);
1242       }
1243 
1244     }
1245     else if (nstate == Post_Visit) {
1246       // Set xformed input
1247       Node *p = mstack.parent();
1248       if (p != NULL) { // root doesn't have parent
1249         int i = (int)mstack.index();
1250         if (i >= 0)
1251           p->set_req(i, n); // required input
1252         else if (i == -1)
1253           p->add_prec(n);   // precedence input
1254         else
1255           ShouldNotReachHere();
1256       }
1257       mstack.pop(); // remove processed node from stack
1258     }
1259     else {
1260       ShouldNotReachHere();
1261     }
1262   } // while (mstack.is_nonempty())
1263   return n; // Return new-space Node
1264 }
1265 
1266 //------------------------------warp_outgoing_stk_arg------------------------
1267 OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
1268   // Convert outgoing argument location to a pre-biased stack offset
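       // E.g. (an illustrative sketch): a VMReg naming outgoing stack slot k
       // maps to OptoReg begin_out_arg_area + k, and out_arg_limit_per_call is
       // raised past it so the allocator knows this call site kills that slot.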
1269   if (reg->is_stack()) {
1270     OptoReg::Name warped = reg->reg2stack();
1271     // Adjust the stack slot offset to be the register number used
1272     // by the allocator.
1273     warped = OptoReg::add(begin_out_arg_area, warped);
1274     // Keep track of the largest numbered stack slot used for an arg.
1275     // Largest used slot per call-site indicates the amount of stack
1276     // that is killed by the call.
1277     if( warped >= out_arg_limit_per_call )
1278       out_arg_limit_per_call = OptoReg::add(warped,1);
1279     if (!RegMask::can_represent_arg(warped)) {
1280       C->record_method_not_compilable("unsupported calling sequence");
1281       return OptoReg::Bad;
1282     }
1283     return warped;
1284   }
1285   return OptoReg::as_OptoReg(reg);
1286 }
1287 
1288 
1289 //------------------------------match_sfpt-------------------------------------
1290 // Helper function to match call instructions.  Calls match special.
1291 // They match alone with no children.  Their children, the incoming
1292 // arguments, match normally.
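     // An illustrative sketch: for a call such as
     //   CallStaticJava (ctrl, i_o, mem, frameptr, retadr, arg0, ...)
     // only the CallStaticJava itself is matched here; the incoming arguments
     // were matched as ordinary trees, and below we compute the outgoing
     // register mask each argument must land in at this call site.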
1293 MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
1294   MachSafePointNode *msfpt = NULL;
1295   MachCallNode      *mcall = NULL;
1296   uint               cnt;
1297   // Split out case for SafePoint vs Call
1298   CallNode *call;
1299   const TypeTuple *domain;
1300   ciMethod*        method = NULL;
1301   bool             is_method_handle_invoke = false;  // for special kill effects
1302   if( sfpt->is_Call() ) {
1303     call = sfpt->as_Call();
1304     domain = call->tf()->domain_cc();
1305     cnt = domain->cnt();
1306 
1307     // Match just the call, nothing else
1308     MachNode *m = match_tree(call);
1309     if (C->failing())  return NULL;
1310     if( m == NULL ) { Matcher::soft_match_failure(); return NULL; }
1311 
1312     // Copy data from the Ideal SafePoint to the machine version
1313     mcall = m->as_MachCall();
1314 
1315     mcall->set_tf(                  call->tf());
1316     mcall->set_entry_point(         call->entry_point());
1317     mcall->set_cnt(                 call->cnt());
1318     mcall->set_guaranteed_safepoint(call->guaranteed_safepoint());
1319 
1320     if( mcall->is_MachCallJava() ) {
1321       MachCallJavaNode *mcall_java  = mcall->as_MachCallJava();
1322       const CallJavaNode *call_java =  call->as_CallJava();
1323       assert(call_java->validate_symbolic_info(), "inconsistent info");
1324       method = call_java->method();
1325       mcall_java->_method = method;
1326       mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
1327       is_method_handle_invoke = call_java->is_method_handle_invoke();
1328       mcall_java->_method_handle_invoke = is_method_handle_invoke;
1329       mcall_java->_override_symbolic_info = call_java->override_symbolic_info();
1330       mcall_java->_arg_escape = call_java->arg_escape();
1331       if (is_method_handle_invoke) {
1332         C->set_has_method_handle_invokes(true);
1333       }
1334       if( mcall_java->is_MachCallStaticJava() )
1335         mcall_java->as_MachCallStaticJava()->_name =
1336          call_java->as_CallStaticJava()->_name;
1337       if( mcall_java->is_MachCallDynamicJava() )
1338         mcall_java->as_MachCallDynamicJava()->_vtable_index =
1339          call_java->as_CallDynamicJava()->_vtable_index;
1340     }
1341     else if( mcall->is_MachCallRuntime() ) {
1342       MachCallRuntimeNode* mach_call_rt = mcall->as_MachCallRuntime();
1343       mach_call_rt->_name = call->as_CallRuntime()->_name;
1344       mach_call_rt->_leaf_no_fp = call->is_CallLeafNoFP();
1345     }
1346     msfpt = mcall;
1347   }
1348   // This is a non-call safepoint
1349   else {
1350     call = NULL;
1351     domain = NULL;
1352     MachNode *mn = match_tree(sfpt);
1353     if (C->failing())  return NULL;
1354     msfpt = mn->as_MachSafePoint();
1355     cnt = TypeFunc::Parms;
1356   }
1357   msfpt->_has_ea_local_in_scope = sfpt->has_ea_local_in_scope();
1358 
1359   // Advertise the correct memory effects (for anti-dependence computation).
1360   msfpt->set_adr_type(sfpt->adr_type());
1361 
1362   // Allocate a private array of RegMasks.  These RegMasks are not shared.
1363   msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
1364   // Empty them all.
1365   for (uint i = 0; i < cnt; i++) ::new (&(msfpt->_in_rms[i])) RegMask();
1366 
1367   // Do all the pre-defined non-Empty register masks
1368   msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
1369   msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;
1370 
1371   // First place an outgoing argument can possibly be put.
1372   OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
1373   assert( is_even(begin_out_arg_area), "" );
1374   // Compute max outgoing register number per call site.
1375   OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
1376   // Calls to C may hammer extra stack slots above and beyond any arguments.
1377   // These are usually backing store for register arguments for varargs.
1378   if( call != NULL && call->is_CallRuntime() )
1379     out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());
1380 
1381 
1382   // Do the normal argument list (parameters) register masks
1383   // A null entry point is a special case where the target of the call
1384   // is in a register.
1385   int adj = (call != NULL && call->entry_point() == NULL) ? 1 : 0;
1386   int argcnt = cnt - TypeFunc::Parms - adj;
1387   if( argcnt > 0 ) {          // Skip it all if we have no args
1388     BasicType *sig_bt  = NEW_RESOURCE_ARRAY( BasicType, argcnt );
1389     VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
1390     int i;
1391     for( i = 0; i < argcnt; i++ ) {
1392       sig_bt[i] = domain->field_at(i+TypeFunc::Parms+adj)->basic_type();
1393     }
1394     // V-call to pick proper calling convention
1395     call->calling_convention( sig_bt, parm_regs, argcnt );
1396 
1397 #ifdef ASSERT
1398     // Sanity check users' calling convention.  Really handy during
1399     // the initial porting effort.  Fairly expensive otherwise.
1400     { for (int i = 0; i<argcnt; i++) {
1401       if( !parm_regs[i].first()->is_valid() &&
1402           !parm_regs[i].second()->is_valid() ) continue;
1403       VMReg reg1 = parm_regs[i].first();
1404       VMReg reg2 = parm_regs[i].second();
1405       for (int j = 0; j < i; j++) {
1406         if( !parm_regs[j].first()->is_valid() &&
1407             !parm_regs[j].second()->is_valid() ) continue;
1408         VMReg reg3 = parm_regs[j].first();
1409         VMReg reg4 = parm_regs[j].second();
1410         if( !reg1->is_valid() ) {
1411           assert( !reg2->is_valid(), "valid halvsies" );
1412         } else if( !reg3->is_valid() ) {
1413           assert( !reg4->is_valid(), "valid halvsies" );
1414         } else {
1415           assert( reg1 != reg2, "calling conv. must produce distinct regs");
1416           assert( reg1 != reg3, "calling conv. must produce distinct regs");
1417           assert( reg1 != reg4, "calling conv. must produce distinct regs");
1418           assert( reg2 != reg3, "calling conv. must produce distinct regs");
1419           assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
1420           assert( reg3 != reg4, "calling conv. must produce distinct regs");
1421         }
1422       }
1423     }
1424     }
1425 #endif
1426 
1427     // Visit each argument.  Compute its outgoing register mask.
1428     // Return results can now span two registers, i.e., two mask bits.
1429     // Compute max over all outgoing arguments both per call-site
1430     // and over the entire method.
1431     for( i = 0; i < argcnt; i++ ) {
1432       // Address of incoming argument mask to fill in
1433       RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms+adj];
1434       VMReg first = parm_regs[i].first();
1435       VMReg second = parm_regs[i].second();
1436       if(!first->is_valid() &&
1437          !second->is_valid()) {
1438         continue;               // Avoid Halves
1439       }
1440       // Handle case where arguments are in vector registers.
1441       if(call->in(TypeFunc::Parms + i)->bottom_type()->isa_vect()) {
1442         OptoReg::Name reg_fst = OptoReg::as_OptoReg(first);
1443         OptoReg::Name reg_snd = OptoReg::as_OptoReg(second);
1444         assert (reg_fst <= reg_snd, "fst=%d snd=%d", reg_fst, reg_snd);
1445         for (OptoReg::Name r = reg_fst; r <= reg_snd; r++) {
1446           rm->Insert(r);
1447         }
1448       }
1449       // Grab first register, adjust stack slots and insert in mask.
1450       OptoReg::Name reg1 = warp_outgoing_stk_arg(first, begin_out_arg_area, out_arg_limit_per_call );
1451       if (OptoReg::is_valid(reg1)) {
1452         rm->Insert( reg1 );
1453       }
1454       // Grab second register (if any), adjust stack slots and insert in mask.
1455       OptoReg::Name reg2 = warp_outgoing_stk_arg(second, begin_out_arg_area, out_arg_limit_per_call );
1456       if (OptoReg::is_valid(reg2)) {
1457         rm->Insert( reg2 );
1458       }
1459     } // End of for all arguments
1460   }
1461 
1462   // Compute the max stack slot killed by any call.  These will not be
1463   // available for debug info, and will be used to adjust FIRST_STACK_mask
1464   // after all call sites have been visited.
1465   if( _out_arg_limit < out_arg_limit_per_call)
1466     _out_arg_limit = out_arg_limit_per_call;
1467 
1468   if (mcall) {
1469     // Kill the outgoing argument area, including any non-argument holes and
1470     // any legacy C-killed slots.  Use Fat-Projections to do the killing.
1471     // Since the max-per-method covers the max-per-call-site and debug info
1472     // is excluded on the max-per-method basis, debug info cannot land in
1473     // this killed area.
1474     uint r_cnt = mcall->tf()->range_sig()->cnt();
1475     MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
1476     if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) {
1477       C->record_method_not_compilable("unsupported outgoing calling sequence");
1478     } else {
1479       for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
1480         proj->_rout.Insert(OptoReg::Name(i));
1481     }
1482     if (proj->_rout.is_NotEmpty()) {
1483       push_projection(proj);
1484     }
1485   }
1486   // Transfer the safepoint information from the call to the mcall
1487   // Move the JVMState list
1488   msfpt->set_jvms(sfpt->jvms());
1489   for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
1490     jvms->set_map(sfpt);
1491   }
1492 
1493   // Debug inputs begin just after the last incoming parameter
1494   assert((mcall == NULL) || (mcall->jvms() == NULL) ||
1495          (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain_cc()->cnt()), "");
1496 
1497   // Add additional edges.
1498   if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
1499     // For these calls we cannot add MachConstantBase in expand(), as the
1500     // ins are not complete then.
1501     msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
1502     if (msfpt->jvms() &&
1503         msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
1504       // We added an edge before jvms, so we must adapt the position of the ins.
1505       msfpt->jvms()->adapt_position(+1);
1506     }
1507   }
1508 
1509   // Registers killed by the call are set in the local scheduling pass
1510   // of Global Code Motion.
1511   return msfpt;
1512 }
1513 
1514 //---------------------------match_tree----------------------------------------
1515 // Match an Ideal Node DAG - turn it into a tree; Label & Reduce.  Used as part
1516 // of the wholesale conversion from Ideal to Mach Nodes.  Also used for
1517 // making GotoNodes while building the CFG and in init_spill_mask() to identify
1518 // a Load's result RegMask for memoization in idealreg2regmask[].
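     // A sketch of the two phases (node shapes are schematic): for
     //   (AddI (LoadI mem adr) (ConI 1))
     // Label_Root runs the ADLC-generated DFA bottom-up, recording for each
     // operand and instruction rule the cheapest cost at which it covers every
     // subtree; ReduceInst then rebuilds the cheapest instruction rule at the
     // root into a MachNode, e.g. a memory-form add on x86 that swallows the
     // LoadI.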
1519 MachNode *Matcher::match_tree( const Node *n ) {
1520   assert( n->Opcode() != Op_Phi, "cannot match" );
1521   assert( !n->is_block_start(), "cannot match" );
1522   // Set the mark for all locally allocated State objects.
1523   // When this call returns, the _states_arena arena will be reset
1524   // freeing all State objects.
1525   ResourceMark rm( &_states_arena );
1526 
1527   LabelRootDepth = 0;
1528 
1529   // StoreNodes require their Memory input to match any LoadNodes
1530   Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
1531 #ifdef ASSERT
1532   Node* save_mem_node = _mem_node;
1533   _mem_node = n->is_Store() ? (Node*)n : NULL;
1534 #endif
1535   // State object for root node of match tree
1536   // Allocate it on _states_arena - stack allocation can cause stack overflow.
1537   State *s = new (&_states_arena) State;
1538   s->_kids[0] = NULL;
1539   s->_kids[1] = NULL;
1540   s->_leaf = (Node*)n;
1541   // Label the input tree, allocating labels from top-level arena
1542   Node* root_mem = mem;
1543   Label_Root(n, s, n->in(0), root_mem);
1544   if (C->failing())  return NULL;
1545 
1546   // The minimum cost match for the whole tree is found at the root State
1547   uint mincost = max_juint;
1548   uint cost = max_juint;
1549   uint i;
1550   for (i = 0; i < NUM_OPERANDS; i++) {
1551     if (s->valid(i) &&               // valid entry and
1552         s->cost(i) < cost &&         // low cost and
1553         s->rule(i) >= NUM_OPERANDS) {// not an operand
1554       mincost = i;
1555       cost = s->cost(i);
1556     }
1557   }
1558   if (mincost == max_juint) {
1559 #ifndef PRODUCT
1560     tty->print("No matching rule for:");
1561     s->dump();
1562 #endif
1563     Matcher::soft_match_failure();
1564     return NULL;
1565   }
1566   // Reduce input tree based upon the state labels to machine Nodes
1567   MachNode *m = ReduceInst(s, s->rule(mincost), mem);
1568   // New-to-old mapping is done in ReduceInst, to cover complex instructions.
1569   NOT_PRODUCT(_old2new_map.map(n->_idx, m);)
1570 
1571   // Add any Matcher-ignored edges
1572   uint cnt = n->req();
1573   uint start = 1;
1574   if( mem != (Node*)1 ) start = MemNode::Memory+1;
1575   if( n->is_AddP() ) {
1576     assert( mem == (Node*)1, "" );
1577     start = AddPNode::Base+1;
1578   }
1579   for( i = start; i < cnt; i++ ) {
1580     if( !n->match_edge(i) ) {
1581       if( i < m->req() )
1582         m->ins_req( i, n->in(i) );
1583       else
1584         m->add_req( n->in(i) );
1585     }
1586   }
1587 
1588   debug_only( _mem_node = save_mem_node; )
1589   return m;
1590 }
1591 
1592 
1593 //------------------------------match_into_reg---------------------------------
1594 // Choose to either match this Node in a register or part of the current
1595 // match tree.  Return true for requiring a register and false for matching
1596 // as part of the current match tree.
1597 static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {
1598 
1599   const Type *t = m->bottom_type();
1600 
1601   if (t->singleton()) {
1602     // Never force constants into registers.  Allow them to match as
1603     // constants or registers.  Copies of the same value will share
1604     // the same register.  See find_shared_node.
1605     return false;
1606   } else {                      // Not a constant
1607     // Stop recursion if they have different Controls.
1608     Node* m_control = m->in(0);
1609     // The control of a load's memory can post-dominate the load's control.
1610     // So use it since load can't float above its memory.
1611     Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : NULL;
1612     if (control && m_control && control != m_control && control != mem_control) {
1613 
1614       // Actually, we can live with the most conservative control we
1615       // find, if it post-dominates the others.  This allows us to
1616       // pick up load/op/store trees where the load can float a little
1617       // above the store.
1618       Node *x = control;
1619       const uint max_scan = 6;  // Arbitrary scan cutoff
1620       uint j;
1621       for (j=0; j<max_scan; j++) {
1622         if (x->is_Region())     // Bail out at merge points
1623           return true;
1624         x = x->in(0);
1625         if (x == m_control)     // Does 'control' post-dominate
1626           break;                // m->in(0)?  If so, we can use it
1627         if (x == mem_control)   // Does 'control' post-dominate
1628           break;                // mem_control?  If so, we can use it
1629       }
1630       if (j == max_scan)        // No post-domination before scan end?
1631         return true;            // Then break the match tree up
1632     }
1633     if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) ||
1634         (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) {
1635       // These are commonly used in address expressions and can
1636       // efficiently fold into them on X64 in some cases.
1637       return false;
1638     }
1639   }
1640 
1641   // Not forceable cloning.  If shared, put it into a register.
1642   return shared;
1643 }
1644 
1645 
1646 //------------------------------Instruction Selection--------------------------
1647 // Label method walks a "tree" of nodes, using the ADLC generated DFA to match
1648 // ideal nodes to machine instructions.  Trees are delimited by shared Nodes,
1649 // things the Matcher does not match (e.g., Memory), and things with different
1650 // Controls (hence forced into different blocks).  We pass in the Control
1651 // selected for this entire State tree.
1652 
1653 // The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
1654 // Store and the Load must have identical Memories (as well as identical
1655 // pointers).  Since the Matcher does not have anything for Memory (and
1656 // does not handle DAGs), I have to match the Memory input myself.  If the
1657 // Tree root is a Store or if there are multiple Loads in the tree, I require
1658 // all Loads to have the identical memory.
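     // For example (illustrative): an x86 add-to-memory covers the DAG
     //   (StoreI mem adr (AddI (LoadI mem adr) val))
     // only when the Store and the Load see the identical mem and adr; if the
     // memory states differ, the Load below is forced out as its own match
     // root.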
1659 Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem) {
1660   // Since Label_Root is a recursive function, it's possible that we might run
1661   // out of stack space.  See bugs 6272980 & 6227033 for more info.
1662   LabelRootDepth++;
1663   if (LabelRootDepth > MaxLabelRootDepth) {
1664     C->record_method_not_compilable("Out of stack space, increase MaxLabelRootDepth");
1665     return NULL;
1666   }
1667   uint care = 0;                // Edges matcher cares about
1668   uint cnt = n->req();
1669   uint i = 0;
1670 
1671   // Examine children for memory state
1672   // Can only subsume a child into your match-tree if that child's memory state
1673   // is not modified along the path to another input.
1674   // It is unsafe even if the other inputs are separate roots.
1675   Node *input_mem = NULL;
1676   for( i = 1; i < cnt; i++ ) {
1677     if( !n->match_edge(i) ) continue;
1678     Node *m = n->in(i);         // Get ith input
1679     assert( m, "expect non-null children" );
1680     if( m->is_Load() ) {
1681       if( input_mem == NULL ) {
1682         input_mem = m->in(MemNode::Memory);
1683         if (mem == (Node*)1) {
1684           // Save this memory to bail out if there's another memory access
1685           // to a different memory location in the same tree.
1686           mem = input_mem;
1687         }
1688       } else if( input_mem != m->in(MemNode::Memory) ) {
1689         input_mem = NodeSentinel;
1690       }
1691     }
1692   }
1693 
1694   for( i = 1; i < cnt; i++ ){// For my children
1695     if( !n->match_edge(i) ) continue;
1696     Node *m = n->in(i);         // Get ith input
1697     // Allocate states out of a private arena
1698     State *s = new (&_states_arena) State;
1699     svec->_kids[care++] = s;
1700     assert( care <= 2, "binary only for now" );
1701 
1702     // Recursively label the State tree.
1703     s->_kids[0] = NULL;
1704     s->_kids[1] = NULL;
1705     s->_leaf = m;
1706 
1707     // Check for leaves of the State Tree; things that cannot be a part of
1708     // the current tree.  If it finds any, that value is matched as a
1709     // register operand.  If not, then the normal matching is used.
1710     if( match_into_reg(n, m, control, i, is_shared(m)) ||
1711         // Stop recursion if this is a LoadNode and there is another memory access
1712         // to a different memory location in the same tree (for example, a StoreNode
1713         // at the root of this tree or another LoadNode in one of the children).
1714         ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
1715         // Can NOT include the match of a subtree when its memory state
1716         // is used by any of the other subtrees
1717         (input_mem == NodeSentinel) ) {
1718       // Print when we exclude matching due to different memory states at input-loads
1719       if (PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
1720           && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem)) {
1721         tty->print_cr("invalid input_mem");
1722       }
1723       // Switch to a register-only opcode; this value must be in a register
1724       // and cannot be subsumed as part of a larger instruction.
1725       s->DFA( m->ideal_reg(), m );
1726 
1727     } else {
1728       // If match tree has no control and we do, adopt it for entire tree
1729       if( control == NULL && m->in(0) != NULL && m->req() > 1 )
1730         control = m->in(0);         // Pick up control
1731       // Else match as a normal part of the match tree.
1732       control = Label_Root(m, s, control, mem);
1733       if (C->failing()) return NULL;
1734     }
1735   }
1736 
1737   // Call DFA to match this node, and return
1738   svec->DFA( n->Opcode(), n );
1739 
1740 #ifdef ASSERT
1741   uint x;
1742   for( x = 0; x < _LAST_MACH_OPER; x++ )
1743     if( svec->valid(x) )
1744       break;
1745 
1746   if (x >= _LAST_MACH_OPER) {
1747     n->dump();
1748     svec->dump();
1749     assert( false, "bad AD file" );
1750   }
1751 #endif
1752   return control;
1753 }
1754 
1755 
1756 // Con nodes reduced using the same rule can share their MachNode
1757 // which reduces the number of copies of a constant in the final
1758 // program.  The register allocator is free to split uses later into
1759 // separate live ranges.
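     // E.g. (a sketch): a (ConI 10) used by several match trees is reduced
     // once; later reductions of the same node with the same rule return the
     // shared MachNode, so the constant is materialized once unless the
     // allocator later splits its live range.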
1760 MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
1761   if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return NULL;
1762 
1763   // See if this Con has already been reduced using this rule.
1764   if (_shared_nodes.Size() <= leaf->_idx) return NULL;
1765   MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
1766   if (last != NULL && rule == last->rule()) {
1767     // Don't expect control change for DecodeN
1768     if (leaf->is_DecodeNarrowPtr())
1769       return last;
1770     // Get the new space root.
1771     Node* xroot = new_node(C->root());
1772     if (xroot == NULL) {
1773       // This shouldn't happen given the order of matching.
1774       return NULL;
1775     }
1776 
1777     // Shared constants need to have their control be root so they
1778     // can be scheduled properly.
1779     Node* control = last->in(0);
1780     if (control != xroot) {
1781       if (control == NULL || control == C->root()) {
1782         last->set_req(0, xroot);
1783       } else {
1784         assert(false, "unexpected control");
1785         return NULL;
1786       }
1787     }
1788     return last;
1789   }
1790   return NULL;
1791 }
1792 
1793 
1794 //------------------------------ReduceInst-------------------------------------
1795 // Reduce a State tree (with given Control) into a tree of MachNodes.
1796 // This routine (and its cohort ReduceOper) converts Ideal Nodes into
1797 // complicated machine Nodes.  Each MachNode covers some tree of Ideal Nodes.
1798 // Each MachNode has a number of complicated MachOper operands; each
1799 // MachOper also covers a further tree of Ideal Nodes.
1800 
1801 // The root of the Ideal match tree is always an instruction, so we enter
1802 // the recursion here.  After building the MachNode, we need to recurse
1803 // the tree checking for these cases:
1804 // (1) Child is an instruction -
1805 //     Build the instruction (recursively), add it as an edge.
1806 //     Build a simple operand (register) to hold the result of the instruction.
1807 // (2) Child is an interior part of an instruction -
1808 //     Skip over it (do nothing)
1809 // (3) Child is the start of an operand -
1810 //     Build the operand, place it inside the instruction
1811 //     Call ReduceOper.
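     // E.g. (a sketch): reducing (AddI (LoadI mem adr) (ConI 1)) with an
     // add-from-memory rule yields a single MachNode; the address subtree of
     // the LoadI becomes a complex MachOper (case 3), interior nodes of that
     // operand are skipped (case 2), and a child matched as a separate
     // instruction would get its own MachNode plus a register operand to
     // carry its result (case 1).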
1812 MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
1813   assert( rule >= NUM_OPERANDS, "called with operand rule" );
1814 
1815   MachNode* shared_node = find_shared_node(s->_leaf, rule);
1816   if (shared_node != NULL) {
1817     return shared_node;
1818   }
1819 
1820   // Build the object to represent this state & prepare for recursive calls
1821   MachNode *mach = s->MachNodeGenerator(rule);
1822   guarantee(mach != NULL, "Missing MachNode");
1823   mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]);
1824   assert( mach->_opnds[0] != NULL, "Missing result operand" );
1825   Node *leaf = s->_leaf;
1826   NOT_PRODUCT(record_new2old(mach, leaf);)
1827   // Check for instruction or instruction chain rule
1828   if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
1829     assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
1830            "duplicating node that's already been matched");
1831     // Instruction
1832     mach->add_req( leaf->in(0) ); // Set initial control
1833     // Reduce interior of complex instruction
1834     ReduceInst_Interior( s, rule, mem, mach, 1 );
1835   } else {
1836     // Instruction chain rules are data-dependent on their inputs
1837     mach->add_req(0);             // Set initial control to none
1838     ReduceInst_Chain_Rule( s, rule, mem, mach );
1839   }
1840 
1841   // If a Memory was used, insert a Memory edge
1842   if( mem != (Node*)1 ) {
1843     mach->ins_req(MemNode::Memory,mem);
1844 #ifdef ASSERT
1845     // Verify adr type after matching memory operation
1846     const MachOper* oper = mach->memory_operand();
1847     if (oper != NULL && oper != (MachOper*)-1) {
1848       // It has a unique memory operand.  Find corresponding ideal mem node.
1849       Node* m = NULL;
1850       if (leaf->is_Mem()) {
1851         m = leaf;
1852       } else {
1853         m = _mem_node;
1854         assert(m != NULL && m->is_Mem(), "expecting memory node");
1855       }
1856       const Type* mach_at = mach->adr_type();
1857       // DecodeN node consumed by an address may have different type
1858       // than its input. Don't compare types for such case.
1859       if (m->adr_type() != mach_at &&
1860           (m->in(MemNode::Address)->is_DecodeNarrowPtr() ||
1861            (m->in(MemNode::Address)->is_AddP() &&
1862             m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()) ||
1863            (m->in(MemNode::Address)->is_AddP() &&
1864             m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
1865             m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()))) {
1866         mach_at = m->adr_type();
1867       }
1868       if (m->adr_type() != mach_at) {
1869         m->dump();
1870         tty->print_cr("mach:");
1871         mach->dump(1);
1872       }
1873       assert(m->adr_type() == mach_at, "matcher should not change adr type");
1874     }
1875 #endif
1876   }
1877 
1878   // If the _leaf is an AddP, insert the base edge
1879   if (leaf->is_AddP()) {
1880     mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
1881   }
1882 
1883   uint number_of_projections_prior = number_of_projections();
1884 
1885   // Perform any 1-to-many expansions required
1886   MachNode *ex = mach->Expand(s, _projection_list, mem);
1887   if (ex != mach) {
1888     assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
1889     if( ex->in(1)->is_Con() )
1890       ex->in(1)->set_req(0, C->root());
1891     // Remove old node from the graph
1892     for( uint i=0; i<mach->req(); i++ ) {
1893       mach->set_req(i,NULL);
1894     }
1895     NOT_PRODUCT(record_new2old(ex, s->_leaf);)
1896   }
1897 
1898   // PhaseChaitin::fixup_spills will sometimes generate spill code
1899   // via the matcher.  By that time, nodes have been wired into the CFG,
1900   // and any further nodes generated by expand rules will be left hanging
1901   // in space, and will not get emitted as output code.  Catch this.
1902   // Also, catch any new register allocation constraints ("projections")
1903   // generated belatedly during spill code generation.
1904   if (_allocation_started) {
1905     guarantee(ex == mach, "no expand rules during spill generation");
1906     guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1907   }
1908 
1909   if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1910     // Record the con for sharing
1911     _shared_nodes.map(leaf->_idx, ex);
1912   }
1913 
1914   // Have mach nodes inherit GC barrier data
1915   if (leaf->is_LoadStore()) {
1916     mach->set_barrier_data(leaf->as_LoadStore()->barrier_data());
1917   } else if (leaf->is_Mem()) {
1918     mach->set_barrier_data(leaf->as_Mem()->barrier_data());
1919   }
1920 
1921   return ex;
1922 }
1923 
1924 void Matcher::handle_precedence_edges(Node* n, MachNode *mach) {
1925   for (uint i = n->req(); i < n->len(); i++) {
1926     if (n->in(i) != NULL) {
1927       mach->add_prec(n->in(i));
1928     }
1929   }
1930 }
1931 
1932 void Matcher::ReduceInst_Chain_Rule(State* s, int rule, Node* &mem, MachNode* mach) {
1933   // 'op' is what I am expecting to receive
1934   int op = _leftOp[rule];
1935   // Operand type to catch the child's result.
1936   // This is what my child will give me.
1937   unsigned int opnd_class_instance = s->rule(op);
1938   // Choose between operand class or not.
1939   // This is what I will receive.
1940   int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1941   // New rule for child.  Chase operand classes to get the actual rule.
1942   unsigned int newrule = s->rule(catch_op);
1943 
1944   if (newrule < NUM_OPERANDS) {
1945     // Chain from operand or operand class, may be output of shared node
1946     assert(opnd_class_instance < NUM_OPERANDS, "Bad AD file: Instruction chain rule must chain from operand");
1947     // Insert operand into array of operands for this instruction
1948     mach->_opnds[1] = s->MachOperGenerator(opnd_class_instance);
1949 
1950     ReduceOper(s, newrule, mem, mach);
1951   } else {
1952     // Chain from the result of an instruction
1953     assert(newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1954     mach->_opnds[1] = s->MachOperGenerator(_reduceOp[catch_op]);
1955     Node *mem1 = (Node*)1;
1956     debug_only(Node *save_mem_node = _mem_node;)
1957     mach->add_req( ReduceInst(s, newrule, mem1) );
1958     debug_only(_mem_node = save_mem_node;)
1959   }
1960   return;
1961 }
1962 
1963 
1964 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1965   handle_precedence_edges(s->_leaf, mach);
1966 
1967   if( s->_leaf->is_Load() ) {
1968     Node *mem2 = s->_leaf->in(MemNode::Memory);
1969     assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1970     debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1971     mem = mem2;
1972   }
1973   if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
1974     if( mach->in(0) == NULL )
1975       mach->set_req(0, s->_leaf->in(0));
1976   }
1977 
1978   // Now recursively walk the state tree & add operand list.
1979   for( uint i=0; i<2; i++ ) {   // binary tree
1980     State *newstate = s->_kids[i];
1981     if( newstate == NULL ) break;      // Might only have 1 child
1982     // 'op' is what I am expecting to receive
1983     int op;
1984     if( i == 0 ) {
1985       op = _leftOp[rule];
1986     } else {
1987       op = _rightOp[rule];
1988     }
1989     // Operand type to catch the child's result.
1990     // This is what my child will give me.
1991     int opnd_class_instance = newstate->rule(op);
1992     // Choose between operand class or not.
1993     // This is what I will receive.
1994     int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
1995     // New rule for child.  Chase operand classes to get the actual rule.
1996     int newrule = newstate->rule(catch_op);
1997 
1998     if (newrule < NUM_OPERANDS) { // Operand/operandClass or internalOp/instruction?
1999       // Operand/operandClass
2000       // Insert operand into array of operands for this instruction
2001       mach->_opnds[num_opnds++] = newstate->MachOperGenerator(opnd_class_instance);
2002       ReduceOper(newstate, newrule, mem, mach);
2003 
2004     } else {                    // Child is internal operand or new instruction
2005       if (newrule < _LAST_MACH_OPER) { // internal operand or instruction?
2006         // internal operand --> call ReduceInst_Interior
2007         // Interior of complex instruction.  Do nothing but recurse.
2008         num_opnds = ReduceInst_Interior(newstate, newrule, mem, mach, num_opnds);
2009       } else {
2010         // instruction --> call build operand(  ) to catch result
2011         //             --> ReduceInst( newrule )
2012         mach->_opnds[num_opnds++] = s->MachOperGenerator(_reduceOp[catch_op]);
2013         Node *mem1 = (Node*)1;
2014         debug_only(Node *save_mem_node = _mem_node;)
2015         mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
2016         debug_only(_mem_node = save_mem_node;)
2017       }
2018     }
2019     assert( mach->_opnds[num_opnds-1], "" );
2020   }
2021   return num_opnds;
2022 }
2023 
2024 // This routine walks the interior of possible complex operands.
2025 // At each point we check our children in the match tree:
2026 // (1) No children -
2027 //     We are a leaf; add _leaf field as an input to the MachNode
2028 // (2) Child is an internal operand -
2029 //     Skip over it ( do nothing )
2030 // (3) Child is an instruction -
2031 //     Call ReduceInst recursively and add the
2032 //     instruction as an input to the MachNode
2033 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
2034   assert( rule < _LAST_MACH_OPER, "called with operand rule" );
2035   State *kid = s->_kids[0];
2036   assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
2037 
2038   // Leaf?  And not subsumed?
2039   if( kid == NULL && !_swallowed[rule] ) {
2040     mach->add_req( s->_leaf );  // Add leaf pointer
2041     return;                     // Bail out
2042   }
2043 
2044   if( s->_leaf->is_Load() ) {
2045     assert( mem == (Node*)1, "multiple Memories being matched at once?" );
2046     mem = s->_leaf->in(MemNode::Memory);
2047     debug_only(_mem_node = s->_leaf;)
2048   }
2049 
2050   handle_precedence_edges(s->_leaf, mach);
2051 
2052   if( s->_leaf->in(0) && s->_leaf->req() > 1) {
2053     if( !mach->in(0) )
2054       mach->set_req(0,s->_leaf->in(0));
2055     else {
2056       assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
2057     }
2058   }
2059 
2060   for (uint i = 0; kid != NULL && i < 2; kid = s->_kids[1], i++) {   // binary tree
2061     int newrule;
2062     if( i == 0) {
2063       newrule = kid->rule(_leftOp[rule]);
2064     } else {
2065       newrule = kid->rule(_rightOp[rule]);
2066     }
2067 
2068     if (newrule < _LAST_MACH_OPER) { // Operand or instruction?
2069       // Internal operand; recurse but do nothing else
2070       ReduceOper(kid, newrule, mem, mach);
2071 
2072     } else {                    // Child is a new instruction
2073       // Reduce the instruction, and add a direct pointer from this
2074       // machine instruction to the newly reduced one.
2075       Node *mem1 = (Node*)1;
2076       debug_only(Node *save_mem_node = _mem_node;)
2077       mach->add_req( ReduceInst( kid, newrule, mem1 ) );
2078       debug_only(_mem_node = save_mem_node;)
2079     }
2080   }
2081 }
2082 
2083 
2084 // -------------------------------------------------------------------------
2085 // Java-Java calling convention
2086 // (what you use when Java calls Java)
2087 
2088 //------------------------------find_receiver----------------------------------
2089 // For a given signature, return the OptoReg for parameter 0.
2090 OptoReg::Name Matcher::find_receiver() {
2091   VMRegPair regs;
2092   BasicType sig_bt = T_OBJECT;
2093   SharedRuntime::java_calling_convention(&sig_bt, &regs, 1);
2094   // Return argument 0 register.  In the LP64 build pointers
2095   // take 2 registers, but the VM wants only the 'main' name.
2096   return OptoReg::as_OptoReg(regs.first());
2097 }
2098 
2099 bool Matcher::is_vshift_con_pattern(Node* n, Node* m) {
2100   if (n != NULL && m != NULL) {
2101     return VectorNode::is_vector_shift(n) &&
2102            VectorNode::is_vector_shift_count(m) && m->in(1)->is_Con();
2103   }
2104   return false;
2105 }
2106 
2107 bool Matcher::clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
2108   // Must clone all producers of flags, or we will not match correctly.
2109   // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
2110   // then it will match into an ideal Op_RegFlags.  Alas, the fp-flags
2111   // are also there, so we may match a float-branch to int-flags and
2112   // expect the allocator to haul the flags from the int-side to the
2113   // fp-side.  No can do.
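       // E.g. (a sketch): a CmpI feeding both an If and a CMoveI is cloned at
       // each use, so every user matches against its own, adjacent flags
       // definition.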
2114   if (_must_clone[m->Opcode()]) {
2115     mstack.push(m, Visit);
2116     return true;
2117   }
2118   return pd_clone_node(n, m, mstack);
2119 }
2120 
2121 bool Matcher::clone_base_plus_offset_address(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
2122   Node *off = m->in(AddPNode::Offset);
2123   if (off->is_Con()) {
2124     address_visited.test_set(m->_idx); // Flag as address_visited
2125     mstack.push(m->in(AddPNode::Address), Pre_Visit);
2126     // Clone X+offset as it also folds into most addressing expressions
2127     mstack.push(off, Visit);
2128     mstack.push(m->in(AddPNode::Base), Pre_Visit);
2129     return true;
2130   }
2131   return false;
2132 }
2133 
2134 // A method-klass-holder may be passed in the inline_cache_reg
2135 // and then expanded into the inline_cache_reg and a method_ptr register
2136 //   defined in ad_<arch>.cpp
2137 
2138 //------------------------------find_shared------------------------------------
2139 // Set bits if Node is shared or otherwise a root
2140 void Matcher::find_shared(Node* n) {
2141   // Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc
2142   MStack mstack(C->live_nodes() * 2);
2143   // Mark nodes as address_visited if they are inputs to an address expression
2144   VectorSet address_visited;
2145   mstack.push(n, Visit);     // Don't need to pre-visit root node
2146   while (mstack.is_nonempty()) {
2147     n = mstack.node();       // Leave node on stack
2148     Node_State nstate = mstack.state();
2149     uint nop = n->Opcode();
2150     if (nstate == Pre_Visit) {
2151       if (address_visited.test(n->_idx)) { // Visited in address already?
2152         // Flag as visited and shared now.
2153         set_visited(n);
2154       }
2155       if (is_visited(n)) {   // Visited already?
2156         // Node is shared and there is no reason to clone it.  Flag it as
2157         // shared.  This causes it to match into a register for the sharing.
2158         set_shared(n);       // Flag as shared and
2159         if (n->is_DecodeNarrowPtr()) {
2160           // Oop field/array element loads must be shared, but since
2161           // they are shared through a DecodeN they may appear to have
2162           // a single use, so force sharing here.
2163           set_shared(n->in(1));
2164         }
2165         mstack.pop();        // remove node from stack
2166         continue;
2167       }
2168       nstate = Visit; // Not already visited; so visit now
2169     }
2170     if (nstate == Visit) {
2171       mstack.set_state(Post_Visit);
2172       set_visited(n);   // Flag as visited now
2173       bool mem_op = false;
2174       int mem_addr_idx = MemNode::Address;
2175       if (find_shared_visit(mstack, n, nop, mem_op, mem_addr_idx)) {
2176         continue;
2177       }
2178       for (int i = n->len() - 1; i >= 0; --i) { // For my children
2179         Node* m = n->in(i); // Get ith input
2180         if (m == NULL) {
2181           continue;  // Ignore NULLs
2182         }
2183         if (clone_node(n, m, mstack)) {
2184           continue;
2185         }
2186 
2187         // Clone addressing expressions as they are "free" in memory access instructions
2188         if (mem_op && i == mem_addr_idx && m->is_AddP() &&
2189             // When there are other uses besides address expressions
2190             // put it on stack and mark as shared.
2191             !is_visited(m)) {
2192           // Some inputs for address expression are not put on stack
2193           // to avoid marking them as shared and forcing them into register
2194           // if they are used only in address expressions.
2195           // But they should be marked as shared if there are other uses
2196           // besides address expressions.
2197 
2198           if (pd_clone_address_expressions(m->as_AddP(), mstack, address_visited)) {
2199             continue;
2200           }
2201         }   // if( mem_op &&
2202         mstack.push(m, Pre_Visit);
2203       }     // for(int i = ...)
2204     }
2205     else if (nstate == Alt_Post_Visit) {
2206       mstack.pop(); // Remove node from stack
2207       // We cannot remove the Cmp input from the Bool here, as the Bool may be
2208       // shared and all users of the Bool need to move the Cmp in parallel.
2209       // This leaves both the Bool and the If pointing at the Cmp.  To
2210       // prevent the Matcher from trying to Match the Cmp along both paths
2211       // BoolNode::match_edge always returns a zero.
2212 
2213       // We reorder the Op_If in a pre-order manner, so we can visit without
2214       // accidentally sharing the Cmp (the Bool and the If make 2 users).
2215       n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
2216     }
2217     else if (nstate == Post_Visit) {
2218       mstack.pop(); // Remove node from stack
2219 
2220       // Now hack a few special opcodes
2221       uint opcode = n->Opcode();
2222       bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->matcher_find_shared_post_visit(this, n, opcode);
2223       if (!gc_handled) {
2224         find_shared_post_visit(n, opcode);
2225       }
2226     }
2227     else {
2228       ShouldNotReachHere();
2229     }
2230   } // end of while (mstack.is_nonempty())
2231 }
2232 
2233 bool Matcher::find_shared_visit(MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) {
2234   switch(opcode) {  // Handle some opcodes special
2235     case Op_Phi:             // Treat Phis as shared roots
2236     case Op_Parm:
2237     case Op_Proj:            // All handled specially during matching
2238     case Op_SafePointScalarObject:
2239       set_shared(n);
2240       set_dontcare(n);
2241       break;
2242     case Op_If:
2243     case Op_CountedLoopEnd:
2244       mstack.set_state(Alt_Post_Visit); // Alternative way
2245       // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)).  Helps
2246       // with matching cmp/branch in 1 instruction.  The Matcher needs the
2247       // Bool and CmpX side-by-side, because it can only get at constants
2248       // that are at the leaves of Match trees, and the Bool's condition acts
2249       // as a constant here.
2250       mstack.push(n->in(1), Visit);         // Clone the Bool
2251       mstack.push(n->in(0), Pre_Visit);     // Visit control input
2252       return true; // while (mstack.is_nonempty())
2253     case Op_ConvI2D:         // These forms efficiently match with a prior
2254     case Op_ConvI2F:         //   Load but not a following Store
2255       if( n->in(1)->is_Load() &&        // Prior load
2256           n->outcnt() == 1 &&           // Not already shared
2257           n->unique_out()->is_Store() ) // Following store
2258         set_shared(n);       // Force it to be a root
2259       break;
2260     case Op_ReverseBytesI:
2261     case Op_ReverseBytesL:
2262       if( n->in(1)->is_Load() &&        // Prior load
2263           n->outcnt() == 1 )            // Not already shared
2264         set_shared(n);                  // Force it to be a root
2265       break;
2266     case Op_BoxLock:         // Can't match until we get stack-regs in ADLC
2267     case Op_IfFalse:
2268     case Op_IfTrue:
2269     case Op_MachProj:
2270     case Op_MergeMem:
2271     case Op_Catch:
2272     case Op_CatchProj:
2273     case Op_CProj:
2274     case Op_JumpProj:
2275     case Op_JProj:
2276     case Op_NeverBranch:
2277       set_dontcare(n);
2278       break;
2279     case Op_Jump:
2280       mstack.push(n->in(1), Pre_Visit);     // Switch Value (could be shared)
2281       mstack.push(n->in(0), Pre_Visit);     // Visit Control input
2282       return true;                             // while (mstack.is_nonempty())
2283     case Op_StrComp:
2284     case Op_StrEquals:
2285     case Op_StrIndexOf:
2286     case Op_StrIndexOfChar:
2287     case Op_AryEq:
2288     case Op_CountPositives:
2289     case Op_StrInflatedCopy:
2290     case Op_StrCompressedCopy:
2291     case Op_EncodeISOArray:
2292     case Op_FmaD:
2293     case Op_FmaF:
2294     case Op_FmaVD:
2295     case Op_FmaVF:
2296     case Op_MacroLogicV:
2297     case Op_VectorCmpMasked:
2298     case Op_CompressV:
2299     case Op_CompressM:
2300     case Op_ExpandV:
2301     case Op_VectorLoadMask:
2302       set_shared(n); // Force result into register (it will be anyways)
2303       break;
2304     case Op_ConP: {  // Convert pointers above the centerline to NULL
2305       TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2306       const TypePtr* tp = tn->type()->is_ptr();
2307       if (tp->_ptr == TypePtr::AnyNull) {
2308         tn->set_type(TypePtr::NULL_PTR);
2309       }
2310       break;
2311     }
2312     case Op_ConN: {  // Convert narrow pointers above the centerline to NULL
2313       TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2314       const TypePtr* tp = tn->type()->make_ptr();
2315       if (tp && tp->_ptr == TypePtr::AnyNull) {
2316         tn->set_type(TypeNarrowOop::NULL_PTR);
2317       }
2318       break;
2319     }
2320     case Op_Binary:         // These are introduced in the Post_Visit state.
2321       ShouldNotReachHere();
2322       break;
2323     case Op_ClearArray:
2324     case Op_SafePoint:
2325       mem_op = true;
2326       break;
2327     default:
2328       if( n->is_Store() ) {
2329         // Do match stores, despite no ideal reg
2330         mem_op = true;
2331         break;
2332       }
2333       if( n->is_Mem() ) { // Loads and LoadStores
2334         mem_op = true;
2335         // Loads must be root of match tree due to prior load conflict
2336         if( C->subsume_loads() == false )
2337           set_shared(n);
2338       }
2339       // Fall into default case
2340       if( !n->ideal_reg() )
2341         set_dontcare(n);  // Unmatchable Nodes
2342   } // end_switch
2343   return false;
2344 }
2345 
2346 void Matcher::find_shared_post_visit(Node* n, uint opcode) {
2347   if (n->is_predicated_vector()) {
2348     // Restructure into binary trees for Matching.
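         // E.g. (a sketch) for req() == 4: (op src1 src2 mask) becomes
         //   (op (Binary src1 src2) mask)
         // so the matcher never sees more than two children per node.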
2349     if (n->req() == 4) {
2350       n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2351       n->set_req(2, n->in(3));
2352       n->del_req(3);
2353     } else if (n->req() == 5) {
2354       n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2355       n->set_req(2, new BinaryNode(n->in(3), n->in(4)));
2356       n->del_req(4);
2357       n->del_req(3);
2358     } else if (n->req() == 6) {
2359       Node* b3 = new BinaryNode(n->in(4), n->in(5));
2360       Node* b2 = new BinaryNode(n->in(3), b3);
2361       Node* b1 = new BinaryNode(n->in(2), b2);
2362       n->set_req(2, b1);
2363       n->del_req(5);
2364       n->del_req(4);
2365       n->del_req(3);
2366     }
2367     return;
2368   }
2369 
2370   switch(opcode) {       // Handle some opcodes special
2371     case Op_CompareAndExchangeB:
2372     case Op_CompareAndExchangeS:
2373     case Op_CompareAndExchangeI:
2374     case Op_CompareAndExchangeL:
2375     case Op_CompareAndExchangeP:
2376     case Op_CompareAndExchangeN:
2377     case Op_WeakCompareAndSwapB:
2378     case Op_WeakCompareAndSwapS:
2379     case Op_WeakCompareAndSwapI:
2380     case Op_WeakCompareAndSwapL:
2381     case Op_WeakCompareAndSwapP:
2382     case Op_WeakCompareAndSwapN:
2383     case Op_CompareAndSwapB:
2384     case Op_CompareAndSwapS:
2385     case Op_CompareAndSwapI:
2386     case Op_CompareAndSwapL:
2387     case Op_CompareAndSwapP:
2388     case Op_CompareAndSwapN: {   // Convert trinary to binary-tree
2389       Node* newval = n->in(MemNode::ValueIn);
2390       Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
2391       Node* pair = new BinaryNode(oldval, newval);
2392       n->set_req(MemNode::ValueIn, pair);
2393       n->del_req(LoadStoreConditionalNode::ExpectedIn);
2394       break;
2395     }
2396     case Op_CMoveD:              // Convert trinary to binary-tree
2397     case Op_CMoveF:
2398     case Op_CMoveI:
2399     case Op_CMoveL:
2400     case Op_CMoveN:
2401     case Op_CMoveP: {
2402       // Restructure into a binary tree for Matching.  It's possible that
2403       // we could move this code up next to the graph reshaping for IfNodes
2404       // or vice-versa, but I do not want to debug this for Ladybird.
2405       // 10/2/2000 CNC.
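           // E.g. (a sketch): CMoveI (Bool (CmpI a b)) x y becomes
           //   CMoveI (Binary (Bool ...) (CmpI a b)) (Binary x y)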
2406       Node* pair1 = new BinaryNode(n->in(1), n->in(1)->in(1));
2407       n->set_req(1, pair1);
2408       Node* pair2 = new BinaryNode(n->in(2), n->in(3));
2409       n->set_req(2, pair2);
2410       n->del_req(3);
2411       break;
2412     }
2413     case Op_CMoveVF:
2414     case Op_CMoveVD: {
2415       // Restructure into a binary tree for Matching:
2416       // CMoveVF (Binary bool mask) (Binary src1 src2)
2417       Node* in_cc = n->in(1);
2418       assert(in_cc->is_Con(), "The condition input of a cmove vector node must be a constant.");
2419       Node* bol = new BoolNode(in_cc, (BoolTest::mask)in_cc->get_int());
2420       Node* pair1 = new BinaryNode(bol, in_cc);
2421       n->set_req(1, pair1);
2422       Node* pair2 = new BinaryNode(n->in(2), n->in(3));
2423       n->set_req(2, pair2);
2424       n->del_req(3);
2425       break;
2426     }
2427     case Op_VectorCmpMasked: {
2428       Node* pair1 = new BinaryNode(n->in(2), n->in(3));
2429       n->set_req(2, pair1);
2430       n->del_req(3);
2431       break;
2432     }
2433     case Op_MacroLogicV: {
2434       Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2435       Node* pair2 = new BinaryNode(n->in(3), n->in(4));
2436       n->set_req(1, pair1);
2437       n->set_req(2, pair2);
2438       n->del_req(4);
2439       n->del_req(3);
2440       break;
2441     }
2442     case Op_StoreVectorMasked: {
2443       Node* pair = new BinaryNode(n->in(3), n->in(4));
2444       n->set_req(3, pair);
2445       n->del_req(4);
2446       break;
2447     }
2448     case Op_LoopLimit: {
2449       Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2450       n->set_req(1, pair1);
2451       n->set_req(2, n->in(3));
2452       n->del_req(3);
2453       break;
2454     }
2455     case Op_StrEquals:
2456     case Op_StrIndexOfChar: {
2457       Node* pair1 = new BinaryNode(n->in(2), n->in(3));
2458       n->set_req(2, pair1);
2459       n->set_req(3, n->in(4));
2460       n->del_req(4);
2461       break;
2462     }
2463     case Op_StrComp:
2464     case Op_StrIndexOf: {
2465       Node* pair1 = new BinaryNode(n->in(2), n->in(3));
2466       n->set_req(2, pair1);
2467       Node* pair2 = new BinaryNode(n->in(4),n->in(5));
2468       n->set_req(3, pair2);
2469       n->del_req(5);
2470       n->del_req(4);
2471       break;
2472     }
2473     case Op_StrCompressedCopy:
2474     case Op_StrInflatedCopy:
2475     case Op_EncodeISOArray: {
2476       // Restructure into a binary tree for Matching.
2477       Node* pair = new BinaryNode(n->in(3), n->in(4));
2478       n->set_req(3, pair);
2479       n->del_req(4);
2480       break;
2481     }
2482     case Op_FmaD:
2483     case Op_FmaF:
2484     case Op_FmaVD:
2485     case Op_FmaVF: {
2486       // Restructure into a binary tree for Matching.
2487       Node* pair = new BinaryNode(n->in(1), n->in(2));
2488       n->set_req(2, pair);
2489       n->set_req(1, n->in(3));
2490       n->del_req(3);
2491       break;
2492     }
2493     case Op_MulAddS2I: {
2494       Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2495       Node* pair2 = new BinaryNode(n->in(3), n->in(4));
2496       n->set_req(1, pair1);
2497       n->set_req(2, pair2);
2498       n->del_req(4);
2499       n->del_req(3);
2500       break;
2501     }
2502     case Op_ClearArray: {
2503       Node* pair = new BinaryNode(n->in(2), n->in(3));
2504       n->set_req(2, pair);
2505       n->set_req(3, n->in(4));
2506       n->del_req(4);
2507       break;
2508     }
2509     case Op_CopySignD:
2510     case Op_SignumVF:
2511     case Op_SignumVD:
2512     case Op_SignumF:
2513     case Op_SignumD: {
2514       Node* pair = new BinaryNode(n->in(2), n->in(3));
2515       n->set_req(2, pair);
2516       n->del_req(3);
2517       break;
2518     }
2519     case Op_VectorBlend:
2520     case Op_VectorInsert: {
2521       Node* pair = new BinaryNode(n->in(1), n->in(2));
2522       n->set_req(1, pair);
2523       n->set_req(2, n->in(3));
2524       n->del_req(3);
2525       break;
2526     }
2527     case Op_LoadVectorGatherMasked:
2528     case Op_StoreVectorScatter: {
2529       Node* pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2530       n->set_req(MemNode::ValueIn, pair);
2531       n->del_req(MemNode::ValueIn+1);
2532       break;
2533     }
2534     case Op_StoreVectorScatterMasked: {
      Node* pair = new BinaryNode(n->in(MemNode::ValueIn+1), n->in(MemNode::ValueIn+2));
      n->set_req(MemNode::ValueIn+1, pair);
      n->del_req(MemNode::ValueIn+2);
      pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
      n->set_req(MemNode::ValueIn, pair);
      n->del_req(MemNode::ValueIn+1);
      break;
    }
    case Op_VectorMaskCmp: {
      n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
      n->set_req(2, n->in(3));
      n->del_req(3);
      break;
    }
    default:
      break;
  }
}

#ifndef PRODUCT
void Matcher::record_new2old(Node* newn, Node* old) {
  _new2old_map.map(newn->_idx, old);
  if (!_reused.test_set(old->_igv_idx)) {
    // Reuse the Ideal-level IGV identifier so that the node can be tracked
    // across matching. If there are multiple machine nodes expanded from the
    // same Ideal node, only one will reuse its IGV identifier.
    newn->_igv_idx = old->_igv_idx;
  }
}

// Dump the map from ideal (machine-independent) nodes to the
// machine-dependent nodes they were matched to.
void Matcher::dump_old2new_map() {
  _old2new_map.dump();
}
#endif // !PRODUCT

//---------------------------collect_null_checks-------------------------------
// Find null checks in the ideal graph and record them for the later
// implicit-null-check pass.  Collects either an IfTrue or IfFalse projection
// for the common NOT-null path, AND the ideal value being tested.
void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
  Node *iff = proj->in(0);
  if( iff->Opcode() == Op_If ) {
    // During matching If's have Bool & Cmp side-by-side
    BoolNode *b = iff->in(1)->as_Bool();
    Node *cmp = iff->in(2);
    int opc = cmp->Opcode();
    if (opc != Op_CmpP && opc != Op_CmpN) return;

    const Type* ct = cmp->in(2)->bottom_type();
    if (ct == TypePtr::NULL_PTR ||
        (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {

      bool push_it = false;
      if( proj->Opcode() == Op_IfTrue ) {
#ifndef PRODUCT
        extern int all_null_checks_found;
        all_null_checks_found++;
#endif
        if( b->_test._test == BoolTest::ne ) {
          push_it = true;
        }
      } else {
        assert( proj->Opcode() == Op_IfFalse, "" );
        if( b->_test._test == BoolTest::eq ) {
          push_it = true;
        }
      }
      if( push_it ) {
        _null_check_tests.push(proj);
        Node* val = cmp->in(1);
#ifdef _LP64
        if (val->bottom_type()->isa_narrowoop() &&
            !Matcher::narrow_oop_use_complex_address()) {
          //
          // Look for a DecodeN node, which should be pinned to orig_proj.
          // On platforms (such as SPARC) which cannot handle two adds in an
          // addressing mode, we have to keep the DecodeN node and use it to
          // do the implicit null check in the address.
          //
          // The DecodeN node was pinned to the non-null path (orig_proj)
          // during the CastPP transformation in final_graph_reshaping_impl().
          //
          for (uint i = 0; i < orig_proj->outcnt(); i++) {
            Node* d = orig_proj->raw_out(i);
            if (d->is_DecodeN() && d->in(1) == val) {
              val = d;
              val->set_req(0, NULL); // Unpin now.
              // Mark this as a special case to distinguish it from
              // the regular case: CmpP(DecodeN, NULL).
              val = (Node*)(((intptr_t)val) | 1);
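              // Node pointers are at least word-aligned, so the low bit
              // is free to use as a tag here.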
              break;
            }
          }
        }
#endif
        _null_check_tests.push(val);
      }
    }
  }
}

//---------------------------validate_null_checks------------------------------
// It's possible that the value being NULL checked is not the root of a match
// tree.  If so, we cannot use the value in an implicit null check.
void Matcher::validate_null_checks( ) {
  uint cnt = _null_check_tests.size();
  for( uint i=0; i < cnt; i+=2 ) {
    Node *test = _null_check_tests[i];
    Node *val = _null_check_tests[i+1];
    bool is_decoden = ((intptr_t)val) & 1;
    val = (Node*)(((intptr_t)val) & ~1);
    if (has_new_node(val)) {
      Node* new_val = new_node(val);
      if (is_decoden) {
        assert(val->is_DecodeNarrowPtr() && val->in(0) == NULL, "sanity");
        // Note: new_val may have a control edge if
        // the original ideal node DecodeN was matched before
        // it was unpinned in Matcher::collect_null_checks().
        // Unpin the mach node and mark it.
        new_val->set_req(0, NULL);
        new_val = (Node*)(((intptr_t)new_val) | 1);
      }
      // Is a match-tree root, so replace with the matched value
      _null_check_tests.map(i+1, new_val);
    } else {
      // Yank from candidate list
      _null_check_tests.map(i+1, _null_check_tests[--cnt]);
      _null_check_tests.map(i, _null_check_tests[--cnt]);
      _null_check_tests.pop();
      _null_check_tests.pop();
      i-=2;
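      // Re-examine this slot on the next iteration, since the pair from
      // the end of the list was just swapped into it.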
    }
  }
}

bool Matcher::gen_narrow_oop_implicit_null_checks() {
  // Advise the matcher to perform null checks on the narrow-oop side.
  // Implicit checks are not possible on the uncompressed-oop side anyway
  // (at least not for read accesses).
  // This performs significantly better (especially on Power 6).
  if (!os::zero_page_read_protected()) {
    return true;
  }
  return CompressedOops::use_implicit_null_checks() &&
         (narrow_oop_use_complex_address() ||
          CompressedOops::base() != NULL);
}

// Compute RegMask for an ideal register.
const RegMask* Matcher::regmask_for_ideal_register(uint ideal_reg, Node* ret) {
  const Type* t = Type::mreg2type[ideal_reg];
  if (t == NULL) {
    assert(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ, "not a vector: %d", ideal_reg);
    return NULL; // not supported
  }
  Node* fp  = ret->in(TypeFunc::FramePtr);
  Node* mem = ret->in(TypeFunc::Memory);
  const TypePtr* atp = TypePtr::BOTTOM;
  MemNode::MemOrd mo = MemNode::unordered;

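  // Build a dummy load of the ideal type; matching it below produces a
  // machine node whose output register mask describes the register class
  // used for ideal_reg.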
  Node* spill;
  switch (ideal_reg) {
    case Op_RegN: spill = new LoadNNode(NULL, mem, fp, atp, t->is_narrowoop(), mo); break;
    case Op_RegI: spill = new LoadINode(NULL, mem, fp, atp, t->is_int(),       mo); break;
    case Op_RegP: spill = new LoadPNode(NULL, mem, fp, atp, t->is_ptr(),       mo); break;
    case Op_RegF: spill = new LoadFNode(NULL, mem, fp, atp, t,                 mo); break;
    case Op_RegD: spill = new LoadDNode(NULL, mem, fp, atp, t,                 mo); break;
    case Op_RegL: spill = new LoadLNode(NULL, mem, fp, atp, t->is_long(),      mo); break;

    case Op_VecA: // fall-through
    case Op_VecS: // fall-through
    case Op_VecD: // fall-through
    case Op_VecX: // fall-through
    case Op_VecY: // fall-through
    case Op_VecZ: spill = new LoadVectorNode(NULL, mem, fp, atp, t->is_vect()); break;
    case Op_RegVectMask: return Matcher::predicate_reg_mask();

    default: ShouldNotReachHere();
  }
  MachNode* mspill = match_tree(spill);
  assert(mspill != NULL, "matching failed: %d", ideal_reg);
  // Handle generic vector operand case
  if (Matcher::supports_generic_vector_operands && t->isa_vect()) {
    specialize_mach_node(mspill);
  }
  return &mspill->out_RegMask();
}

// Process Mach IR right after the selection phase is over.
void Matcher::do_postselect_cleanup() {
  if (supports_generic_vector_operands) {
    specialize_generic_vector_operands();
    if (C->failing())  return;
  }
}

//----------------------------------------------------------------------
// Elision of generic machine operands.
//----------------------------------------------------------------------

// Compute the concrete vector operand for a generic TEMP vector mach node
// based on its user info.
void Matcher::specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx) {
  assert(use->in(idx) == tmp, "not a user");
  assert(!Matcher::is_generic_vector(use->_opnds[0]), "use not processed yet");

  if (idx == use->two_adr()) { // DEF_TEMP case
    tmp->_opnds[0] = use->_opnds[0]->clone();
  } else {
    uint ideal_vreg = vector_ideal_reg(C->max_vector_size());
    tmp->_opnds[0] = Matcher::pd_specialize_generic_vector_operand(tmp->_opnds[0], ideal_vreg, true /*is_temp*/);
  }
}

// Compute the concrete vector operand for a generic DEF/USE vector operand
// (of mach node m at index opnd_idx).
MachOper* Matcher::specialize_vector_operand(MachNode* m, uint opnd_idx) {
  assert(Matcher::is_generic_vector(m->_opnds[opnd_idx]), "repeated updates");
  Node* def = NULL;
  if (opnd_idx == 0) { // DEF
    def = m; // use the mach node itself to compute the vector operand type
  } else {
    int base_idx = m->operand_index(opnd_idx);
    def = m->in(base_idx);
    if (def->is_Mach()) {
      if (def->is_MachTemp() && Matcher::is_generic_vector(def->as_Mach()->_opnds[0])) {
        specialize_temp_node(def->as_MachTemp(), m, base_idx); // MachTemp node use site
      } else if (is_reg2reg_move(def->as_Mach())) {
        def = def->in(1); // skip over generic reg-to-reg moves
      }
    }
  }
  assert(def->bottom_type()->isa_vect(), "not a vector");
  uint ideal_vreg = def->bottom_type()->ideal_reg();
  return Matcher::pd_specialize_generic_vector_operand(m->_opnds[opnd_idx], ideal_vreg, false /*is_temp*/);
}

void Matcher::specialize_mach_node(MachNode* m) {
  assert(!m->is_MachTemp(), "processed along with its user");
  // For generic USE operands, pull the specific register class from the
  // output (DEF) operand of the instruction that defines the input value.
  for (uint i = 0; i < m->num_opnds(); i++) {
    if (Matcher::is_generic_vector(m->_opnds[i])) {
      m->_opnds[i] = specialize_vector_operand(m, i);
    }
  }
}

// Replace generic vector operands with concrete vector operands and eliminate
// generic reg-to-reg moves from the graph.
void Matcher::specialize_generic_vector_operands() {
  assert(supports_generic_vector_operands, "sanity");
  ResourceMark rm;

  // Replace generic vector operands (vec/legVec) with concrete ones (vec[SDXYZ]/legVec[SDXYZ])
  // and remove reg-to-reg vector moves (MoveVec2Leg and MoveLeg2Vec).
  Unique_Node_List live_nodes;
  C->identify_useful_nodes(live_nodes);

  while (live_nodes.size() > 0) {
    MachNode* m = live_nodes.pop()->isa_Mach();
    if (m != NULL) {
      if (Matcher::is_reg2reg_move(m)) {
        // Register allocator properly handles vec <=> leg moves using register masks.
        int opnd_idx = m->operand_index(1);
        Node* def = m->in(opnd_idx);
        m->subsume_by(def, C);
      } else if (m->is_MachTemp()) {
        // process MachTemp nodes at use site (see Matcher::specialize_vector_operand)
      } else {
        specialize_mach_node(m);
      }
    }
  }
}

uint Matcher::vector_length(const Node* n) {
  const TypeVect* vt = n->bottom_type()->is_vect();
  return vt->length();
}

uint Matcher::vector_length(const MachNode* use, const MachOper* opnd) {
  int def_idx = use->operand_index(opnd);
  Node* def = use->in(def_idx);
  return def->bottom_type()->is_vect()->length();
}

uint Matcher::vector_length_in_bytes(const Node* n) {
  const TypeVect* vt = n->bottom_type()->is_vect();
  return vt->length_in_bytes();
}

uint Matcher::vector_length_in_bytes(const MachNode* use, const MachOper* opnd) {
  uint def_idx = use->operand_index(opnd);
  Node* def = use->in(def_idx);
  return def->bottom_type()->is_vect()->length_in_bytes();
}

BasicType Matcher::vector_element_basic_type(const Node* n) {
  const TypeVect* vt = n->bottom_type()->is_vect();
  return vt->element_basic_type();
}

BasicType Matcher::vector_element_basic_type(const MachNode* use, const MachOper* opnd) {
  int def_idx = use->operand_index(opnd);
  Node* def = use->in(def_idx);
  return def->bottom_type()->is_vect()->element_basic_type();
}

#ifdef ASSERT
bool Matcher::verify_after_postselect_cleanup() {
  assert(!C->failing(), "sanity");
  if (supports_generic_vector_operands) {
    Unique_Node_List useful;
    C->identify_useful_nodes(useful);
    for (uint i = 0; i < useful.size(); i++) {
      MachNode* m = useful.at(i)->isa_Mach();
      if (m != NULL) {
        assert(!Matcher::is_reg2reg_move(m), "no MoveVec nodes allowed");
        for (uint j = 0; j < m->num_opnds(); j++) {
          assert(!Matcher::is_generic_vector(m->_opnds[j]), "no generic vector operands allowed");
        }
      }
    }
  }
  return true;
}
#endif // ASSERT

// Used by the DFA in dfa_xxx.cpp.  Check for a following barrier or
// atomic instruction acting as a StoreLoad barrier with no intervening
// volatile load; if one is found, no barrier is needed here.
// We retain the Node to act as a compiler ordering barrier.
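// E.g. a MemBarVolatile emitted after a volatile store can be elided when
// the next memory operation is a CAS, since the CAS already provides the
// StoreLoad semantics.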
bool Matcher::post_store_load_barrier(const Node* vmb) {
  Compile* C = Compile::current();
  assert(vmb->is_MemBar(), "");
  assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
  const MemBarNode* membar = vmb->as_MemBar();

  // Get the Ideal Proj node, ctrl, that can be used to iterate forward
  Node* ctrl = NULL;
  for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
    Node* p = membar->fast_out(i);
    assert(p->is_Proj(), "only projections here");
    if ((p->as_Proj()->_con == TypeFunc::Control) &&
        !C->node_arena()->contains(p)) { // Unmatched old-space only
      ctrl = p;
      break;
    }
  }
  assert((ctrl != NULL), "missing control projection");

  for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
    Node *x = ctrl->fast_out(j);
    int xop = x->Opcode();

    // We don't need the current barrier if we see another barrier or a lock
    // before seeing a volatile load.
    //
    // Op_FastUnlock previously appeared in the Op_* list below.
    // With the advent of 1-0 lock operations we're no longer guaranteed
    // that a monitor exit operation contains a serializing instruction.

    if (xop == Op_MemBarVolatile ||
        xop == Op_CompareAndExchangeB ||
        xop == Op_CompareAndExchangeS ||
        xop == Op_CompareAndExchangeI ||
        xop == Op_CompareAndExchangeL ||
        xop == Op_CompareAndExchangeP ||
        xop == Op_CompareAndExchangeN ||
        xop == Op_WeakCompareAndSwapB ||
        xop == Op_WeakCompareAndSwapS ||
        xop == Op_WeakCompareAndSwapL ||
        xop == Op_WeakCompareAndSwapP ||
        xop == Op_WeakCompareAndSwapN ||
        xop == Op_WeakCompareAndSwapI ||
        xop == Op_CompareAndSwapB ||
        xop == Op_CompareAndSwapS ||
        xop == Op_CompareAndSwapL ||
        xop == Op_CompareAndSwapP ||
        xop == Op_CompareAndSwapN ||
        xop == Op_CompareAndSwapI ||
        BarrierSet::barrier_set()->barrier_set_c2()->matcher_is_store_load_barrier(x, xop)) {
      return true;
    }

    // Op_FastLock previously appeared in the Op_* list above.
    if (xop == Op_FastLock) {
      return true;
    }

    if (x->is_MemBar()) {
      // We must retain this membar if there is an upcoming volatile
      // load, which will be followed by an acquire membar.
      if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
        return false;
      } else {
        // For other kinds of barriers, check by pretending we
        // are them, and seeing if we can be removed.
        return post_store_load_barrier(x->as_MemBar());
      }
    }

    // probably not necessary to check for these
    if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
      return false;
    }
  }
  return false;
}

// Check whether node n is a branch to an uncommon trap that we could
// optimize as a test with very high branch costs if the branch to the
// uncommon trap is taken.  The code must be able to be recompiled to use
// a cheaper test.
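// The expected shape is If -> IfFalse -> (Region ->)* call into the
// uncommon trap blob; at most four Regions are followed, to guard against
// cycles.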
bool Matcher::branches_to_uncommon_trap(const Node *n) {
  // Don't do it for natives, adapters, or runtime stubs
  Compile *C = Compile::current();
  if (!C->is_method_compilation()) return false;

  assert(n->is_If(), "You should only call this on if nodes.");
  IfNode *ifn = n->as_If();

  Node *ifFalse = NULL;
  for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
    if (ifn->fast_out(i)->is_IfFalse()) {
      ifFalse = ifn->fast_out(i);
      break;
    }
  }
  assert(ifFalse, "An If should have an ifFalse. Graph is broken.");

  Node *reg = ifFalse;
  int cnt = 4; // We must protect against cycles.  Limit to 4 iterations.
               // Alternatively use visited set?  Seems too expensive.
  while (reg != NULL && cnt > 0) {
    CallNode *call = NULL;
    RegionNode *nxt_reg = NULL;
    for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
      Node *o = reg->fast_out(i);
      if (o->is_Call()) {
        call = o->as_Call();
      }
      if (o->is_Region()) {
        nxt_reg = o->as_Region();
      }
    }

    if (call &&
        call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
      const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
      if (trtype->isa_int() && trtype->is_int()->is_con()) {
        jint tr_con = trtype->is_int()->get_con();
        Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
        Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
        assert((int)reason < (int)BitsPerInt, "recode bit map");

        if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
            && action != Deoptimization::Action_none) {
          // This uncommon trap is sure to recompile, eventually.
          // When that happens, C->too_many_traps will prevent
          // this transformation from happening again.
          return true;
        }
      }
    }

    reg = nxt_reg;
    cnt--;
  }

  return false;
}

//=============================================================================
//---------------------------State---------------------------------------------
State::State(void) : _rule() {
#ifdef ASSERT
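  // Poison the fields in debug builds so use of an uninitialized State is
  // caught quickly.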
  _id = 0;
  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
#endif
}

#ifdef ASSERT
State::~State() {
  _id = 99;
  _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
  _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
  memset(_cost, -3, sizeof(_cost));
  memset(_rule, -3, sizeof(_rule));
}
#endif

#ifndef PRODUCT
//---------------------------dump----------------------------------------------
void State::dump() {
  tty->print("\n");
  dump(0);
}

void State::dump(int depth) {
  for (int j = 0; j < depth; j++) {
    tty->print("   ");
  }
  tty->print("--N: ");
  _leaf->dump();
  uint i;
  for (i = 0; i < _LAST_MACH_OPER; i++) {
    // Check for valid entry
    if (valid(i)) {
      for (int j = 0; j < depth; j++) {
        tty->print("   ");
      }
      assert(cost(i) != max_juint, "cost must be a valid value");
      assert(rule(i) < _last_Mach_Node, "rule[i] must be valid rule");
      tty->print_cr("%s  %d  %s",
                    ruleName[i], cost(i), ruleName[rule(i)]);
    }
  }
  tty->cr();

  for (i = 0; i < 2; i++) {
    if (_kids[i]) {
      _kids[i]->dump(depth + 1);
    }
  }
}
#endif