1 /*
   2  * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/assembler.inline.hpp"
  26 #include "code/aotCodeCache.hpp"
  27 #include "code/compiledIC.hpp"
  28 #include "code/debugInfo.hpp"
  29 #include "code/debugInfoRec.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compilerDirectives.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "compiler/oopMap.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/c2/barrierSetC2.hpp"
  36 #include "gc/shared/gc_globals.hpp"
  37 #include "memory/allocation.hpp"
  38 #include "opto/ad.hpp"
  39 #include "opto/block.hpp"
  40 #include "opto/c2_MacroAssembler.hpp"
  41 #include "opto/c2compiler.hpp"
  42 #include "opto/callnode.hpp"
  43 #include "opto/cfgnode.hpp"
  44 #include "opto/locknode.hpp"
  45 #include "opto/machnode.hpp"
  46 #include "opto/node.hpp"
  47 #include "opto/optoreg.hpp"
  48 #include "opto/output.hpp"
  49 #include "opto/regalloc.hpp"
  50 #include "opto/type.hpp"
  51 #include "runtime/sharedRuntime.hpp"
  52 #include "utilities/macros.hpp"
  53 #include "utilities/powerOfTwo.hpp"
  54 #include "utilities/xmlstream.hpp"
  55 
  56 #ifndef PRODUCT
  57 #define DEBUG_ARG(x) , x
  58 #else
  59 #define DEBUG_ARG(x)
  60 #endif
  61 
  62 //------------------------------Scheduling----------------------------------
  63 // This class contains all the information necessary to implement instruction
  64 // scheduling and bundling.
  65 class Scheduling {
  66 
  67 private:
  68   // Arena to use
  69   Arena *_arena;
  70 
  71   // Control-Flow Graph info
  72   PhaseCFG *_cfg;
  73 
  74   // Register Allocation info
  75   PhaseRegAlloc *_regalloc;
  76 
  77   // Number of nodes in the method
  78   uint _node_bundling_limit;
  79 
  80   // List of scheduled nodes. Generated in reverse order
  81   Node_List _scheduled;
  82 
  83   // List of nodes currently available for choosing for scheduling
  84   Node_List _available;
  85 
  86   // For each instruction beginning a bundle, the number of following
  87   // nodes to be bundled with it.
  88   Bundle *_node_bundling_base;
  89 
  90   // Mapping from register to Node
  91   Node_List _reg_node;
  92 
  93   // Free list for pinch nodes.
  94   Node_List _pinch_free_list;
  95 
  96   // Number of uses of this node within the containing basic block.
  97   short *_uses;
  98 
  99   // Schedulable portion of current block.  Skips Region/Phi/CreateEx up
 100   // front, branch+proj at end.  Also skips Catch/CProj (same as
 101   // branch-at-end), plus just-prior exception-throwing call.
 102   uint _bb_start, _bb_end;
 103 
 104   // Latency from the end of the basic block as scheduled
 105   unsigned short *_current_latency;
 106 
 107   // Remember the next node
 108   Node *_next_node;
 109 
 110   // Length of the current bundle, in instructions
 111   uint _bundle_instr_count;
 112 
 113   // Current Cycle number, for computing latencies and bundling
 114   uint _bundle_cycle_number;
 115 
 116   // Bundle information
 117   Pipeline_Use_Element _bundle_use_elements[resource_count];
 118   Pipeline_Use         _bundle_use;
 119 
 120   // Dump the available list
 121   void dump_available() const;
 122 
 123 public:
 124   Scheduling(Arena *arena, Compile &compile);
 125 
 126   // Step ahead "i" cycles
 127   void step(uint i);
 128 
 129   // Step ahead 1 cycle, and clear the bundle state (for example,
 130   // at a branch target)
 131   void step_and_clear();
 132 
 133   Bundle* node_bundling(const Node *n) {
 134     assert(valid_bundle_info(n), "oob");
 135     return (&_node_bundling_base[n->_idx]);
 136   }
 137 
 138   bool valid_bundle_info(const Node *n) const {
 139     return (_node_bundling_limit > n->_idx);
 140   }
 141 
 142   bool starts_bundle(const Node *n) const {
 143     return (_node_bundling_limit > n->_idx && _node_bundling_base[n->_idx].starts_bundle());
 144   }
 145 
 146   // Do the scheduling
 147   void DoScheduling();
 148 
 149   // Compute the register antidependencies within a basic block
 150   void ComputeRegisterAntidependencies(Block *bb);
 151   void verify_do_def( Node *n, OptoReg::Name def, const char *msg );
 152   void verify_good_schedule( Block *b, const char *msg );
 153   void anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def );
 154   void anti_do_use( Block *b, Node *use, OptoReg::Name use_reg );
 155 
 156   // Add a node to the current bundle
 157   void AddNodeToBundle(Node *n, const Block *bb);
 158 
 159   // Return an integer less than, equal to, or greater than zero
 160   // if the stack offset of the first argument is respectively
 161   // less than, equal to, or greater than the second.
 162   int compare_two_spill_nodes(Node* first, Node* second);
 163 
 164   // Add a node to the list of available nodes
 165   void AddNodeToAvailableList(Node *n);
 166 
 167   // Compute the local use count for the nodes in a block, and compute
 168   // the list of instructions with no uses in the block as available
 169   void ComputeUseCount(const Block *bb);
 170 
 171   // Choose an instruction from the available list to add to the bundle
 172   Node * ChooseNodeToBundle();
 173 
 174   // See if this Node fits into the currently accumulating bundle
 175   bool NodeFitsInBundle(Node *n);
 176 
 177   // Decrement the use count for a node
 178  void DecrementUseCounts(Node *n, const Block *bb);
 179 
 180   // Garbage collect pinch nodes for reuse by other blocks.
 181   void garbage_collect_pinch_nodes();
 182   // Clean up a pinch node for reuse (helper for above).
 183   void cleanup_pinch( Node *pinch );
 184 
 185   // Information for statistics gathering
 186 #ifndef PRODUCT
 187 private:
 188   // Gather information on size of nops relative to total
 189   static uint _total_nop_size, _total_method_size;
 190   static uint _total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1];
 191 
 192 public:
 193   static void print_statistics();
 194 
 195   static void increment_instructions_per_bundle(uint i) {
 196     _total_instructions_per_bundle[i]++;
 197   }
 198 
 199   static void increment_nop_size(uint s) {
 200     _total_nop_size += s;
 201   }
 202 
 203   static void increment_method_size(uint s) {
 204     _total_method_size += s;
 205   }
 206 #endif
 207 
 208 };
 209 
 210 PhaseOutput::PhaseOutput()
 211   : Phase(Phase::Output),
 212     _code_buffer("Compile::Fill_buffer"),
 213     _first_block_size(0),
 214     _handler_table(),
 215     _inc_table(),
 216     _stub_list(),
 217     _oop_map_set(nullptr),
 218     _scratch_buffer_blob(nullptr),
 219     _scratch_locs_memory(nullptr),
 220     _scratch_const_size(-1),
 221     _in_scratch_emit_size(false),
 222     _frame_slots(0),
 223     _code_offsets(),
 224     _node_bundling_limit(0),
 225     _node_bundling_base(nullptr),
 226     _orig_pc_slot(0),
 227     _orig_pc_slot_offset_in_bytes(0),
 228     _buf_sizes(),
 229     _block(nullptr),
 230     _index(0) {
 231   C->set_output(this);
 232   if (C->stub_name() == nullptr) {
 233     int fixed_slots = C->fixed_slots();
 234     if (C->needs_stack_repair()) {
 235       fixed_slots -= 2;
 236     }
 237     // TODO 8284443 Only reserve extra slot if needed
 238     if (InlineTypeReturnedAsFields) {
 239       fixed_slots -= 2;
 240     }
 241     _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
 242   }
 243 }
 244 
 245 PhaseOutput::~PhaseOutput() {
 246   C->set_output(nullptr);
 247   if (_scratch_buffer_blob != nullptr) {
 248     BufferBlob::free(_scratch_buffer_blob);
 249   }
 250 }
 251 
 252 void PhaseOutput::perform_mach_node_analysis() {
 253   // Late barrier analysis must be done after schedule and bundle
 254   // Otherwise liveness based spilling will fail
 255   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 256   bs->late_barrier_analysis();
 257 
 258   pd_perform_mach_node_analysis();
 259 
 260   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 261 }
 262 
 263 // Convert Nodes to instruction bits and pass off to the VM
 264 void PhaseOutput::Output() {
 265   // RootNode goes
 266   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 267 
 268   // The number of new nodes (mostly MachNop) is proportional to
 269   // the number of java calls and inner loops which are aligned.
 270   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 271                             C->inner_loops()*(OptoLoopAlignment-1)),
 272                            "out of nodes before code generation" ) ) {
 273     return;
 274   }
 275   // Make sure I can find the Start Node
 276   Block *entry = C->cfg()->get_block(1);
 277   Block *broot = C->cfg()->get_root_block();
 278 
 279   const StartNode *start = entry->head()->as_Start();
 280 
 281   // Replace StartNode with prolog
 282   Label verified_entry;
 283   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
 284   entry->map_node(prolog, 0);
 285   C->cfg()->map_node_to_block(prolog, entry);
 286   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 287 
 288   // Virtual methods need an unverified entry point
 289   if (C->is_osr_compilation()) {
 290     if (PoisonOSREntry) {
 291       // TODO: Should use a ShouldNotReachHereNode...
 292       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 293     }
 294   } else {
 295     if (C->method()) {
 296       if (C->method()->has_scalarized_args()) {
 297         // Add entry point to unpack all inline type arguments
 298         C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
 299         if (!C->method()->is_static()) {
 300           // Add verified/unverified entry points to only unpack inline type receiver at interface calls
 301           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
 302           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true,  /* receiver_only */ true));
 303           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
 304         }
 305       } else if (!C->method()->is_static()) {
 306         // Insert unvalidated entry point
 307         C->cfg()->insert(broot, 0, new MachUEPNode());
 308       }
 309     }
 310   }
 311 
 312   // Break before main entry point
 313   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 314       (OptoBreakpoint && C->is_method_compilation())       ||
 315       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 316       (OptoBreakpointC2R && !C->method())                   ) {
 317     // checking for C->method() means that OptoBreakpoint does not apply to
 318     // runtime stubs or frame converters
 319     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 320   }
 321 
 322   // Insert epilogs before every return
 323   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 324     Block* block = C->cfg()->get_block(i);
 325     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 326       Node* m = block->end();
 327       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 328         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 329         block->add_inst(epilog);
 330         C->cfg()->map_node_to_block(epilog, block);
 331       }
 332     }
 333   }
 334 
 335   // Keeper of sizing aspects
 336   _buf_sizes = BufferSizingData();
 337 
 338   // Initialize code buffer
 339   estimate_buffer_size(_buf_sizes._const);
 340   if (C->failing()) return;
 341 
 342   // Pre-compute the length of blocks and replace
 343   // long branches with short if machine supports it.
 344   // Must be done before ScheduleAndBundle due to SPARC delay slots
 345   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 346   blk_starts[0] = 0;
 347   shorten_branches(blk_starts);
 348 
 349   if (!C->is_osr_compilation() && C->has_scalarized_args()) {
 350     // Compute the offsets of the entry points required by the inline type calling convention
 351     if (!C->method()->is_static()) {
 352       // We have entries at the beginning of the method, implemented by the first 4 nodes.
 353       // Entry                     (unverified) @ offset 0
 354       // Verified_Inline_Entry_RO
 355       // Inline_Entry              (unverified)
 356       // Verified_Inline_Entry
 357       uint offset = 0;
 358       _code_offsets.set_value(CodeOffsets::Entry, offset);
 359 
 360       offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
 361       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
 362 
 363       offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
 364       _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
 365 
 366       offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
 367       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
 368     } else {
 369       _code_offsets.set_value(CodeOffsets::Entry, CodeOffsets::no_such_entry_point); // will be patched later
 370       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
 371     }
 372   }
 373 
 374   ScheduleAndBundle();
 375   if (C->failing()) {
 376     return;
 377   }
 378 
 379   perform_mach_node_analysis();
 380 
 381   // Complete sizing of codebuffer
 382   CodeBuffer* cb = init_buffer();
 383   if (cb == nullptr || C->failing()) {
 384     return;
 385   }
 386 
 387   BuildOopMaps();
 388 
 389   if (C->failing())  {
 390     return;
 391   }
 392 
 393   C2_MacroAssembler masm(cb);
 394   fill_buffer(&masm, blk_starts);
 395   if (C->failing()) {
 396     // If we bailed out during matching, not all nodes were visited and the
 397     // label might be in inconsistent state (used but not bound). Reset it.
 398     verified_entry.reset();
 399   }
 400 }
 401 
 402 bool PhaseOutput::need_stack_bang(int frame_size_in_bytes) const {
 403   // Determine if we need to generate a stack overflow check.
 404   // Do it if the method is not a stub function and
 405   // has java calls or has frame size > vm_page_size/8.
 406   // The debug VM checks that deoptimization doesn't trigger an
 407   // unexpected stack overflow (compiled method stack banging should
 408   // guarantee it doesn't happen) so we always need the stack bang in
 409   // a debug VM.
 410   return (C->stub_function() == nullptr &&
 411           (C->has_java_calls() || frame_size_in_bytes > (int)(os::vm_page_size())>>3
 412            DEBUG_ONLY(|| true)));
 413 }
 414 
 415 bool PhaseOutput::need_register_stack_bang() const {
 416   // Determine if we need to generate a register stack overflow check.
 417   // This is only used on architectures which have split register
 418   // and memory stacks.
 419   // Bang if the method is not a stub function and has java calls
 420   return (C->stub_function() == nullptr && C->has_java_calls());
 421 }
 422 
 423 
 424 // Compute the size of first NumberOfLoopInstrToAlign instructions at the top
 425 // of a loop. When aligning a loop we need to provide enough instructions
 426 // in cpu's fetch buffer to feed decoders. The loop alignment could be
 427 // avoided if we have enough instructions in fetch buffer at the head of a loop.
 428 // By default, the size is set to 999999 by Block's constructor so that
 429 // a loop will be aligned if the size is not reset here.
 430 //
 431 // Note: Mach instructions could contain several HW instructions
 432 // so the size is estimated only.
 433 //
 434 void PhaseOutput::compute_loop_first_inst_sizes() {
 435   // The next condition is used to gate the loop alignment optimization.
 436   // Don't aligned a loop if there are enough instructions at the head of a loop
 437   // or alignment padding is larger then MaxLoopPad. By default, MaxLoopPad
 438   // is equal to OptoLoopAlignment-1 except on new Intel cpus, where it is
 439   // equal to 11 bytes which is the largest address NOP instruction.
 440   if (MaxLoopPad < OptoLoopAlignment - 1) {
 441     uint last_block = C->cfg()->number_of_blocks() - 1;
 442     for (uint i = 1; i <= last_block; i++) {
 443       Block* block = C->cfg()->get_block(i);
 444       // Check the first loop's block which requires an alignment.
 445       if (block->loop_alignment() > (uint)relocInfo::addr_unit()) {
 446         uint sum_size = 0;
 447         uint inst_cnt = NumberOfLoopInstrToAlign;
 448         inst_cnt = block->compute_first_inst_size(sum_size, inst_cnt, C->regalloc());
 449 
 450         // Check subsequent fallthrough blocks if the loop's first
 451         // block(s) does not have enough instructions.
 452         Block *nb = block;
 453         while(inst_cnt > 0 &&
 454               i < last_block &&
 455               !C->cfg()->get_block(i + 1)->has_loop_alignment() &&
 456               !nb->has_successor(block)) {
 457           i++;
 458           nb = C->cfg()->get_block(i);
 459           inst_cnt  = nb->compute_first_inst_size(sum_size, inst_cnt, C->regalloc());
 460         } // while( inst_cnt > 0 && i < last_block  )
 461 
 462         block->set_first_inst_size(sum_size);
 463       } // f( b->head()->is_Loop() )
 464     } // for( i <= last_block )
 465   } // if( MaxLoopPad < OptoLoopAlignment-1 )
 466 }
 467 
 468 // The architecture description provides short branch variants for some long
 469 // branch instructions. Replace eligible long branches with short branches.
 470 void PhaseOutput::shorten_branches(uint* blk_starts) {
 471 
 472   Compile::TracePhase tp(_t_shortenBranches);
 473 
 474   // Compute size of each block, method size, and relocation information size
 475   uint nblocks  = C->cfg()->number_of_blocks();
 476 
 477   uint*      jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
 478   uint*      jmp_size   = NEW_RESOURCE_ARRAY(uint,nblocks);
 479   int*       jmp_nidx   = NEW_RESOURCE_ARRAY(int ,nblocks);
 480 
 481   // Collect worst case block paddings
 482   int* block_worst_case_pad = NEW_RESOURCE_ARRAY(int, nblocks);
 483   memset(block_worst_case_pad, 0, nblocks * sizeof(int));
 484 
 485   DEBUG_ONLY( uint *jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks); )
 486   DEBUG_ONLY( uint *jmp_rule = NEW_RESOURCE_ARRAY(uint,nblocks); )
 487 
 488   bool has_short_branch_candidate = false;
 489 
 490   // Initialize the sizes to 0
 491   int code_size  = 0;          // Size in bytes of generated code
 492   int stub_size  = 0;          // Size in bytes of all stub entries
 493   // Size in bytes of all relocation entries, including those in local stubs.
 494   // Start with 2-bytes of reloc info for the unvalidated entry point
 495   int reloc_size = 1;          // Number of relocation entries
 496 
 497   // Make three passes.  The first computes pessimistic blk_starts,
 498   // relative jmp_offset and reloc_size information.  The second performs
 499   // short branch substitution using the pessimistic sizing.  The
 500   // third inserts nops where needed.
 501 
 502   // Step one, perform a pessimistic sizing pass.
 503   uint last_call_adr = max_juint;
 504   uint last_avoid_back_to_back_adr = max_juint;
 505   uint nop_size = (new MachNopNode())->size(C->regalloc());
 506   for (uint i = 0; i < nblocks; i++) { // For all blocks
 507     Block* block = C->cfg()->get_block(i);
 508     _block = block;
 509 
 510     // During short branch replacement, we store the relative (to blk_starts)
 511     // offset of jump in jmp_offset, rather than the absolute offset of jump.
 512     // This is so that we do not need to recompute sizes of all nodes when
 513     // we compute correct blk_starts in our next sizing pass.
 514     jmp_offset[i] = 0;
 515     jmp_size[i]   = 0;
 516     jmp_nidx[i]   = -1;
 517     DEBUG_ONLY( jmp_target[i] = 0; )
 518     DEBUG_ONLY( jmp_rule[i]   = 0; )
 519 
 520     // Sum all instruction sizes to compute block size
 521     uint last_inst = block->number_of_nodes();
 522     uint blk_size = 0;
 523     for (uint j = 0; j < last_inst; j++) {
 524       _index = j;
 525       Node* nj = block->get_node(_index);
 526       // Handle machine instruction nodes
 527       if (nj->is_Mach()) {
 528         MachNode* mach = nj->as_Mach();
 529         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 530         reloc_size += mach->reloc();
 531         if (mach->is_MachCall()) {
 532           // add size information for trampoline stub
 533           // class CallStubImpl is platform-specific and defined in the *.ad files.
 534           stub_size  += CallStubImpl::size_call_trampoline();
 535           reloc_size += CallStubImpl::reloc_call_trampoline();
 536 
 537           MachCallNode *mcall = mach->as_MachCall();
 538           // This destination address is NOT PC-relative
 539 
 540           if (mcall->entry_point() != nullptr) {
 541             mcall->method_set((intptr_t)mcall->entry_point());
 542           }
 543 
 544           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 545             stub_size  += CompiledDirectCall::to_interp_stub_size();
 546             reloc_size += CompiledDirectCall::reloc_to_interp_stub();
 547           }
 548         } else if (mach->is_MachSafePoint()) {
 549           // If call/safepoint are adjacent, account for possible
 550           // nop to disambiguate the two safepoints.
 551           // ScheduleAndBundle() can rearrange nodes in a block,
 552           // check for all offsets inside this block.
 553           if (last_call_adr >= blk_starts[i]) {
 554             blk_size += nop_size;
 555           }
 556         }
 557         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 558           // Nop is inserted between "avoid back to back" instructions.
 559           // ScheduleAndBundle() can rearrange nodes in a block,
 560           // check for all offsets inside this block.
 561           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 562             blk_size += nop_size;
 563           }
 564         }
 565         if (mach->may_be_short_branch()) {
 566           if (!nj->is_MachBranch()) {
 567 #ifndef PRODUCT
 568             nj->dump(3);
 569 #endif
 570             Unimplemented();
 571           }
 572           assert(jmp_nidx[i] == -1, "block should have only one branch");
 573           jmp_offset[i] = blk_size;
 574           jmp_size[i]   = nj->size(C->regalloc());
 575           jmp_nidx[i]   = j;
 576           has_short_branch_candidate = true;
 577         }
 578       }
 579       blk_size += nj->size(C->regalloc());
 580       // Remember end of call offset
 581       if (nj->is_MachCall() && !nj->is_MachCallLeaf()) {
 582         last_call_adr = blk_starts[i]+blk_size;
 583       }
 584       // Remember end of avoid_back_to_back offset
 585       if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
 586         last_avoid_back_to_back_adr = blk_starts[i]+blk_size;
 587       }
 588     }
 589 
 590     // When the next block starts a loop, we may insert pad NOP
 591     // instructions.  Since we cannot know our future alignment,
 592     // assume the worst.
 593     if (i < nblocks - 1) {
 594       Block* nb = C->cfg()->get_block(i + 1);
 595       int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
 596       if (max_loop_pad > 0) {
 597         assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
 598         // Adjust last_call_adr and/or last_avoid_back_to_back_adr.
 599         // If either is the last instruction in this block, bump by
 600         // max_loop_pad in lock-step with blk_size, so sizing
 601         // calculations in subsequent blocks still can conservatively
 602         // detect that it may the last instruction in this block.
 603         if (last_call_adr == blk_starts[i]+blk_size) {
 604           last_call_adr += max_loop_pad;
 605         }
 606         if (last_avoid_back_to_back_adr == blk_starts[i]+blk_size) {
 607           last_avoid_back_to_back_adr += max_loop_pad;
 608         }
 609         blk_size += max_loop_pad;
 610         block_worst_case_pad[i + 1] = max_loop_pad;
 611       }
 612     }
 613 
 614     // Save block size; update total method size
 615     blk_starts[i+1] = blk_starts[i]+blk_size;
 616   }
 617 
 618   // Step two, replace eligible long jumps.
 619   bool progress = true;
 620   uint last_may_be_short_branch_adr = max_juint;
 621   while (has_short_branch_candidate && progress) {
 622     progress = false;
 623     has_short_branch_candidate = false;
 624     int adjust_block_start = 0;
 625     for (uint i = 0; i < nblocks; i++) {
 626       Block* block = C->cfg()->get_block(i);
 627       int idx = jmp_nidx[i];
 628       MachNode* mach = (idx == -1) ? nullptr: block->get_node(idx)->as_Mach();
 629       if (mach != nullptr && mach->may_be_short_branch()) {
 630 #ifdef ASSERT
 631         assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
 632         int j;
 633         // Find the branch; ignore trailing NOPs.
 634         for (j = block->number_of_nodes()-1; j>=0; j--) {
 635           Node* n = block->get_node(j);
 636           if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
 637             break;
 638         }
 639         assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity");
 640 #endif
 641         int br_size = jmp_size[i];
 642         int br_offs = blk_starts[i] + jmp_offset[i];
 643 
 644         // This requires the TRUE branch target be in succs[0]
 645         uint bnum = block->non_connector_successor(0)->_pre_order;
 646         int offset = blk_starts[bnum] - br_offs;
 647         if (bnum > i) { // adjust following block's offset
 648           offset -= adjust_block_start;
 649         }
 650 
 651         // This block can be a loop header, account for the padding
 652         // in the previous block.
 653         int block_padding = block_worst_case_pad[i];
 654         assert(i == 0 || block_padding == 0 || br_offs >= block_padding, "Should have at least a padding on top");
 655         // In the following code a nop could be inserted before
 656         // the branch which will increase the backward distance.
 657         bool needs_padding = ((uint)(br_offs - block_padding) == last_may_be_short_branch_adr);
 658         assert(!needs_padding || jmp_offset[i] == 0, "padding only branches at the beginning of block");
 659 
 660         if (needs_padding && offset <= 0)
 661           offset -= nop_size;
 662 
 663         if (C->matcher()->is_short_branch_offset(mach->rule(), br_size, offset)) {
 664           // We've got a winner.  Replace this branch.
 665           MachNode* replacement = mach->as_MachBranch()->short_branch_version();
 666 
 667           // Update the jmp_size.
 668           int new_size = replacement->size(C->regalloc());
 669           int diff     = br_size - new_size;
 670           assert(diff >= (int)nop_size, "short_branch size should be smaller");
 671           // Conservatively take into account padding between
 672           // avoid_back_to_back branches. Previous branch could be
 673           // converted into avoid_back_to_back branch during next
 674           // rounds.
 675           if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 676             jmp_offset[i] += nop_size;
 677             diff -= nop_size;
 678           }
 679           adjust_block_start += diff;
 680           block->map_node(replacement, idx);
 681           mach->subsume_by(replacement, C);
 682           mach = replacement;
 683           progress = true;
 684 
 685           jmp_size[i] = new_size;
 686           DEBUG_ONLY( jmp_target[i] = bnum; );
 687           DEBUG_ONLY( jmp_rule[i] = mach->rule(); );
 688         } else {
 689           // The jump distance is not short, try again during next iteration.
 690           has_short_branch_candidate = true;
 691         }
 692       } // (mach->may_be_short_branch())
 693       if (mach != nullptr && (mach->may_be_short_branch() ||
 694                            mach->avoid_back_to_back(MachNode::AVOID_AFTER))) {
 695         last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i];
 696       }
 697       blk_starts[i+1] -= adjust_block_start;
 698     }
 699   }
 700 
 701 #ifdef ASSERT
 702   for (uint i = 0; i < nblocks; i++) { // For all blocks
 703     if (jmp_target[i] != 0) {
 704       int br_size = jmp_size[i];
 705       int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_offset[i]);
 706       if (!C->matcher()->is_short_branch_offset(jmp_rule[i], br_size, offset)) {
 707         tty->print_cr("target (%d) - jmp_offset(%d) = offset (%d), jump_size(%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_offset[i], offset, br_size, i, jmp_target[i]);
 708       }
 709       assert(C->matcher()->is_short_branch_offset(jmp_rule[i], br_size, offset), "Displacement too large for short jmp");
 710     }
 711   }
 712 #endif
 713 
 714   // Step 3, compute the offsets of all blocks, will be done in fill_buffer()
 715   // after ScheduleAndBundle().
 716 
 717   // ------------------
 718   // Compute size for code buffer
 719   code_size = blk_starts[nblocks];
 720 
 721   // Relocation records
 722   reloc_size += 1;              // Relo entry for exception handler
 723 
 724   // Adjust reloc_size to number of record of relocation info
 725   // Min is 2 bytes, max is probably 6 or 8, with a tax up to 25% for
 726   // a relocation index.
 727   // The CodeBuffer will expand the locs array if this estimate is too low.
 728   reloc_size *= 10 / sizeof(relocInfo);
 729 
 730   _buf_sizes._reloc = reloc_size;
 731   _buf_sizes._code  = code_size;
 732   _buf_sizes._stub  = stub_size;
 733 }
 734 
 735 //------------------------------FillLocArray-----------------------------------
 736 // Create a bit of debug info and append it to the array.  The mapping is from
 737 // Java local or expression stack to constant, register or stack-slot.  For
 738 // doubles, insert 2 mappings and return 1 (to tell the caller that the next
 739 // entry has been taken care of and caller should skip it).
 740 static LocationValue *new_loc_value( PhaseRegAlloc *ra, OptoReg::Name regnum, Location::Type l_type ) {
 741   // This should never have accepted Bad before
 742   assert(OptoReg::is_valid(regnum), "location must be valid");
 743   return (OptoReg::is_reg(regnum))
 744          ? new LocationValue(Location::new_reg_loc(l_type, OptoReg::as_VMReg(regnum)) )
 745          : new LocationValue(Location::new_stk_loc(l_type,  ra->reg2offset(regnum)));
 746 }
 747 
 748 
 749 ObjectValue*
 750 PhaseOutput::sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id) {
 751   for (int i = 0; i < objs->length(); i++) {
 752     assert(objs->at(i)->is_object(), "corrupt object cache");
 753     ObjectValue* sv = objs->at(i)->as_ObjectValue();
 754     if (sv->id() == id) {
 755       return sv;
 756     }
 757   }
 758   // Otherwise..
 759   return nullptr;
 760 }
 761 
 762 void PhaseOutput::set_sv_for_object_node(GrowableArray<ScopeValue*> *objs,
 763                                      ObjectValue* sv ) {
 764   assert(sv_for_node_id(objs, sv->id()) == nullptr, "Precondition");
 765   objs->append(sv);
 766 }
 767 
 768 
 769 void PhaseOutput::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
 770                             GrowableArray<ScopeValue*> *array,
 771                             GrowableArray<ScopeValue*> *objs ) {
 772   assert( local, "use _top instead of null" );
 773   if (array->length() != idx) {
 774     assert(array->length() == idx + 1, "Unexpected array count");
 775     // Old functionality:
 776     //   return
 777     // New functionality:
 778     //   Assert if the local is not top. In product mode let the new node
 779     //   override the old entry.
 780     assert(local == C->top(), "LocArray collision");
 781     if (local == C->top()) {
 782       return;
 783     }
 784     array->pop();
 785   }
 786   const Type *t = local->bottom_type();
 787 
 788   // Is it a safepoint scalar object node?
 789   if (local->is_SafePointScalarObject()) {
 790     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 791 
 792     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 793     if (sv == nullptr) {
 794       ciKlass* cik = t->is_oopptr()->exact_klass();
 795       assert(cik->is_instance_klass() ||
 796              cik->is_array_klass(), "Not supported allocation.");
 797       uint first_ind = spobj->first_index(sfpt->jvms());
 798       // Nullable, scalarized inline types have a null_marker input
 799       // that needs to be checked before using the field values.
 800       ScopeValue* properties = nullptr;
 801       if (cik->is_inlinetype()) {
 802         Node* null_marker_node = sfpt->in(first_ind++);
 803         assert(null_marker_node != nullptr, "null_marker node not found");
 804         if (!null_marker_node->is_top()) {
 805           const TypeInt* null_marker_type = null_marker_node->bottom_type()->is_int();
 806           if (null_marker_node->is_Con()) {
 807             properties = new ConstantIntValue(null_marker_type->get_con());
 808           } else {
 809             OptoReg::Name null_marker_reg = C->regalloc()->get_reg_first(null_marker_node);
 810             properties = new_loc_value(C->regalloc(), null_marker_reg, Location::normal);
 811           }
 812         }
 813       }
 814       if (cik->is_array_klass() && !cik->is_type_array_klass()) {
 815         ciArrayKlass* ciak = cik->as_array_klass();
 816         const bool is_element_inline = ciak->element_klass()->is_inlinetype();
 817 
 818         const ArrayProperties props = ArrayProperties::Default()
 819           .with_null_restricted(is_element_inline && ciak->is_elem_null_free())
 820           .with_non_atomic(is_element_inline && !ciak->is_elem_atomic());
 821 
 822         properties = new ConstantIntValue((jint)props.value());
 823       }
 824       sv = new ObjectValue(spobj->_idx,
 825                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, properties);
 826       set_sv_for_object_node(objs, sv);
 827 
 828       for (uint i = 0; i < spobj->n_fields(); i++) {
 829         Node* fld_node = sfpt->in(first_ind+i);
 830         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 831       }
 832     }
 833     array->append(sv);
 834     return;
 835   } else if (local->is_SafePointScalarMerge()) {
 836     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 837     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 838 
 839     if (mv == nullptr) {
 840       GrowableArray<ScopeValue*> deps;
 841 
 842       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 843       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 844       assert(deps.length() == 1, "missing value");
 845 
 846       int selector_idx = smerge->selector_idx(sfpt->jvms());
 847       (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);
 848       assert(deps.length() == 2, "missing value");
 849 
 850       mv = new ObjectMergeValue(smerge->_idx, deps.at(0), deps.at(1));
 851       set_sv_for_object_node(objs, mv);
 852 
 853       for (uint i = 1; i < smerge->req(); i++) {
 854         Node* obj_node = smerge->in(i);
 855         int idx = mv->possible_objects()->length();
 856         (void)FillLocArray(idx, sfpt, obj_node, mv->possible_objects(), objs);
 857 
 858         // By default ObjectValues that are in 'possible_objects' are not root objects.
 859         // They will be marked as root later if they are directly referenced in a JVMS.
 860         assert(mv->possible_objects()->length() > idx, "Didn't add entry to possible_objects?!");
 861         assert(mv->possible_objects()->at(idx)->is_object(), "Entries in possible_objects should be ObjectValue.");
 862         mv->possible_objects()->at(idx)->as_ObjectValue()->set_root(false);
 863       }
 864     }
 865     array->append(mv);
 866     return;
 867   }
 868 
 869   // Grab the register number for the local
 870   OptoReg::Name regnum = C->regalloc()->get_reg_first(local);
 871   if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
 872     // Record the double as two float registers.
 873     // The register mask for such a value always specifies two adjacent
 874     // float registers, with the lower register number even.
 875     // Normally, the allocation of high and low words to these registers
 876     // is irrelevant, because nearly all operations on register pairs
 877     // (e.g., StoreD) treat them as a single unit.
 878     // Here, we assume in addition that the words in these two registers
 879     // stored "naturally" (by operations like StoreD and double stores
 880     // within the interpreter) such that the lower-numbered register
 881     // is written to the lower memory address.  This may seem like
 882     // a machine dependency, but it is not--it is a requirement on
 883     // the author of the <arch>.ad file to ensure that, for every
 884     // even/odd double-register pair to which a double may be allocated,
 885     // the word in the even single-register is stored to the first
 886     // memory word.  (Note that register numbers are completely
 887     // arbitrary, and are not tied to any machine-level encodings.)
 888 #ifdef _LP64
 889     if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon ) {
 890       array->append(new ConstantIntValue((jint)0));
 891       array->append(new_loc_value( C->regalloc(), regnum, Location::dbl ));
 892     } else if ( t->base() == Type::Long ) {
 893       array->append(new ConstantIntValue((jint)0));
 894       array->append(new_loc_value( C->regalloc(), regnum, Location::lng ));
 895     } else if ( t->base() == Type::RawPtr ) {
 896       // jsr/ret return address which must be restored into the full
 897       // width 64-bit stack slot.
 898       array->append(new_loc_value( C->regalloc(), regnum, Location::lng ));
 899     }
 900 #else //_LP64
 901     if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon || t->base() == Type::Long ) {
 902       // Repack the double/long as two jints.
 903       // The convention the interpreter uses is that the second local
 904       // holds the first raw word of the native double representation.
 905       // This is actually reasonable, since locals and stack arrays
 906       // grow downwards in all implementations.
 907       // (If, on some machine, the interpreter's Java locals or stack
 908       // were to grow upwards, the embedded doubles would be word-swapped.)
 909       array->append(new_loc_value( C->regalloc(), OptoReg::add(regnum,1), Location::normal ));
 910       array->append(new_loc_value( C->regalloc(),              regnum   , Location::normal ));
 911     }
 912 #endif //_LP64
 913     else if( (t->base() == Type::FloatBot || t->base() == Type::FloatCon) &&
 914              OptoReg::is_reg(regnum) ) {
 915       array->append(new_loc_value( C->regalloc(), regnum, Matcher::float_in_double()
 916                                                       ? Location::float_in_dbl : Location::normal ));
 917     } else if( t->base() == Type::Int && OptoReg::is_reg(regnum) ) {
 918       array->append(new_loc_value( C->regalloc(), regnum, Matcher::int_in_long
 919                                                       ? Location::int_in_long : Location::normal ));
 920     } else if( t->base() == Type::NarrowOop ) {
 921       array->append(new_loc_value( C->regalloc(), regnum, Location::narrowoop ));
 922     } else if (t->base() == Type::VectorA || t->base() == Type::VectorS ||
 923                t->base() == Type::VectorD || t->base() == Type::VectorX ||
 924                t->base() == Type::VectorY || t->base() == Type::VectorZ) {
 925       array->append(new_loc_value( C->regalloc(), regnum, Location::vector ));
 926     } else if (C->regalloc()->is_oop(local)) {
 927       assert(t->base() == Type::OopPtr || t->base() == Type::InstPtr ||
 928              t->base() == Type::AryPtr,
 929              "Unexpected type: %s", t->msg());
 930       array->append(new_loc_value( C->regalloc(), regnum, Location::oop ));
 931     } else {
 932       assert(t->base() == Type::Int || t->base() == Type::Half ||
 933              t->base() == Type::FloatCon || t->base() == Type::FloatBot,
 934              "Unexpected type: %s", t->msg());
 935       array->append(new_loc_value( C->regalloc(), regnum, Location::normal ));
 936     }
 937     return;
 938   }
 939 
 940   // No register.  It must be constant data.
 941   switch (t->base()) {
 942     case Type::Half:              // Second half of a double
 943       ShouldNotReachHere();       // Caller should skip 2nd halves
 944       break;
 945     case Type::AnyPtr:
 946       array->append(new ConstantOopWriteValue(nullptr));
 947       break;
 948     case Type::AryPtr:
 949     case Type::InstPtr:          // fall through
 950       array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->constant_encoding()));
 951       break;
 952     case Type::NarrowOop:
 953       if (t == TypeNarrowOop::NULL_PTR) {
 954         array->append(new ConstantOopWriteValue(nullptr));
 955       } else {
 956         array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->constant_encoding()));
 957       }
 958       break;
 959     case Type::Int:
 960       array->append(new ConstantIntValue(t->is_int()->get_con()));
 961       break;
 962     case Type::RawPtr:
 963       // A return address (T_ADDRESS).
 964       assert((intptr_t)t->is_ptr()->get_con() < (intptr_t)0x10000, "must be a valid BCI");
 965 #ifdef _LP64
 966       // Must be restored to the full-width 64-bit stack slot.
 967       array->append(new ConstantLongValue(t->is_ptr()->get_con()));
 968 #else
 969       array->append(new ConstantIntValue(t->is_ptr()->get_con()));
 970 #endif
 971       break;
 972     case Type::FloatCon: {
 973       float f = t->is_float_constant()->getf();
 974       array->append(new ConstantIntValue(jint_cast(f)));
 975       break;
 976     }
 977     case Type::DoubleCon: {
 978       jdouble d = t->is_double_constant()->getd();
 979 #ifdef _LP64
 980       array->append(new ConstantIntValue((jint)0));
 981       array->append(new ConstantDoubleValue(d));
 982 #else
 983       // Repack the double as two jints.
 984     // The convention the interpreter uses is that the second local
 985     // holds the first raw word of the native double representation.
 986     // This is actually reasonable, since locals and stack arrays
 987     // grow downwards in all implementations.
 988     // (If, on some machine, the interpreter's Java locals or stack
 989     // were to grow upwards, the embedded doubles would be word-swapped.)
 990     jlong_accessor acc;
 991     acc.long_value = jlong_cast(d);
 992     array->append(new ConstantIntValue(acc.words[1]));
 993     array->append(new ConstantIntValue(acc.words[0]));
 994 #endif
 995       break;
 996     }
 997     case Type::Long: {
 998       jlong d = t->is_long()->get_con();
 999 #ifdef _LP64
1000       array->append(new ConstantIntValue((jint)0));
1001       array->append(new ConstantLongValue(d));
1002 #else
1003       // Repack the long as two jints.
1004     // The convention the interpreter uses is that the second local
1005     // holds the first raw word of the native double representation.
1006     // This is actually reasonable, since locals and stack arrays
1007     // grow downwards in all implementations.
1008     // (If, on some machine, the interpreter's Java locals or stack
1009     // were to grow upwards, the embedded doubles would be word-swapped.)
1010     jlong_accessor acc;
1011     acc.long_value = d;
1012     array->append(new ConstantIntValue(acc.words[1]));
1013     array->append(new ConstantIntValue(acc.words[0]));
1014 #endif
1015       break;
1016     }
1017     case Type::Top:               // Add an illegal value here
1018       array->append(new LocationValue(Location()));
1019       break;
1020     default:
1021       ShouldNotReachHere();
1022       break;
1023   }
1024 }
1025 
1026 // Determine if this node starts a bundle
1027 bool PhaseOutput::starts_bundle(const Node *n) const {
1028   return (_node_bundling_limit > n->_idx &&
1029           _node_bundling_base[n->_idx].starts_bundle());
1030 }
1031 
1032 // Determine if there is a monitor that has 'ov' as its owner.
1033 bool PhaseOutput::contains_as_owner(GrowableArray<MonitorValue*> *monarray, ObjectValue *ov) const {
1034   for (int k = 0; k < monarray->length(); k++) {
1035     MonitorValue* mv = monarray->at(k);
1036     if (mv->owner() == ov) {
1037       return true;
1038     }
1039   }
1040 
1041   return false;
1042 }
1043 
1044 // Determine if there is a scalar replaced object description represented by 'ov'.
1045 bool PhaseOutput::contains_as_scalarized_obj(JVMState* jvms, MachSafePointNode* sfn,
1046                                              GrowableArray<ScopeValue*>* objs,
1047                                              ObjectValue* ov) const {
1048   for (int i = 0; i < jvms->scl_size(); i++) {
1049     Node* n = sfn->scalarized_obj(jvms, i);
1050     // Other kinds of nodes that we may encounter here, for instance constants
1051     // representing values of fields of objects scalarized, aren't relevant for
1052     // us, since they don't map to ObjectValue.
1053     if (!n->is_SafePointScalarObject()) {
1054       continue;
1055     }
1056 
1057     ObjectValue* other = sv_for_node_id(objs, n->_idx);
1058     if (ov == other) {
1059       return true;
1060     }
1061   }
1062   return false;
1063 }
1064 
1065 //--------------------------Process_OopMap_Node--------------------------------
1066 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1067   // Handle special safepoint nodes for synchronization
1068   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1069   MachCallNode      *mcall;
1070 
1071   int safepoint_pc_offset = current_offset;
1072   bool return_oop = false;
1073   bool return_scalarized = false;
1074   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1075   bool arg_escape = false;
1076 
1077   // Add the safepoint in the DebugInfoRecorder
1078   if( !mach->is_MachCall() ) {
1079     mcall = nullptr;
1080     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1081   } else {
1082     mcall = mach->as_MachCall();
1083 
1084     if (mcall->is_MachCallJava()) {
1085       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1086     }
1087 
1088     // Check if a call returns an object.
1089     if (mcall->returns_pointer() || mcall->returns_scalarized()) {
1090       return_oop = true;
1091     }
1092     if (mcall->returns_scalarized()) {
1093       return_scalarized = true;
1094     }
1095     safepoint_pc_offset += mcall->ret_addr_offset();
1096     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1097   }
1098 
1099   // Loop over the JVMState list to add scope information
1100   // Do not skip safepoints with a null method, they need monitor info
1101   JVMState* youngest_jvms = sfn->jvms();
1102   int max_depth = youngest_jvms->depth();
1103 
1104   // Allocate the object pool for scalar-replaced objects -- the map from
1105   // small-integer keys (which can be recorded in the local and ostack
1106   // arrays) to descriptions of the object state.
1107   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1108 
1109   // Visit scopes from oldest to youngest.
1110   for (int depth = 1; depth <= max_depth; depth++) {
1111     JVMState* jvms = youngest_jvms->of_depth(depth);
1112     int idx;
1113     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1114     // Safepoints that do not have method() set only provide oop-map and monitor info
1115     // to support GC; these do not support deoptimization.
1116     int num_locs = (method == nullptr) ? 0 : jvms->loc_size();
1117     int num_exps = (method == nullptr) ? 0 : jvms->stk_size();
1118     int num_mon  = jvms->nof_monitors();
1119     assert(method == nullptr || jvms->bci() < 0 || num_locs == method->max_locals(),
1120            "JVMS local count must match that of the method");
1121 
1122     // Add Local and Expression Stack Information
1123 
1124     // Insert locals into the locarray
1125     GrowableArray<ScopeValue*> *locarray = new GrowableArray<ScopeValue*>(num_locs);
1126     for( idx = 0; idx < num_locs; idx++ ) {
1127       FillLocArray( idx, sfn, sfn->local(jvms, idx), locarray, objs );
1128     }
1129 
1130     // Insert expression stack entries into the exparray
1131     GrowableArray<ScopeValue*> *exparray = new GrowableArray<ScopeValue*>(num_exps);
1132     for( idx = 0; idx < num_exps; idx++ ) {
1133       FillLocArray( idx,  sfn, sfn->stack(jvms, idx), exparray, objs );
1134     }
1135 
1136     // Add in mappings of the monitors
1137     assert( !method ||
1138             !method->is_synchronized() ||
1139             method->is_native() ||
1140             num_mon > 0,
1141             "monitors must always exist for synchronized methods");
1142 
1143     // Build the growable array of ScopeValues for exp stack
1144     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
1145 
1146     // Loop over monitors and insert into array
1147     for (idx = 0; idx < num_mon; idx++) {
1148       // Grab the node that defines this monitor
1149       Node* box_node = sfn->monitor_box(jvms, idx);
1150       Node* obj_node = sfn->monitor_obj(jvms, idx);
1151 
1152       // Create ScopeValue for object
1153       ScopeValue *scval = nullptr;
1154 
1155       if (obj_node->is_SafePointScalarObject()) {
1156         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
1157         scval = PhaseOutput::sv_for_node_id(objs, spobj->_idx);
1158         if (scval == nullptr) {
1159           const Type *t = spobj->bottom_type();
1160           ciKlass* cik = t->is_oopptr()->exact_klass();
1161           assert(cik->is_instance_klass() ||
1162                  cik->is_array_klass(), "Not supported allocation.");
1163           assert(!cik->is_inlinetype(), "Synchronization on value object?");
1164           ScopeValue* properties = nullptr;
1165           if (cik->is_array_klass() && !cik->is_type_array_klass()) {
1166             ciArrayKlass* ciak = cik->as_array_klass();
1167             const bool is_element_inline = ciak->element_klass()->is_inlinetype();
1168 
1169             const ArrayProperties props = ArrayProperties::Default()
1170               .with_null_restricted(is_element_inline && ciak->is_elem_null_free())
1171               .with_non_atomic(is_element_inline && !ciak->is_elem_atomic());
1172 
1173             properties = new ConstantIntValue((jint)props.value());
1174           }
1175           ObjectValue* sv = new ObjectValue(spobj->_idx,
1176                                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, properties);
1177           PhaseOutput::set_sv_for_object_node(objs, sv);
1178 
1179           uint first_ind = spobj->first_index(youngest_jvms);
1180           for (uint i = 0; i < spobj->n_fields(); i++) {
1181             Node* fld_node = sfn->in(first_ind+i);
1182             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
1183           }
1184           scval = sv;
1185         }
1186       } else if (obj_node->is_SafePointScalarMerge()) {
1187         SafePointScalarMergeNode* smerge = obj_node->as_SafePointScalarMerge();
1188         ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
1189 
1190         if (mv == nullptr) {
1191           GrowableArray<ScopeValue*> deps;
1192 
1193           int merge_pointer_idx = smerge->merge_pointer_idx(youngest_jvms);
1194           FillLocArray(0, sfn, sfn->in(merge_pointer_idx), &deps, objs);
1195           assert(deps.length() == 1, "missing value");
1196 
1197           int selector_idx = smerge->selector_idx(youngest_jvms);
1198           FillLocArray(1, nullptr, sfn->in(selector_idx), &deps, nullptr);
1199           assert(deps.length() == 2, "missing value");
1200 
1201           mv = new ObjectMergeValue(smerge->_idx, deps.at(0), deps.at(1));
1202           set_sv_for_object_node(objs, mv);
1203 
1204           for (uint i = 1; i < smerge->req(); i++) {
1205             Node* obj_node = smerge->in(i);
1206             int idx = mv->possible_objects()->length();
1207             (void)FillLocArray(idx, sfn, obj_node, mv->possible_objects(), objs);
1208 
1209             // By default ObjectValues that are in 'possible_objects' are not root objects.
1210             // They will be marked as root later if they are directly referenced in a JVMS.
1211             assert(mv->possible_objects()->length() > idx, "Didn't add entry to possible_objects?!");
1212             assert(mv->possible_objects()->at(idx)->is_object(), "Entries in possible_objects should be ObjectValue.");
1213             mv->possible_objects()->at(idx)->as_ObjectValue()->set_root(false);
1214           }
1215         }
1216         scval = mv;
1217       } else if (!obj_node->is_Con()) {
1218         OptoReg::Name obj_reg = C->regalloc()->get_reg_first(obj_node);
1219         if( obj_node->bottom_type()->base() == Type::NarrowOop ) {
1220           scval = new_loc_value( C->regalloc(), obj_reg, Location::narrowoop );
1221         } else {
1222           scval = new_loc_value( C->regalloc(), obj_reg, Location::oop );
1223         }
1224       } else {
1225         const TypePtr *tp = obj_node->get_ptr_type();
1226         scval = new ConstantOopWriteValue(tp->is_oopptr()->const_oop()->constant_encoding());
1227       }
1228 
1229       OptoReg::Name box_reg = BoxLockNode::reg(box_node);
1230       Location basic_lock = Location::new_stk_loc(Location::normal,C->regalloc()->reg2offset(box_reg));
1231       bool eliminated = (box_node->is_BoxLock() && box_node->as_BoxLock()->is_eliminated());
1232       monarray->append(new MonitorValue(scval, basic_lock, eliminated));
1233     }
1234 
1235     // Mark ObjectValue nodes as root nodes if they are directly
1236     // referenced in the JVMS.
1237     for (int i = 0; i < objs->length(); i++) {
1238       ScopeValue* sv = objs->at(i);
1239       if (sv->is_object_merge()) {
1240         ObjectMergeValue* merge = sv->as_ObjectMergeValue();
1241 
1242         for (int j = 0; j< merge->possible_objects()->length(); j++) {
1243           ObjectValue* ov = merge->possible_objects()->at(j)->as_ObjectValue();
1244           if (ov->is_root()) {
1245             // Already flagged as 'root' by something else. We shouldn't change it
1246             // to non-root in a younger JVMS because it may need to be alive in
1247             // a younger JVMS.
1248           } else {
1249             bool is_root = locarray->contains(ov) ||
1250                            exparray->contains(ov) ||
1251                            contains_as_owner(monarray, ov) ||
1252                            contains_as_scalarized_obj(jvms, sfn, objs, ov);
1253             ov->set_root(is_root);
1254           }
1255         }
1256       }
1257     }
1258 
1259     // We dump the object pool first, since deoptimization reads it in first.
1260     C->debug_info()->dump_object_pool(objs);
1261 
1262     // Build first class objects to pass to scope
1263     DebugToken *locvals = C->debug_info()->create_scope_values(locarray);
1264     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1265     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1266 
1267     // Make method available for all Safepoints
1268     ciMethod* scope_method = method ? method : C->method();
1269     // Describe the scope here
1270     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1271     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1272     // Now we can describe the scope.
1273     methodHandle null_mh;
1274     bool rethrow_exception = false;
1275     C->debug_info()->describe_scope(
1276       safepoint_pc_offset,
1277       null_mh,
1278       scope_method,
1279       jvms->bci(),
1280       jvms->should_reexecute(),
1281       rethrow_exception,
1282       return_oop,
1283       return_scalarized,
1284       has_ea_local_in_scope,
1285       arg_escape,
1286       locvals,
1287       expvals,
1288       monvals
1289     );
1290   } // End jvms loop
1291 
1292   // Mark the end of the scope set.
1293   C->debug_info()->end_safepoint(safepoint_pc_offset);
1294 }
1295 
1296 
1297 
1298 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1299 class NonSafepointEmitter {
1300     Compile*  C;
1301     JVMState* _pending_jvms;
1302     int       _pending_offset;
1303 
1304     void emit_non_safepoint();
1305 
1306  public:
1307     NonSafepointEmitter(Compile* compile) {
1308       this->C = compile;
1309       _pending_jvms = nullptr;
1310       _pending_offset = 0;
1311     }
1312 
1313     void observe_instruction(Node* n, int pc_offset) {
1314       if (!C->debug_info()->recording_non_safepoints())  return;
1315 
1316       Node_Notes* nn = C->node_notes_at(n->_idx);
1317       if (nn == nullptr || nn->jvms() == nullptr)  return;
1318       if (_pending_jvms != nullptr &&
1319           _pending_jvms->same_calls_as(nn->jvms())) {
1320         // Repeated JVMS?  Stretch it up here.
1321         _pending_offset = pc_offset;
1322       } else {
1323         if (_pending_jvms != nullptr &&
1324             _pending_offset < pc_offset) {
1325           emit_non_safepoint();
1326         }
1327         _pending_jvms = nullptr;
1328         if (pc_offset > C->debug_info()->last_pc_offset()) {
1329           // This is the only way _pending_jvms can become non-null:
1330           _pending_jvms = nn->jvms();
1331           _pending_offset = pc_offset;
1332         }
1333       }
1334     }
1335 
1336     // Stay out of the way of real safepoints:
1337     void observe_safepoint(JVMState* jvms, int pc_offset) {
1338       if (_pending_jvms != nullptr &&
1339           !_pending_jvms->same_calls_as(jvms) &&
1340           _pending_offset < pc_offset) {
1341         emit_non_safepoint();
1342       }
1343       _pending_jvms = nullptr;
1344     }
1345 
1346     void flush_at_end() {
1347       if (_pending_jvms != nullptr) {
1348         emit_non_safepoint();
1349       }
1350       _pending_jvms = nullptr;
1351     }
1352 };
1353 
1354 void NonSafepointEmitter::emit_non_safepoint() {
1355   JVMState* youngest_jvms = _pending_jvms;
1356   int       pc_offset     = _pending_offset;
1357 
1358   // Clear it now:
1359   _pending_jvms = nullptr;
1360 
1361   DebugInformationRecorder* debug_info = C->debug_info();
1362   assert(debug_info->recording_non_safepoints(), "sanity");
1363 
1364   debug_info->add_non_safepoint(pc_offset);
1365   int max_depth = youngest_jvms->depth();
1366 
1367   // Visit scopes from oldest to youngest.
1368   for (int depth = 1; depth <= max_depth; depth++) {
1369     JVMState* jvms = youngest_jvms->of_depth(depth);
1370     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1371     assert(!jvms->should_reexecute() || depth==max_depth, "reexecute allowed only for the youngest");
1372     methodHandle null_mh;
1373     debug_info->describe_scope(pc_offset, null_mh, method, jvms->bci(), jvms->should_reexecute());
1374   }
1375 
1376   // Mark the end of the scope set.
1377   debug_info->end_non_safepoint(pc_offset);
1378 }
1379 
1380 //------------------------------init_buffer------------------------------------
1381 void PhaseOutput::estimate_buffer_size(int& const_req) {
1382 
1383   // Set the initially allocated size
1384   const_req = initial_const_capacity;
1385 
1386   // The extra spacing after the code is necessary on some platforms.
1387   // Sometimes we need to patch in a jump after the last instruction,
1388   // if the nmethod has been deoptimized.  (See 4932387, 4894843.)
1389 
1390   // Compute the byte offset where we can store the deopt pc.
1391   if (C->fixed_slots() != 0) {
1392     _orig_pc_slot_offset_in_bytes = C->regalloc()->reg2offset(OptoReg::stack2reg(_orig_pc_slot));
1393   }
1394 
1395   // Compute prolog code size
1396   _frame_slots = OptoReg::reg2stack(C->matcher()->_old_SP) + C->regalloc()->_framesize;
1397   assert(_frame_slots >= 0 && _frame_slots < 1000000, "sanity check");
1398 
1399   if (C->has_mach_constant_base_node()) {
1400     uint add_size = 0;
1401     // Fill the constant table.
1402     // Note:  This must happen before shorten_branches.
1403     for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
1404       Block* b = C->cfg()->get_block(i);
1405 
1406       for (uint j = 0; j < b->number_of_nodes(); j++) {
1407         Node* n = b->get_node(j);
1408 
1409         // If the node is a MachConstantNode evaluate the constant
1410         // value section.
1411         if (n->is_MachConstant()) {
1412           MachConstantNode* machcon = n->as_MachConstant();
1413           machcon->eval_constant(C);
1414         } else if (n->is_Mach()) {
1415           // On Power there are more nodes that issue constants.
1416           add_size += (n->as_Mach()->ins_num_consts() * 8);
1417         }
1418       }
1419     }
1420 
1421     // Calculate the offsets of the constants and the size of the
1422     // constant table (including the padding to the next section).
1423     constant_table().calculate_offsets_and_size();
1424     const_req = constant_table().alignment() + constant_table().size() + add_size;
1425   }
1426 
1427   // Initialize the space for the BufferBlob used to find and verify
1428   // instruction size in MachNode::emit_size()
1429   init_scratch_buffer_blob(const_req);
1430 }
1431 
1432 CodeBuffer* PhaseOutput::init_buffer() {
1433   int stub_req  = _buf_sizes._stub;
1434   int code_req  = _buf_sizes._code;
1435   int const_req = _buf_sizes._const;
1436 
1437   int pad_req   = NativeCall::byte_size();
1438 
1439   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1440   stub_req += bs->estimate_stub_size();
1441 
1442   // nmethod and CodeBuffer count stubs & constants as part of method's code.
1443   // class HandlerImpl is platform-specific and defined in the *.ad files.
1444   int deopt_handler_req     = HandlerImpl::size_deopt_handler()     + MAX_stubs_size; // add marginal slop for handler
1445   stub_req += MAX_stubs_size;   // ensure per-stub margin
1446   code_req += MAX_inst_size;    // ensure per-instruction margin
1447 
1448   if (StressCodeBuffers)
1449     code_req = const_req = stub_req = deopt_handler_req = 0x10;  // force expansion
1450 
1451   int total_req =
1452           const_req +
1453           code_req +
1454           pad_req +
1455           stub_req +
1456           deopt_handler_req;               // deopt handler
1457 
1458   CodeBuffer* cb = code_buffer();
1459   cb->set_const_section_alignment(constant_table().alignment());
1460   cb->initialize(total_req, _buf_sizes._reloc);
1461 
1462   // Have we run out of code space?
1463   if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1464     C->record_failure("CodeCache is full");
1465     return nullptr;
1466   }
1467   // Configure the code buffer.
1468   cb->initialize_consts_size(const_req);
1469   cb->initialize_stubs_size(stub_req);
1470   cb->initialize_oop_recorder(C->env()->oop_recorder());
1471 
1472   return cb;
1473 }
1474 
1475 //------------------------------fill_buffer------------------------------------
1476 void PhaseOutput::fill_buffer(C2_MacroAssembler* masm, uint* blk_starts) {
1477   // blk_starts[] contains offsets calculated during short branches processing,
1478   // offsets should not be increased during following steps.
1479 
1480   // Compute the size of first NumberOfLoopInstrToAlign instructions at head
1481   // of a loop. It is used to determine the padding for loop alignment.
1482   Compile::TracePhase tp(_t_fillBuffer);
1483 
1484   compute_loop_first_inst_sizes();
1485 
1486   // Create oopmap set.
1487   _oop_map_set = new OopMapSet();
1488 
1489   // !!!!! This preserves old handling of oopmaps for now
1490   C->debug_info()->set_oopmaps(_oop_map_set);
1491 
1492   uint nblocks  = C->cfg()->number_of_blocks();
1493   // Count and start of implicit null check instructions
1494   uint inct_cnt = 0;
1495   uint* inct_starts = NEW_RESOURCE_ARRAY(uint, nblocks+1);
1496 
1497   // Count and start of calls
1498   uint* call_returns = NEW_RESOURCE_ARRAY(uint, nblocks+1);
1499 
1500   uint  return_offset = 0;
1501   int nop_size = (new MachNopNode())->size(C->regalloc());
1502 
1503   int previous_offset = 0;
1504   int current_offset  = 0;
1505   int last_call_offset = -1;
1506   int last_avoid_back_to_back_offset = -1;
1507 #ifdef ASSERT
1508   uint* jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks);
1509   uint* jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
1510   uint* jmp_size   = NEW_RESOURCE_ARRAY(uint,nblocks);
1511   uint* jmp_rule   = NEW_RESOURCE_ARRAY(uint,nblocks);
1512 #endif
1513 
1514   // Create an array of unused labels, one for each basic block, if printing is enabled
1515 #if defined(SUPPORT_OPTO_ASSEMBLY)
1516   int* node_offsets      = nullptr;
1517   uint node_offset_limit = C->unique();
1518 
1519   if (C->print_assembly()) {
1520     node_offsets = NEW_RESOURCE_ARRAY(int, node_offset_limit);
1521   }
1522   if (node_offsets != nullptr) {
1523     // We need to initialize. Unused array elements may contain garbage and mess up PrintOptoAssembly.
1524     memset(node_offsets, 0, node_offset_limit*sizeof(int));
1525   }
1526 #endif
1527 
1528   NonSafepointEmitter non_safepoints(C);  // emit non-safepoints lazily
1529 
1530   // Emit the constant table.
1531   if (C->has_mach_constant_base_node()) {
1532     if (!constant_table().emit(masm)) {
1533       C->record_failure("consts section overflow");
1534       return;
1535     }
1536   }
1537 
1538   // Create an array of labels, one for each basic block
1539   Label* blk_labels = NEW_RESOURCE_ARRAY(Label, nblocks+1);
1540   for (uint i = 0; i <= nblocks; i++) {
1541     blk_labels[i].init();
1542   }
1543 
1544   // Now fill in the code buffer
1545   for (uint i = 0; i < nblocks; i++) {
1546     Block* block = C->cfg()->get_block(i);
1547     _block = block;
1548     Node* head = block->head();
1549 
1550     // If this block needs to start aligned (i.e, can be reached other
1551     // than by falling-thru from the previous block), then force the
1552     // start of a new bundle.
1553     if (Pipeline::requires_bundling() && starts_bundle(head)) {
1554       masm->code()->flush_bundle(true);
1555     }
1556 
1557 #ifdef ASSERT
1558     if (!block->is_connector()) {
1559       stringStream st;
1560       block->dump_head(C->cfg(), &st);
1561       masm->block_comment(st.freeze());
1562     }
1563     jmp_target[i] = 0;
1564     jmp_offset[i] = 0;
1565     jmp_size[i]   = 0;
1566     jmp_rule[i]   = 0;
1567 #endif
1568     int blk_offset = current_offset;
1569 
1570     // Define the label at the beginning of the basic block
1571     masm->bind(blk_labels[block->_pre_order]);
1572 
1573     uint last_inst = block->number_of_nodes();
1574 
1575     // Emit block normally, except for last instruction.
1576     // Emit means "dump code bits into code buffer".
1577     for (uint j = 0; j<last_inst; j++) {
1578       _index = j;
1579 
1580       // Get the node
1581       Node* n = block->get_node(j);
1582 
1583       // If this starts a new instruction group, then flush the current one
1584       // (but allow split bundles)
1585       if (Pipeline::requires_bundling() && starts_bundle(n))
1586         masm->code()->flush_bundle(false);
1587 
1588       // Special handling for SafePoint/Call Nodes
1589       bool is_mcall = false;
1590       if (n->is_Mach()) {
1591         MachNode *mach = n->as_Mach();
1592         is_mcall = n->is_MachCall();
1593         bool is_sfn = n->is_MachSafePoint();
1594 
1595         // If this requires all previous instructions be flushed, then do so
1596         if (is_sfn || is_mcall || mach->alignment_required() != 1) {
1597           masm->code()->flush_bundle(true);
1598           current_offset = masm->offset();
1599         }
1600 
1601         // align the instruction if necessary
1602         int padding = mach->compute_padding(current_offset);
1603         // Make sure safepoint node for polling is distinct from a call's
1604         // return by adding a nop if needed.
1605         if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
1606           padding = nop_size;
1607         }
1608         if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
1609             current_offset == last_avoid_back_to_back_offset) {
1610           // Avoid back to back some instructions.
1611           padding = nop_size;
1612         }
1613 
1614         if (padding > 0) {
1615           assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
1616           int nops_cnt = padding / nop_size;
1617           MachNode *nop = new MachNopNode(nops_cnt);
1618           block->insert_node(nop, j++);
1619           last_inst++;
1620           C->cfg()->map_node_to_block(nop, block);
1621           // Ensure enough space.
1622           masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1623           if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1624             C->record_failure("CodeCache is full");
1625             return;
1626           }
1627           nop->emit(masm, C->regalloc());
1628           masm->code()->flush_bundle(true);
1629           current_offset = masm->offset();
1630         }
1631 
1632         bool observe_safepoint = is_sfn;
1633         // Remember the start of the last call in a basic block
1634         if (is_mcall) {
1635           MachCallNode *mcall = mach->as_MachCall();
1636 
1637           if (mcall->entry_point() != nullptr) {
1638             // This destination address is NOT PC-relative
1639             mcall->method_set((intptr_t)mcall->entry_point());
1640           }
1641 
1642           // Save the return address
1643           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1644 
1645           observe_safepoint = mcall->guaranteed_safepoint();
1646         }
1647 
1648         // sfn will be valid whenever mcall is valid now because of inheritance
1649         if (observe_safepoint) {
1650           // Handle special safepoint nodes for synchronization
1651           if (!is_mcall) {
1652             MachSafePointNode *sfn = mach->as_MachSafePoint();
1653             // !!!!! Stubs only need an oopmap right now, so bail out
1654             if (sfn->jvms()->method() == nullptr) {
1655               // Write the oopmap directly to the code blob??!!
1656               continue;
1657             }
1658           } // End synchronization
1659 
1660           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1661                                            current_offset);
1662           Process_OopMap_Node(mach, current_offset);
1663         } // End if safepoint
1664 
1665           // If this is a null check, then add the start of the previous instruction to the list
1666         else if( mach->is_MachNullCheck() ) {
1667           inct_starts[inct_cnt++] = previous_offset;
1668         }
1669 
1670           // If this is a branch, then fill in the label with the target BB's label
1671         else if (mach->is_MachBranch()) {
1672           // This requires the TRUE branch target be in succs[0]
1673           uint block_num = block->non_connector_successor(0)->_pre_order;
1674 
1675           // Try to replace long branch,
1676           // it is mostly for back branches since forward branch's
1677           // distance is not updated yet.
1678           if (mach->may_be_short_branch()) {
1679             int br_size = n->size(C->regalloc());
1680             int offset = blk_starts[block_num] - current_offset;
1681             if (block_num >= i) {
1682               // Current and following block's offset are not
1683               // finalized yet, adjust distance by the difference
1684               // between calculated and final offsets of current block.
1685               offset -= (blk_starts[i] - blk_offset);
1686             }
1687             // In the following code a nop could be inserted before
1688             // the branch which will increase the backward distance.
1689             bool needs_padding = (current_offset == last_avoid_back_to_back_offset);
1690             if (needs_padding && offset <= 0)
1691               offset -= nop_size;
1692 
1693             if (C->matcher()->is_short_branch_offset(mach->rule(), br_size, offset)) {
1694               // We've got a winner.  Replace this branch.
1695               MachNode* replacement = mach->as_MachBranch()->short_branch_version();
1696 
1697               // Update the jmp_size.
1698               int new_size = replacement->size(C->regalloc());
1699               assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
1700               // Insert padding between avoid_back_to_back branches.
1701               if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
1702                 MachNode *nop = new MachNopNode();
1703                 block->insert_node(nop, j++);
1704                 C->cfg()->map_node_to_block(nop, block);
1705                 last_inst++;
1706                 nop->emit(masm, C->regalloc());
1707                 masm->code()->flush_bundle(true);
1708                 current_offset = masm->offset();
1709               }
1710 #ifdef ASSERT
1711               jmp_target[i] = block_num;
1712               jmp_offset[i] = current_offset - blk_offset;
1713               jmp_size[i]   = new_size;
1714               jmp_rule[i]   = mach->rule();
1715 #endif
1716               block->map_node(replacement, j);
1717               mach->subsume_by(replacement, C);
1718               n    = replacement;
1719               mach = replacement;
1720             }
1721           }
1722           mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num );
1723         } else if (mach->ideal_Opcode() == Op_Jump) {
1724           for (uint h = 0; h < block->_num_succs; h++) {
1725             Block* succs_block = block->_succs[h];
1726             for (uint j = 1; j < succs_block->num_preds(); j++) {
1727               Node* jpn = succs_block->pred(j);
1728               if (jpn->is_JumpProj() && jpn->in(0) == mach) {
1729                 uint block_num = succs_block->non_connector()->_pre_order;
1730                 Label *blkLabel = &blk_labels[block_num];
1731                 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1732               }
1733             }
1734           }
1735         } else if (!n->is_Proj()) {
1736           // Remember the beginning of the previous instruction, in case
1737           // it's followed by a flag-kill and a null-check.  Happens on
1738           // Intel all the time, with add-to-memory kind of opcodes.
1739           previous_offset = current_offset;
1740         }
1741 
1742         // Not an else-if!
1743         // If this is a trap based cmp then add its offset to the list.
1744         if (mach->is_TrapBasedCheckNode()) {
1745           inct_starts[inct_cnt++] = current_offset;
1746         }
1747       }
1748 
1749       // Verify that there is sufficient space remaining
1750       masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1751       if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1752         C->record_failure("CodeCache is full");
1753         return;
1754       }
1755 
1756       // Save the offset for the listing
1757 #if defined(SUPPORT_OPTO_ASSEMBLY)
1758       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1759         node_offsets[n->_idx] = masm->offset();
1760       }
1761 #endif
1762       assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1763 
1764       // "Normal" instruction case
1765       DEBUG_ONLY(uint instr_offset = masm->offset());
1766       n->emit(masm, C->regalloc());
1767       current_offset = masm->offset();
1768 
1769       // Above we only verified that there is enough space in the instruction section.
1770       // However, the instruction may emit stubs that cause code buffer expansion.
1771       // Bail out here if expansion failed due to a lack of code cache space.
1772       if (C->failing()) {
1773         return;
1774       }
1775 
1776       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1777              "ret_addr_offset() not within emitted code");
1778 #ifdef ASSERT
1779       uint n_size = n->size(C->regalloc());
1780       if (n_size < (current_offset-instr_offset)) {
1781         MachNode* mach = n->as_Mach();
1782         n->dump();
1783         mach->dump_format(C->regalloc(), tty);
1784         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1785         Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
1786         tty->print_cr(" ------------------- ");
1787         BufferBlob* blob = this->scratch_buffer_blob();
1788         address blob_begin = blob->content_begin();
1789         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1790         assert(false, "wrong size of mach node");
1791       }
1792 #endif
1793       non_safepoints.observe_instruction(n, current_offset);
1794 
1795       // mcall is last "call" that can be a safepoint
1796       // record it so we can see if a poll will directly follow it
1797       // in which case we'll need a pad to make the PcDesc sites unique
1798       // see  5010568. This can be slightly inaccurate but conservative
1799       // in the case that return address is not actually at current_offset.
1800       // This is a small price to pay.
1801 
1802       if (is_mcall) {
1803         last_call_offset = current_offset;
1804       }
1805 
1806       if (n->is_Mach() && n->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
1807         // Avoid back to back some instructions.
1808         last_avoid_back_to_back_offset = current_offset;
1809       }
1810 
1811     } // End for all instructions in block
1812 
1813     // If the next block is the top of a loop, pad this block out to align
1814     // the loop top a little. Helps prevent pipe stalls at loop back branches.
1815     if (i < nblocks-1) {
1816       Block *nb = C->cfg()->get_block(i + 1);
1817       int padding = nb->alignment_padding(current_offset);
1818       if( padding > 0 ) {
1819         MachNode *nop = new MachNopNode(padding / nop_size);
1820         block->insert_node(nop, block->number_of_nodes());
1821         C->cfg()->map_node_to_block(nop, block);
1822         nop->emit(masm, C->regalloc());
1823         current_offset = masm->offset();
1824       }
1825     }
1826     // Verify that the distance for generated before forward
1827     // short branches is still valid.
1828     guarantee((int)(blk_starts[i+1] - blk_starts[i]) >= (current_offset - blk_offset), "shouldn't increase block size");
1829 
1830     // Save new block start offset
1831     blk_starts[i] = blk_offset;
1832   } // End of for all blocks
1833   blk_starts[nblocks] = current_offset;
1834 
1835   non_safepoints.flush_at_end();
1836 
1837   // Offset too large?
1838   if (C->failing())  return;
1839 
1840   // Define a pseudo-label at the end of the code
1841   masm->bind( blk_labels[nblocks] );
1842 
1843   // Compute the size of the first block
1844   _first_block_size = blk_labels[1].loc_pos() - blk_labels[0].loc_pos();
1845 
1846 #ifdef ASSERT
1847   for (uint i = 0; i < nblocks; i++) { // For all blocks
1848     if (jmp_target[i] != 0) {
1849       int br_size = jmp_size[i];
1850       int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_offset[i]);
1851       if (!C->matcher()->is_short_branch_offset(jmp_rule[i], br_size, offset)) {
1852         tty->print_cr("target (%d) - jmp_offset(%d) = offset (%d), jump_size(%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_offset[i], offset, br_size, i, jmp_target[i]);
1853         assert(false, "Displacement too large for short jmp");
1854       }
1855     }
1856   }
1857 #endif
1858 
1859   if (!masm->code()->finalize_stubs()) {
1860     C->record_failure("CodeCache is full");
1861     return;
1862   }
1863 
1864   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1865   bs->emit_stubs(*masm->code());
1866   if (C->failing())  return;
1867 
1868   // Fill in stubs.
1869   assert(masm->inst_mark() == nullptr, "should be.");
1870   _stub_list.emit(*masm);
1871   if (C->failing())  return;
1872 
1873 #ifndef PRODUCT
1874   // Information on the size of the method, without the extraneous code
1875   Scheduling::increment_method_size(masm->offset());
1876 #endif
1877 
1878   // ------------------
1879   // Fill in exception table entries.
1880   FillExceptionTables(inct_cnt, call_returns, inct_starts, blk_labels);
1881 
1882   // Only java methods have exception handlers and deopt handlers
1883   // class HandlerImpl is platform-specific and defined in the *.ad files.
1884   if (C->method()) {
1885     if (C->failing()) {
1886       return; // CodeBuffer::expand failed
1887     }
1888     // Emit the deopt handler code.
1889     _code_offsets.set_value(CodeOffsets::Deopt, HandlerImpl::emit_deopt_handler(masm));
1890   }
1891 
1892   // One last check for failed CodeBuffer::expand:
1893   if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1894     C->record_failure("CodeCache is full");
1895     return;
1896   }
1897 
1898 #if defined(SUPPORT_ABSTRACT_ASSEMBLY) || defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_OPTO_ASSEMBLY)
1899   if (C->print_assembly()) {
1900     tty->cr();
1901     tty->print_cr("============================= C2-compiled nmethod ==============================");
1902   }
1903 #endif
1904 
1905 #if defined(SUPPORT_OPTO_ASSEMBLY)
1906   // Dump the assembly code, including basic-block numbers
1907   if (C->print_assembly()) {
1908     ttyLocker ttyl;  // keep the following output all in one block
1909     if (!VMThread::should_terminate()) {  // test this under the tty lock
1910       // print_metadata and dump_asm may safepoint which makes us loose the ttylock.
1911       // We call them first and write to a stringStream, then we retake the lock to
1912       // make sure the end tag is coherent, and that xmlStream->pop_tag is done thread safe.
1913       ResourceMark rm;
1914       stringStream method_metadata_str;
1915       if (C->method() != nullptr) {
1916         C->method()->print_metadata(&method_metadata_str);
1917       }
1918       stringStream dump_asm_str;
1919       dump_asm_on(&dump_asm_str, node_offsets, node_offset_limit);
1920 
1921       NoSafepointVerifier nsv;
1922       ttyLocker ttyl2;
1923       // This output goes directly to the tty, not the compiler log.
1924       // To enable tools to match it up with the compilation activity,
1925       // be sure to tag this tty output with the compile ID.
1926       if (xtty != nullptr) {
1927         xtty->head("opto_assembly compile_id='%d'%s", C->compile_id(),
1928                    C->is_osr_compilation() ? " compile_kind='osr'" : "");
1929       }
1930       if (C->method() != nullptr) {
1931         tty->print_cr("----------------------- MetaData before Compile_id = %d ------------------------", C->compile_id());
1932         tty->print_raw(method_metadata_str.freeze());
1933       } else if (C->stub_name() != nullptr) {
1934         tty->print_cr("----------------------------- RuntimeStub %s -------------------------------", C->stub_name());
1935       }
1936       tty->cr();
1937       tty->print_cr("------------------------ OptoAssembly for Compile_id = %d -----------------------", C->compile_id());
1938       tty->print_raw(dump_asm_str.freeze());
1939       tty->print_cr("--------------------------------------------------------------------------------");
1940       if (xtty != nullptr) {
1941         xtty->tail("opto_assembly");
1942       }
1943     }
1944   }
1945 #endif
1946 }
1947 
1948 void PhaseOutput::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
1949   _inc_table.set_size(cnt);
1950 
1951   uint inct_cnt = 0;
1952   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
1953     Block* block = C->cfg()->get_block(i);
1954     Node *n = nullptr;
1955     int j;
1956 
1957     // Find the branch; ignore trailing NOPs.
1958     for (j = block->number_of_nodes() - 1; j >= 0; j--) {
1959       n = block->get_node(j);
1960       if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
1961         break;
1962       }
1963     }
1964 
1965     // If we didn't find anything, continue
1966     if (j < 0) {
1967       continue;
1968     }
1969 
1970     // Compute ExceptionHandlerTable subtable entry and add it
1971     // (skip empty blocks)
1972     if (n->is_Catch()) {
1973 
1974       // Get the offset of the return from the call
1975       uint call_return = call_returns[block->_pre_order];
1976 #ifdef ASSERT
1977       assert( call_return > 0, "no call seen for this basic block" );
1978       while (block->get_node(--j)->is_MachProj()) ;
1979       assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
1980 #endif
1981       // last instruction is a CatchNode, find it's CatchProjNodes
1982       int nof_succs = block->_num_succs;
1983       // allocate space
1984       GrowableArray<intptr_t> handler_bcis(nof_succs);
1985       GrowableArray<intptr_t> handler_pcos(nof_succs);
1986       // iterate through all successors
1987       for (int j = 0; j < nof_succs; j++) {
1988         Block* s = block->_succs[j];
1989         bool found_p = false;
1990         for (uint k = 1; k < s->num_preds(); k++) {
1991           Node* pk = s->pred(k);
1992           if (pk->is_CatchProj() && pk->in(0) == n) {
1993             const CatchProjNode* p = pk->as_CatchProj();
1994             found_p = true;
1995             // add the corresponding handler bci & pco information
1996             if (p->_con != CatchProjNode::fall_through_index) {
1997               // p leads to an exception handler (and is not fall through)
1998               assert(s == C->cfg()->get_block(s->_pre_order), "bad numbering");
1999               // no duplicates, please
2000               if (!handler_bcis.contains(p->handler_bci())) {
2001                 uint block_num = s->non_connector()->_pre_order;
2002                 handler_bcis.append(p->handler_bci());
2003                 handler_pcos.append(blk_labels[block_num].loc_pos());
2004               }
2005             }
2006           }
2007         }
2008         assert(found_p, "no matching predecessor found");
2009         // Note:  Due to empty block removal, one block may have
2010         // several CatchProj inputs, from the same Catch.
2011       }
2012 
2013       // Set the offset of the return from the call
2014       assert(handler_bcis.find(-1) != -1, "must have default handler");
2015       _handler_table.add_subtable(call_return, &handler_bcis, nullptr, &handler_pcos);
2016       continue;
2017     }
2018 
2019     // Handle implicit null exception table updates
2020     if (n->is_MachNullCheck()) {
2021       MachNode* access = n->in(1)->as_Mach();
2022       assert(access->barrier_data() == 0 ||
2023              access->is_late_expanded_null_check_candidate(),
2024              "Implicit null checks on memory accesses with barriers are only supported on nodes explicitly marked as null-check candidates");
2025       uint block_num = block->non_connector_successor(0)->_pre_order;
2026       _inc_table.append(inct_starts[inct_cnt++], blk_labels[block_num].loc_pos());
2027       continue;
2028     }
2029     // Handle implicit exception table updates: trap instructions.
2030     if (n->is_Mach() && n->as_Mach()->is_TrapBasedCheckNode()) {
2031       uint block_num = block->non_connector_successor(0)->_pre_order;
2032       _inc_table.append(inct_starts[inct_cnt++], blk_labels[block_num].loc_pos());
2033       continue;
2034     }
2035   } // End of for all blocks fill in exception table entries
2036 }
2037 
2038 // Static Variables
2039 #ifndef PRODUCT
2040 uint Scheduling::_total_nop_size = 0;
2041 uint Scheduling::_total_method_size = 0;
2042 uint Scheduling::_total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1];
2043 #endif
2044 
2045 // Initializer for class Scheduling
2046 
2047 Scheduling::Scheduling(Arena *arena, Compile &compile)
2048         : _arena(arena),
2049           _cfg(compile.cfg()),
2050           _regalloc(compile.regalloc()),
2051           _scheduled(arena),
2052           _available(arena),
2053           _reg_node(arena),
2054           _pinch_free_list(arena),
2055           _next_node(nullptr),
2056           _bundle_instr_count(0),
2057           _bundle_cycle_number(0),
2058           _bundle_use(0, 0, resource_count, &_bundle_use_elements[0])
2059 {
2060   // Save the count
2061   _node_bundling_limit = compile.unique();
2062   uint node_max = _regalloc->node_regs_max_index();
2063 
2064   compile.output()->set_node_bundling_limit(_node_bundling_limit);
2065 
2066   // This one is persistent within the Compile class
2067   _node_bundling_base = NEW_ARENA_ARRAY(compile.comp_arena(), Bundle, node_max);
2068 
2069   // Allocate space for fixed-size arrays
2070   _uses            = NEW_ARENA_ARRAY(arena, short,          node_max);
2071   _current_latency = NEW_ARENA_ARRAY(arena, unsigned short, node_max);
2072 
2073   // Clear the arrays
2074   for (uint i = 0; i < node_max; i++) {
2075     ::new (&_node_bundling_base[i]) Bundle();
2076   }
2077   memset(_uses,               0, node_max * sizeof(short));
2078   memset(_current_latency,    0, node_max * sizeof(unsigned short));
2079 
2080   // Clear the bundling information
2081   memcpy(_bundle_use_elements, Pipeline_Use::elaborated_elements, sizeof(Pipeline_Use::elaborated_elements));
2082 
2083   // Get the last node
2084   Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1);
2085 
2086   _next_node = block->get_node(block->number_of_nodes() - 1);
2087 }
2088 
2089 // Step ahead "i" cycles
2090 void Scheduling::step(uint i) {
2091 
2092   Bundle *bundle = node_bundling(_next_node);
2093   bundle->set_starts_bundle();
2094 
2095   // Update the bundle record, but leave the flags information alone
2096   if (_bundle_instr_count > 0) {
2097     bundle->set_instr_count(_bundle_instr_count);
2098     bundle->set_resources_used(_bundle_use.resourcesUsed());
2099   }
2100 
2101   // Update the state information
2102   _bundle_instr_count = 0;
2103   _bundle_cycle_number += i;
2104   _bundle_use.step(i);
2105 }
2106 
2107 void Scheduling::step_and_clear() {
2108   Bundle *bundle = node_bundling(_next_node);
2109   bundle->set_starts_bundle();
2110 
2111   // Update the bundle record
2112   if (_bundle_instr_count > 0) {
2113     bundle->set_instr_count(_bundle_instr_count);
2114     bundle->set_resources_used(_bundle_use.resourcesUsed());
2115 
2116     _bundle_cycle_number += 1;
2117   }
2118 
2119   // Clear the bundling information
2120   _bundle_instr_count = 0;
2121   _bundle_use.reset();
2122 
2123   memcpy(_bundle_use_elements,
2124          Pipeline_Use::elaborated_elements,
2125          sizeof(Pipeline_Use::elaborated_elements));
2126 }
2127 
2128 // Perform instruction scheduling and bundling over the sequence of
2129 // instructions in backwards order.
2130 void PhaseOutput::ScheduleAndBundle() {
2131 
2132   // Don't optimize this if it isn't a method
2133   if (!C->method())
2134     return;
2135 
2136   // Don't optimize this if scheduling is disabled
2137   if (!C->do_scheduling())
2138     return;
2139 
2140   // Scheduling code works only with pairs (8 bytes) maximum.
2141   // And when the scalable vector register is used, we may spill/unspill
2142   // the whole reg regardless of the max vector size.
2143   if (C->max_vector_size() > 8 ||
2144       (C->max_vector_size() > 0 && Matcher::supports_scalable_vector())) {
2145     return;
2146   }
2147 
2148   Compile::TracePhase tp(_t_instrSched);
2149 
2150   // Create a data structure for all the scheduling information
2151   Scheduling scheduling(Thread::current()->resource_area(), *C);
2152 
2153   // Walk backwards over each basic block, computing the needed alignment
2154   // Walk over all the basic blocks
2155   scheduling.DoScheduling();
2156 
2157 #ifndef PRODUCT
2158   if (C->trace_opto_output()) {
2159     // Buffer and print all at once
2160     ResourceMark rm;
2161     stringStream ss;
2162     ss.print("\n---- After ScheduleAndBundle ----\n");
2163     print_scheduling(&ss);
2164     tty->print("%s", ss.as_string());
2165   }
2166 #endif
2167 }
2168 
2169 #ifndef PRODUCT
2170 // Separated out so that it can be called directly from debugger
2171 void PhaseOutput::print_scheduling() {
2172   print_scheduling(tty);
2173 }
2174 
2175 void PhaseOutput::print_scheduling(outputStream* output_stream) {
2176   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
2177     output_stream->print("\nBB#%03d:\n", i);
2178     Block* block = C->cfg()->get_block(i);
2179     for (uint j = 0; j < block->number_of_nodes(); j++) {
2180       Node* n = block->get_node(j);
2181       OptoReg::Name reg = C->regalloc()->get_reg_first(n);
2182       output_stream->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
2183       n->dump("\n", false, output_stream);
2184     }
2185   }
2186 }
2187 #endif
2188 
2189 // See if this node fits into the present instruction bundle
2190 bool Scheduling::NodeFitsInBundle(Node *n) {
2191   uint n_idx = n->_idx;
2192 
2193   // If the node cannot be scheduled this cycle, skip it
2194   if (_current_latency[n_idx] > _bundle_cycle_number) {
2195 #ifndef PRODUCT
2196     if (_cfg->C->trace_opto_output())
2197       tty->print("#     NodeFitsInBundle [%4d]: FALSE; latency %4d > %d\n",
2198                  n->_idx, _current_latency[n_idx], _bundle_cycle_number);
2199 #endif
2200     return (false);
2201   }
2202 
2203   const Pipeline *node_pipeline = n->pipeline();
2204 
2205   uint instruction_count = node_pipeline->instructionCount();
2206   if (node_pipeline->mayHaveNoCode() && n->size(_regalloc) == 0)
2207     instruction_count = 0;
2208 
2209   if (_bundle_instr_count + instruction_count > Pipeline::_max_instrs_per_cycle) {
2210 #ifndef PRODUCT
2211     if (_cfg->C->trace_opto_output())
2212       tty->print("#     NodeFitsInBundle [%4d]: FALSE; too many instructions: %d > %d\n",
2213                  n->_idx, _bundle_instr_count + instruction_count, Pipeline::_max_instrs_per_cycle);
2214 #endif
2215     return (false);
2216   }
2217 
2218   // Don't allow non-machine nodes to be handled this way
2219   if (!n->is_Mach() && instruction_count == 0)
2220     return (false);
2221 
2222   // See if there is any overlap
2223   uint delay = _bundle_use.full_latency(0, node_pipeline->resourceUse());
2224 
2225   if (delay > 0) {
2226 #ifndef PRODUCT
2227     if (_cfg->C->trace_opto_output())
2228       tty->print("#     NodeFitsInBundle [%4d]: FALSE; functional units overlap\n", n_idx);
2229 #endif
2230     return false;
2231   }
2232 
2233 #ifndef PRODUCT
2234   if (_cfg->C->trace_opto_output())
2235     tty->print("#     NodeFitsInBundle [%4d]:  TRUE\n", n_idx);
2236 #endif
2237 
2238   return true;
2239 }
2240 
2241 Node * Scheduling::ChooseNodeToBundle() {
2242   uint siz = _available.size();
2243 
2244   if (siz == 0) {
2245 
2246 #ifndef PRODUCT
2247     if (_cfg->C->trace_opto_output())
2248       tty->print("#   ChooseNodeToBundle: null\n");
2249 #endif
2250     return (nullptr);
2251   }
2252 
2253   // Fast path, if only 1 instruction in the bundle
2254   if (siz == 1) {
2255 #ifndef PRODUCT
2256     if (_cfg->C->trace_opto_output()) {
2257       tty->print("#   ChooseNodeToBundle (only 1): ");
2258       _available[0]->dump();
2259     }
2260 #endif
2261     return (_available[0]);
2262   }
2263 
2264   // Don't bother, if the bundle is already full
2265   if (_bundle_instr_count < Pipeline::_max_instrs_per_cycle) {
2266     for ( uint i = 0; i < siz; i++ ) {
2267       Node *n = _available[i];
2268 
2269       // Skip projections, we'll handle them another way
2270       if (n->is_Proj())
2271         continue;
2272 
2273       // This presupposed that instructions are inserted into the
2274       // available list in a legality order; i.e. instructions that
2275       // must be inserted first are at the head of the list
2276       if (NodeFitsInBundle(n)) {
2277 #ifndef PRODUCT
2278         if (_cfg->C->trace_opto_output()) {
2279           tty->print("#   ChooseNodeToBundle: ");
2280           n->dump();
2281         }
2282 #endif
2283         return (n);
2284       }
2285     }
2286   }
2287 
2288   // Nothing fits in this bundle, choose the highest priority
2289 #ifndef PRODUCT
2290   if (_cfg->C->trace_opto_output()) {
2291     tty->print("#   ChooseNodeToBundle: ");
2292     _available[0]->dump();
2293   }
2294 #endif
2295 
2296   return _available[0];
2297 }
2298 
2299 int Scheduling::compare_two_spill_nodes(Node* first, Node* second) {
2300   assert(first->is_MachSpillCopy() && second->is_MachSpillCopy(), "");
2301 
2302   OptoReg::Name first_src_lo = _regalloc->get_reg_first(first->in(1));
2303   OptoReg::Name first_dst_lo = _regalloc->get_reg_first(first);
2304   OptoReg::Name second_src_lo = _regalloc->get_reg_first(second->in(1));
2305   OptoReg::Name second_dst_lo = _regalloc->get_reg_first(second);
2306 
2307   // Comparison between stack -> reg and stack -> reg
2308   if (OptoReg::is_stack(first_src_lo) && OptoReg::is_stack(second_src_lo) &&
2309       OptoReg::is_reg(first_dst_lo) && OptoReg::is_reg(second_dst_lo)) {
2310     return _regalloc->reg2offset(first_src_lo) - _regalloc->reg2offset(second_src_lo);
2311   }
2312 
2313   // Comparison between reg -> stack and reg -> stack
2314   if (OptoReg::is_stack(first_dst_lo) && OptoReg::is_stack(second_dst_lo) &&
2315       OptoReg::is_reg(first_src_lo) && OptoReg::is_reg(second_src_lo)) {
2316     return _regalloc->reg2offset(first_dst_lo) - _regalloc->reg2offset(second_dst_lo);
2317   }
2318 
2319   return 0; // Not comparable
2320 }
2321 
2322 void Scheduling::AddNodeToAvailableList(Node *n) {
2323   assert( !n->is_Proj(), "projections never directly made available" );
2324 #ifndef PRODUCT
2325   if (_cfg->C->trace_opto_output()) {
2326     tty->print("#   AddNodeToAvailableList: ");
2327     n->dump();
2328   }
2329 #endif
2330 
2331   int latency = _current_latency[n->_idx];
2332 
2333   // Insert in latency order (insertion sort). If two MachSpillCopyNodes
2334   // for stack spilling or unspilling have the same latency, we sort
2335   // them in the order of stack offset. Some ports (e.g. aarch64) may also
2336   // have more opportunities to do ld/st merging
2337   uint i;
2338   for (i = 0; i < _available.size(); i++) {
2339     if (_current_latency[_available[i]->_idx] > latency) {
2340       break;
2341     } else if (_current_latency[_available[i]->_idx] == latency &&
2342                n->is_MachSpillCopy() && _available[i]->is_MachSpillCopy() &&
2343                compare_two_spill_nodes(n, _available[i]) > 0) {
2344       break;
2345     }
2346   }
2347 
2348   // Special Check for compares following branches
2349   if( n->is_Mach() && _scheduled.size() > 0 ) {
2350     int op = n->as_Mach()->ideal_Opcode();
2351     Node *last = _scheduled[0];
2352     if( last->is_MachIf() && last->in(1) == n &&
2353         ( op == Op_CmpI ||
2354           op == Op_CmpU ||
2355           op == Op_CmpUL ||
2356           op == Op_CmpP ||
2357           op == Op_CmpF ||
2358           op == Op_CmpD ||
2359           op == Op_CmpL ) ) {
2360 
2361       // Recalculate position, moving to front of same latency
2362       for ( i=0 ; i < _available.size(); i++ )
2363         if (_current_latency[_available[i]->_idx] >= latency)
2364           break;
2365     }
2366   }
2367 
2368   // Insert the node in the available list
2369   _available.insert(i, n);
2370 
2371 #ifndef PRODUCT
2372   if (_cfg->C->trace_opto_output())
2373     dump_available();
2374 #endif
2375 }
2376 
2377 void Scheduling::DecrementUseCounts(Node *n, const Block *bb) {
2378   for ( uint i=0; i < n->len(); i++ ) {
2379     Node *def = n->in(i);
2380     if (!def) continue;
2381     if( def->is_Proj() )        // If this is a machine projection, then
2382       def = def->in(0);         // propagate usage thru to the base instruction
2383 
2384     if(_cfg->get_block_for_node(def) != bb) { // Ignore if not block-local
2385       continue;
2386     }
2387 
2388     // Compute the latency
2389     uint l = _bundle_cycle_number + n->latency(i);
2390     if (_current_latency[def->_idx] < l)
2391       _current_latency[def->_idx] = l;
2392 
2393     // If this does not have uses then schedule it
2394     if ((--_uses[def->_idx]) == 0)
2395       AddNodeToAvailableList(def);
2396   }
2397 }
2398 
2399 void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
2400 #ifndef PRODUCT
2401   if (_cfg->C->trace_opto_output()) {
2402     tty->print("#   AddNodeToBundle: ");
2403     n->dump();
2404   }
2405 #endif
2406 
2407   // Remove this from the available list
2408   uint i;
2409   for (i = 0; i < _available.size(); i++)
2410     if (_available[i] == n)
2411       break;
2412   assert(i < _available.size(), "entry in _available list not found");
2413   _available.remove(i);
2414 
2415   // See if this fits in the current bundle
2416   const Pipeline *node_pipeline = n->pipeline();
2417   const Pipeline_Use& node_usage = node_pipeline->resourceUse();
2418 
2419 
2420   // Get the number of instructions
2421   uint instruction_count = node_pipeline->instructionCount();
2422   if (node_pipeline->mayHaveNoCode() && n->size(_regalloc) == 0)
2423     instruction_count = 0;
2424 
2425   // Compute the latency information
2426   uint delay = 0;
2427 
2428   if (instruction_count > 0 || !node_pipeline->mayHaveNoCode()) {
2429     int relative_latency = _current_latency[n->_idx] - _bundle_cycle_number;
2430     if (relative_latency < 0)
2431       relative_latency = 0;
2432 
2433     delay = _bundle_use.full_latency(relative_latency, node_usage);
2434 
2435     // Does not fit in this bundle, start a new one
2436     if (delay > 0) {
2437       step(delay);
2438 
2439 #ifndef PRODUCT
2440       if (_cfg->C->trace_opto_output())
2441         tty->print("#  *** STEP(%d) ***\n", delay);
2442 #endif
2443     }
2444   }
2445 
2446   if (delay == 0) {
2447     if (node_pipeline->hasMultipleBundles()) {
2448 #ifndef PRODUCT
2449       if (_cfg->C->trace_opto_output())
2450         tty->print("#  *** STEP(multiple instructions) ***\n");
2451 #endif
2452       step(1);
2453     }
2454 
2455     else if (instruction_count + _bundle_instr_count > Pipeline::_max_instrs_per_cycle) {
2456 #ifndef PRODUCT
2457       if (_cfg->C->trace_opto_output())
2458         tty->print("#  *** STEP(%d >= %d instructions) ***\n",
2459                    instruction_count + _bundle_instr_count,
2460                    Pipeline::_max_instrs_per_cycle);
2461 #endif
2462       step(1);
2463     }
2464   }
2465 
2466   // Set the node's latency
2467   _current_latency[n->_idx] = _bundle_cycle_number;
2468 
2469   // Now merge the functional unit information
2470   if (instruction_count > 0 || !node_pipeline->mayHaveNoCode())
2471     _bundle_use.add_usage(node_usage);
2472 
2473   // Increment the number of instructions in this bundle
2474   _bundle_instr_count += instruction_count;
2475 
2476   // Remember this node for later
2477   if (n->is_Mach())
2478     _next_node = n;
2479 
2480   // It's possible to have a BoxLock in the graph and in the _bbs mapping but
2481   // not in the bb->_nodes array.  This happens for debug-info-only BoxLocks.
2482   // 'Schedule' them (basically ignore in the schedule) but do not insert them
2483   // into the block.  All other scheduled nodes get put in the schedule here.
2484   int op = n->Opcode();
2485   if( (op == Op_Node && n->req() == 0) || // anti-dependence node OR
2486       (op != Op_Node &&         // Not an unused antidepedence node and
2487        // not an unallocated boxlock
2488        (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
2489 
2490     // Push any trailing projections
2491     if( bb->get_node(bb->number_of_nodes()-1) != n ) {
2492       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2493         Node *foi = n->fast_out(i);
2494         if( foi->is_Proj() )
2495           _scheduled.push(foi);
2496       }
2497     }
2498 
2499     // Put the instruction in the schedule list
2500     _scheduled.push(n);
2501   }
2502 
2503 #ifndef PRODUCT
2504   if (_cfg->C->trace_opto_output())
2505     dump_available();
2506 #endif
2507 
2508   // Walk all the definitions, decrementing use counts, and
2509   // if a definition has a 0 use count, place it in the available list.
2510   DecrementUseCounts(n,bb);
2511 }
2512 
2513 // This method sets the use count within a basic block.  We will ignore all
2514 // uses outside the current basic block.  As we are doing a backwards walk,
2515 // any node we reach that has a use count of 0 may be scheduled.  This also
2516 // avoids the problem of cyclic references from phi nodes, as long as phi
2517 // nodes are at the front of the basic block.  This method also initializes
2518 // the available list to the set of instructions that have no uses within this
2519 // basic block.
2520 void Scheduling::ComputeUseCount(const Block *bb) {
2521 #ifndef PRODUCT
2522   if (_cfg->C->trace_opto_output())
2523     tty->print("# -> ComputeUseCount\n");
2524 #endif
2525 
2526   // Clear the list of available and scheduled instructions, just in case
2527   _available.clear();
2528   _scheduled.clear();
2529 
2530 #ifdef ASSERT
2531   for( uint i=0; i < bb->number_of_nodes(); i++ )
2532     assert( _uses[bb->get_node(i)->_idx] == 0, "_use array not clean" );
2533 #endif
2534 
2535   // Force the _uses count to never go to zero for unscheduable pieces
2536   // of the block
2537   for( uint k = 0; k < _bb_start; k++ )
2538     _uses[bb->get_node(k)->_idx] = 1;
2539   for( uint l = _bb_end; l < bb->number_of_nodes(); l++ )
2540     _uses[bb->get_node(l)->_idx] = 1;
2541 
2542   // Iterate backwards over the instructions in the block.  Don't count the
2543   // branch projections at end or the block header instructions.
2544   for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
2545     Node *n = bb->get_node(j);
2546     if( n->is_Proj() ) continue; // Projections handled another way
2547 
2548     // Account for all uses
2549     for ( uint k = 0; k < n->len(); k++ ) {
2550       Node *inp = n->in(k);
2551       if (!inp) continue;
2552       assert(inp != n, "no cycles allowed" );
2553       if (_cfg->get_block_for_node(inp) == bb) { // Block-local use?
2554         if (inp->is_Proj()) { // Skip through Proj's
2555           inp = inp->in(0);
2556         }
2557         ++_uses[inp->_idx];     // Count 1 block-local use
2558       }
2559     }
2560 
2561     // If this instruction has a 0 use count, then it is available
2562     if (!_uses[n->_idx]) {
2563       _current_latency[n->_idx] = _bundle_cycle_number;
2564       AddNodeToAvailableList(n);
2565     }
2566 
2567 #ifndef PRODUCT
2568     if (_cfg->C->trace_opto_output()) {
2569       tty->print("#   uses: %3d: ", _uses[n->_idx]);
2570       n->dump();
2571     }
2572 #endif
2573   }
2574 
2575 #ifndef PRODUCT
2576   if (_cfg->C->trace_opto_output())
2577     tty->print("# <- ComputeUseCount\n");
2578 #endif
2579 }
2580 
2581 // This routine performs scheduling on each basic block in reverse order,
2582 // using instruction latencies and taking into account function unit
2583 // availability.
2584 void Scheduling::DoScheduling() {
2585 #ifndef PRODUCT
2586   if (_cfg->C->trace_opto_output())
2587     tty->print("# -> DoScheduling\n");
2588 #endif
2589 
2590   Block *succ_bb = nullptr;
2591   Block *bb;
2592   Compile* C = Compile::current();
2593 
2594   // Walk over all the basic blocks in reverse order
2595   for (int i = _cfg->number_of_blocks() - 1; i >= 0; succ_bb = bb, i--) {
2596     bb = _cfg->get_block(i);
2597 
2598 #ifndef PRODUCT
2599     if (_cfg->C->trace_opto_output()) {
2600       tty->print("#  Schedule BB#%03d (initial)\n", i);
2601       for (uint j = 0; j < bb->number_of_nodes(); j++) {
2602         bb->get_node(j)->dump();
2603       }
2604     }
2605 #endif
2606 
2607     // On the head node, skip processing
2608     if (bb == _cfg->get_root_block()) {
2609       continue;
2610     }
2611 
2612     // Skip empty, connector blocks
2613     if (bb->is_connector())
2614       continue;
2615 
2616     // If the following block is not the sole successor of
2617     // this one, then reset the pipeline information
2618     if (bb->_num_succs != 1 || bb->non_connector_successor(0) != succ_bb) {
2619 #ifndef PRODUCT
2620       if (_cfg->C->trace_opto_output()) {
2621         tty->print("*** bundle start of next BB, node %d, for %d instructions\n",
2622                    _next_node->_idx, _bundle_instr_count);
2623       }
2624 #endif
2625       step_and_clear();
2626     }
2627 
2628     // Leave untouched the starting instruction, any Phis, a CreateEx node
2629     // or Top.  bb->get_node(_bb_start) is the first schedulable instruction.
2630     _bb_end = bb->number_of_nodes()-1;
2631     for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
2632       Node *n = bb->get_node(_bb_start);
2633       // Things not matched, like Phinodes and ProjNodes don't get scheduled.
2634       // Also, MachIdealNodes do not get scheduled
2635       if( !n->is_Mach() ) continue;     // Skip non-machine nodes
2636       MachNode *mach = n->as_Mach();
2637       int iop = mach->ideal_Opcode();
2638       if( iop == Op_CreateEx ) continue; // CreateEx is pinned
2639       if( iop == Op_Con ) continue;      // Do not schedule Top
2640       if( iop == Op_Node &&     // Do not schedule PhiNodes, ProjNodes
2641           mach->pipeline() == MachNode::pipeline_class() &&
2642           !n->is_SpillCopy() && !n->is_MachMerge() )  // Breakpoints, Prolog, etc
2643         continue;
2644       break;                    // Funny loop structure to be sure...
2645     }
2646     // Compute last "interesting" instruction in block - last instruction we
2647     // might schedule.  _bb_end points just after last schedulable inst.
2648     Node *last = bb->get_node(_bb_end);
2649     // Ignore trailing NOPs.
2650     while (_bb_end > 0 && last->is_Mach() &&
2651            last->as_Mach()->ideal_Opcode() == Op_Con) {
2652       last = bb->get_node(--_bb_end);
2653     }
2654     assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
2655     if( last->is_Catch() ||
2656         (last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
2657       // There might be a prior call.  Skip it.
2658       while (_bb_start < _bb_end && bb->get_node(--_bb_end)->is_MachProj());
2659     } else if( last->is_MachNullCheck() ) {
2660       // Backup so the last null-checked memory instruction is
2661       // outside the schedulable range. Skip over the nullcheck,
2662       // projection, and the memory nodes.
2663       Node *mem = last->in(1);
2664       do {
2665         _bb_end--;
2666       } while (mem != bb->get_node(_bb_end));
2667     } else {
2668       // Set _bb_end to point after last schedulable inst.
2669       _bb_end++;
2670     }
2671 
2672     assert( _bb_start <= _bb_end, "inverted block ends" );
2673 
2674     // Compute the register antidependencies for the basic block
2675     ComputeRegisterAntidependencies(bb);
2676     if (C->failing())  return;  // too many D-U pinch points
2677 
2678     // Compute the usage within the block, and set the list of all nodes
2679     // in the block that have no uses within the block.
2680     ComputeUseCount(bb);
2681 
2682     // Schedule the remaining instructions in the block
2683     while ( _available.size() > 0 ) {
2684       Node *n = ChooseNodeToBundle();
2685       guarantee(n != nullptr, "no nodes available");
2686       AddNodeToBundle(n,bb);
2687     }
2688 
2689     assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
2690 #ifdef ASSERT
2691     for( uint l = _bb_start; l < _bb_end; l++ ) {
2692       Node *n = bb->get_node(l);
2693       uint m;
2694       for( m = 0; m < _bb_end-_bb_start; m++ )
2695         if( _scheduled[m] == n )
2696           break;
2697       assert( m < _bb_end-_bb_start, "instruction missing in schedule" );
2698     }
2699 #endif
2700 
2701     // Now copy the instructions (in reverse order) back to the block
2702     for ( uint k = _bb_start; k < _bb_end; k++ )
2703       bb->map_node(_scheduled[_bb_end-k-1], k);
2704 
2705 #ifndef PRODUCT
2706     if (_cfg->C->trace_opto_output()) {
2707       tty->print("#  Schedule BB#%03d (final)\n", i);
2708       uint current = 0;
2709       for (uint j = 0; j < bb->number_of_nodes(); j++) {
2710         Node *n = bb->get_node(j);
2711         if( valid_bundle_info(n) ) {
2712           Bundle *bundle = node_bundling(n);
2713           if (bundle->instr_count() > 0) {
2714             tty->print("*** Bundle: ");
2715             bundle->dump();
2716           }
2717           n->dump();
2718         }
2719       }
2720     }
2721 #endif
2722 #ifdef ASSERT
2723     verify_good_schedule(bb,"after block local scheduling");
2724 #endif
2725   }
2726 
2727 #ifndef PRODUCT
2728   if (_cfg->C->trace_opto_output())
2729     tty->print("# <- DoScheduling\n");
2730 #endif
2731 
2732   // Record final node-bundling array location
2733   _regalloc->C->output()->set_node_bundling_base(_node_bundling_base);
2734 
2735 } // end DoScheduling
2736 
2737 // Verify that no live-range used in the block is killed in the block by a
2738 // wrong DEF.  This doesn't verify live-ranges that span blocks.
2739 
2740 // Check for edge existence.  Used to avoid adding redundant precedence edges.
2741 static bool edge_from_to( Node *from, Node *to ) {
2742   for( uint i=0; i<from->len(); i++ )
2743     if( from->in(i) == to )
2744       return true;
2745   return false;
2746 }
2747 
2748 #ifdef ASSERT
2749 void Scheduling::verify_do_def( Node *n, OptoReg::Name def, const char *msg ) {
2750   // Check for bad kills
2751   if( OptoReg::is_valid(def) ) { // Ignore stores & control flow
2752     Node *prior_use = _reg_node[def];
2753     if( prior_use && !edge_from_to(prior_use,n) ) {
2754       tty->print("%s = ",OptoReg::as_VMReg(def)->name());
2755       n->dump();
2756       tty->print_cr("...");
2757       prior_use->dump();
2758       assert(edge_from_to(prior_use,n), "%s", msg);
2759     }
2760     _reg_node.map(def,nullptr); // Kill live USEs
2761   }
2762 }
2763 
2764 void Scheduling::verify_good_schedule( Block *b, const char *msg ) {
2765 
2766   // Zap to something reasonable for the verify code
2767   _reg_node.clear();
2768 
2769   // Walk over the block backwards.  Check to make sure each DEF doesn't
2770   // kill a live value (other than the one it's supposed to).  Add each
2771   // USE to the live set.
2772   for( uint i = b->number_of_nodes()-1; i >= _bb_start; i-- ) {
2773     Node *n = b->get_node(i);
2774     int n_op = n->Opcode();
2775     if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
2776       // Fat-proj kills a slew of registers
2777       RegMaskIterator rmi(n->out_RegMask());
2778       while (rmi.has_next()) {
2779         OptoReg::Name kill = rmi.next();
2780         verify_do_def(n, kill, msg);
2781       }
2782     } else if( n_op != Op_Node ) { // Avoid brand new antidependence nodes
2783       // Get DEF'd registers the normal way
2784       verify_do_def( n, _regalloc->get_reg_first(n), msg );
2785       verify_do_def( n, _regalloc->get_reg_second(n), msg );
2786     }
2787 
2788     // Now make all USEs live
2789     for( uint i=1; i<n->req(); i++ ) {
2790       Node *def = n->in(i);
2791       assert(def != nullptr, "input edge required");
2792       OptoReg::Name reg_lo = _regalloc->get_reg_first(def);
2793       OptoReg::Name reg_hi = _regalloc->get_reg_second(def);
2794       if( OptoReg::is_valid(reg_lo) ) {
2795         assert(!_reg_node[reg_lo] || edge_from_to(_reg_node[reg_lo],def), "%s", msg);
2796         _reg_node.map(reg_lo,n);
2797       }
2798       if( OptoReg::is_valid(reg_hi) ) {
2799         assert(!_reg_node[reg_hi] || edge_from_to(_reg_node[reg_hi],def), "%s", msg);
2800         _reg_node.map(reg_hi,n);
2801       }
2802     }
2803 
2804   }
2805 
2806   // Zap to something reasonable for the Antidependence code
2807   _reg_node.clear();
2808 }
2809 #endif
2810 
2811 // Conditionally add precedence edges.  Avoid putting edges on Projs.
2812 static void add_prec_edge_from_to( Node *from, Node *to ) {
2813   if( from->is_Proj() ) {       // Put precedence edge on Proj's input
2814     assert( from->req() == 1 && (from->len() == 1 || from->in(1) == nullptr), "no precedence edges on projections" );
2815     from = from->in(0);
2816   }
2817   if( from != to &&             // No cycles (for things like LD L0,[L0+4] )
2818       !edge_from_to( from, to ) ) // Avoid duplicate edge
2819     from->add_prec(to);
2820 }
2821 
2822 void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def ) {
2823   if( !OptoReg::is_valid(def_reg) ) // Ignore stores & control flow
2824     return;
2825 
2826   if (OptoReg::is_reg(def_reg)) {
2827     VMReg vmreg = OptoReg::as_VMReg(def_reg);
2828     if (vmreg->is_reg() && !vmreg->is_concrete() && !vmreg->prev()->is_concrete()) {
2829       // This is one of the high slots of a vector register.
2830       // ScheduleAndBundle already checked there are no live wide
2831       // vectors in this method so it can be safely ignored.
2832       return;
2833     }
2834   }
2835 
2836   Node *pinch = _reg_node[def_reg]; // Get pinch point
2837   if ((pinch == nullptr) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet?
2838       is_def ) {    // Check for a true def (not a kill)
2839     _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point
2840     return;
2841   }
2842 
2843   Node *kill = def;             // Rename 'def' to more descriptive 'kill'
2844   DEBUG_ONLY( def = (Node*)((intptr_t)0xdeadbeef); )
2845 
2846   // After some number of kills there _may_ be a later def
2847   Node *later_def = nullptr;
2848 
2849   Compile* C = Compile::current();
2850 
2851   // Finding a kill requires a real pinch-point.
2852   // Check for not already having a pinch-point.
2853   // Pinch points are Op_Node's.
2854   if( pinch->Opcode() != Op_Node ) { // Or later-def/kill as pinch-point?
2855     later_def = pinch;            // Must be def/kill as optimistic pinch-point
2856     if ( _pinch_free_list.size() > 0) {
2857       pinch = _pinch_free_list.pop();
2858     } else {
2859       pinch = new Node(1); // Pinch point to-be
2860     }
2861     if (pinch->_idx >= _regalloc->node_regs_max_index()) {
2862       DEBUG_ONLY( pinch->dump(); );
2863       assert(false, "too many D-U pinch points: %d >= %d", pinch->_idx, _regalloc->node_regs_max_index());
2864       _cfg->C->record_method_not_compilable("too many D-U pinch points");
2865       return;
2866     }
2867     _cfg->map_node_to_block(pinch, b);      // Pretend it's valid in this block (lazy init)
2868     _reg_node.map(def_reg,pinch); // Record pinch-point
2869     //regalloc()->set_bad(pinch->_idx); // Already initialized this way.
2870     if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
2871       pinch->init_req(0, C->top());     // set not null for the next call
2872       add_prec_edge_from_to(later_def,pinch); // Add edge from kill to pinch
2873       later_def = nullptr;           // and no later def
2874     }
2875     pinch->set_req(0,later_def);  // Hook later def so we can find it
2876   } else {                        // Else have valid pinch point
2877     if( pinch->in(0) )            // If there is a later-def
2878       later_def = pinch->in(0);   // Get it
2879   }
2880 
2881   // Add output-dependence edge from later def to kill
2882   if( later_def )               // If there is some original def
2883     add_prec_edge_from_to(later_def,kill); // Add edge from def to kill
2884 
2885   // See if current kill is also a use, and so is forced to be the pinch-point.
2886   if( pinch->Opcode() == Op_Node ) {
2887     Node *uses = kill->is_Proj() ? kill->in(0) : kill;
2888     for( uint i=1; i<uses->req(); i++ ) {
2889       if( _regalloc->get_reg_first(uses->in(i)) == def_reg ||
2890           _regalloc->get_reg_second(uses->in(i)) == def_reg ) {
2891         // Yes, found a use/kill pinch-point
2892         pinch->set_req(0,nullptr);  //
2893         pinch->replace_by(kill); // Move anti-dep edges up
2894         pinch = kill;
2895         _reg_node.map(def_reg,pinch);
2896         return;
2897       }
2898     }
2899   }
2900 
2901   // Add edge from kill to pinch-point
2902   add_prec_edge_from_to(kill,pinch);
2903 }
2904 
2905 void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
2906   if( !OptoReg::is_valid(use_reg) ) // Ignore stores & control flow
2907     return;
2908   Node *pinch = _reg_node[use_reg]; // Get pinch point
2909   // Check for no later def_reg/kill in block
2910   if ((pinch != nullptr) && _cfg->get_block_for_node(pinch) == b &&
2911       // Use has to be block-local as well
2912       _cfg->get_block_for_node(use) == b) {
2913     if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
2914         pinch->req() == 1 ) {   // pinch not yet in block?
2915       pinch->del_req(0);        // yank pointer to later-def, also set flag
2916       // Insert the pinch-point in the block just after the last use
2917       b->insert_node(pinch, b->find_node(use) + 1);
2918       _bb_end++;                // Increase size scheduled region in block
2919     }
2920 
2921     add_prec_edge_from_to(pinch,use);
2922   }
2923 }
2924 
2925 // We insert antidependences between the reads and following write of
2926 // allocated registers to prevent illegal code motion. Hopefully, the
2927 // number of added references should be fairly small, especially as we
2928 // are only adding references within the current basic block.
2929 void Scheduling::ComputeRegisterAntidependencies(Block *b) {
2930 
2931 #ifdef ASSERT
2932   verify_good_schedule(b,"before block local scheduling");
2933 #endif
2934 
2935   // A valid schedule, for each register independently, is an endless cycle
2936   // of: a def, then some uses (connected to the def by true dependencies),
2937   // then some kills (defs with no uses), finally the cycle repeats with a new
2938   // def.  The uses are allowed to float relative to each other, as are the
2939   // kills.  No use is allowed to slide past a kill (or def).  This requires
2940   // antidependencies between all uses of a single def and all kills that
2941   // follow, up to the next def.  More edges are redundant, because later defs
2942   // & kills are already serialized with true or antidependencies.  To keep
2943   // the edge count down, we add a 'pinch point' node if there's more than
2944   // one use or more than one kill/def.
2945 
2946   // We add dependencies in one bottom-up pass.
2947 
2948   // For each instruction we handle it's DEFs/KILLs, then it's USEs.
2949 
2950   // For each DEF/KILL, we check to see if there's a prior DEF/KILL for this
2951   // register.  If not, we record the DEF/KILL in _reg_node, the
2952   // register-to-def mapping.  If there is a prior DEF/KILL, we insert a
2953   // "pinch point", a new Node that's in the graph but not in the block.
2954   // We put edges from the prior and current DEF/KILLs to the pinch point.
2955   // We put the pinch point in _reg_node.  If there's already a pinch point
2956   // we merely add an edge from the current DEF/KILL to the pinch point.
2957 
2958   // After doing the DEF/KILLs, we handle USEs.  For each used register, we
2959   // put an edge from the pinch point to the USE.
2960 
2961   // To be expedient, the _reg_node array is pre-allocated for the whole
2962   // compilation.  _reg_node is lazily initialized; it either contains a null,
2963   // or a valid def/kill/pinch-point, or a leftover node from some prior
2964   // block.  Leftover node from some prior block is treated like a null (no
2965   // prior def, so no anti-dependence needed).  Valid def is distinguished by
2966   // it being in the current block.
2967   bool fat_proj_seen = false;
2968   uint last_safept = _bb_end-1;
2969   Node* end_node         = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : nullptr;
2970   Node* last_safept_node = end_node;
2971   for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
2972     Node *n = b->get_node(i);
2973     int is_def = n->outcnt();   // def if some uses prior to adding precedence edges
2974     if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
2975       // Fat-proj kills a slew of registers
2976       // This can add edges to 'n' and obscure whether or not it was a def,
2977       // hence the is_def flag.
2978       fat_proj_seen = true;
2979       RegMaskIterator rmi(n->out_RegMask());
2980       while (rmi.has_next()) {
2981         OptoReg::Name kill = rmi.next();
2982         anti_do_def(b, n, kill, is_def);
2983       }
2984     } else {
2985       // Get DEF'd registers the normal way
2986       anti_do_def( b, n, _regalloc->get_reg_first(n), is_def );
2987       anti_do_def( b, n, _regalloc->get_reg_second(n), is_def );
2988     }
2989 
2990     // Kill projections on a branch should appear to occur on the
2991     // branch, not afterwards, so grab the masks from the projections
2992     // and process them.
2993     if (n->is_MachBranch() || (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_Jump)) {
2994       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2995         Node* use = n->fast_out(i);
2996         if (use->is_Proj()) {
2997           RegMaskIterator rmi(use->out_RegMask());
2998           while (rmi.has_next()) {
2999             OptoReg::Name kill = rmi.next();
3000             anti_do_def(b, n, kill, false);
3001           }
3002         }
3003       }
3004     }
3005 
3006     // Check each register used by this instruction for a following DEF/KILL
3007     // that must occur afterward and requires an anti-dependence edge.
3008     for( uint j=0; j<n->req(); j++ ) {
3009       Node *def = n->in(j);
3010       if( def ) {
3011         assert( !def->is_MachProj() || def->ideal_reg() != MachProjNode::fat_proj, "" );
3012         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3013         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3014       }
3015     }
3016     // Do not allow defs of new derived values to float above GC
3017     // points unless the base is definitely available at the GC point.
3018 
3019     Node *m = b->get_node(i);
3020 
3021     // Add precedence edge from following safepoint to use of derived pointer
3022     if( last_safept_node != end_node &&
3023         m != last_safept_node) {
3024       for (uint k = 1; k < m->req(); k++) {
3025         const Type *t = m->in(k)->bottom_type();
3026         if( t->isa_oop_ptr() &&
3027             t->is_ptr()->offset() != 0 ) {
3028           last_safept_node->add_prec( m );
3029           break;
3030         }
3031       }
3032 
3033       // Do not allow a CheckCastPP node whose input is a raw pointer to
3034       // float past a safepoint.  This can occur when a buffered inline
3035       // type is allocated in a loop and the CheckCastPP from that
3036       // allocation is reused outside the loop.  If the use inside the
3037       // loop is scalarized the CheckCastPP will no longer be connected
3038       // to the loop safepoint.  See JDK-8264340.
3039       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
3040         Node *def = m->in(1);
3041         if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
3042           last_safept_node->add_prec(m);
3043         }
3044       }
3045     }
3046 
3047     if( n->jvms() ) {           // Precedence edge from derived to safept
3048       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3049       if( b->get_node(last_safept) != last_safept_node ) {
3050         last_safept = b->find_node(last_safept_node);
3051       }
3052       for( uint j=last_safept; j > i; j-- ) {
3053         Node *mach = b->get_node(j);
3054         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3055           mach->add_prec( n );
3056       }
3057       last_safept = i;
3058       last_safept_node = m;
3059     }
3060   }
3061 
3062   if (fat_proj_seen) {
3063     // Garbage collect pinch nodes that were not consumed.
3064     // They are usually created by a fat kill MachProj for a call.
3065     garbage_collect_pinch_nodes();
3066   }
3067 }
3068 
3069 // Garbage collect pinch nodes for reuse by other blocks.
3070 //
3071 // The block scheduler's insertion of anti-dependence
3072 // edges creates many pinch nodes when the block contains
3073 // 2 or more Calls.  A pinch node is used to prevent a
3074 // combinatorial explosion of edges.  If a set of kills for a
3075 // register is anti-dependent on a set of uses (or defs), rather
3076 // than adding an edge in the graph between each pair of kill
3077 // and use (or def), a pinch is inserted between them:
3078 //
3079 //            use1   use2  use3
3080 //                \   |   /
3081 //                 \  |  /
3082 //                  pinch
3083 //                 /  |  \
3084 //                /   |   \
3085 //            kill1 kill2 kill3
3086 //
3087 // One pinch node is created per register killed when
3088 // the second call is encountered during a backwards pass
3089 // over the block.  Most of these pinch nodes are never
3090 // wired into the graph because the register is never
3091 // used or def'ed in the block.
3092 //
3093 void Scheduling::garbage_collect_pinch_nodes() {
3094 #ifndef PRODUCT
3095   if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:");
3096 #endif
3097   int trace_cnt = 0;
3098   for (uint k = 0; k < _reg_node.max(); k++) {
3099     Node* pinch = _reg_node[k];
3100     if ((pinch != nullptr) && pinch->Opcode() == Op_Node &&
3101         // no predecence input edges
3102         (pinch->req() == pinch->len() || pinch->in(pinch->req()) == nullptr) ) {
3103       cleanup_pinch(pinch);
3104       _pinch_free_list.push(pinch);
3105       _reg_node.map(k, nullptr);
3106 #ifndef PRODUCT
3107       if (_cfg->C->trace_opto_output()) {
3108         trace_cnt++;
3109         if (trace_cnt > 40) {
3110           tty->print("\n");
3111           trace_cnt = 0;
3112         }
3113         tty->print(" %d", pinch->_idx);
3114       }
3115 #endif
3116     }
3117   }
3118 #ifndef PRODUCT
3119   if (_cfg->C->trace_opto_output()) tty->print("\n");
3120 #endif
3121 }
3122 
3123 // Clean up a pinch node for reuse.
3124 void Scheduling::cleanup_pinch( Node *pinch ) {
3125   assert (pinch && pinch->Opcode() == Op_Node && pinch->req() == 1, "just checking");
3126 
3127   for (DUIterator_Last imin, i = pinch->last_outs(imin); i >= imin; ) {
3128     Node* use = pinch->last_out(i);
3129     uint uses_found = 0;
3130     for (uint j = use->req(); j < use->len(); j++) {
3131       if (use->in(j) == pinch) {
3132         use->rm_prec(j);
3133         uses_found++;
3134       }
3135     }
3136     assert(uses_found > 0, "must be a precedence edge");
3137     i -= uses_found;    // we deleted 1 or more copies of this edge
3138   }
3139   // May have a later_def entry
3140   pinch->set_req(0, nullptr);
3141 }
3142 
3143 #ifndef PRODUCT
3144 
3145 void Scheduling::dump_available() const {
3146   tty->print("#Availist  ");
3147   for (uint i = 0; i < _available.size(); i++)
3148     tty->print(" N%d/l%d", _available[i]->_idx,_current_latency[_available[i]->_idx]);
3149   tty->cr();
3150 }
3151 
3152 // Print Scheduling Statistics
3153 void Scheduling::print_statistics() {
3154   // Print the size added by nops for bundling
3155   tty->print("Nops added %d bytes to total of %d bytes",
3156              _total_nop_size, _total_method_size);
3157   if (_total_method_size > 0)
3158     tty->print(", for %.2f%%",
3159                ((double)_total_nop_size) / ((double) _total_method_size) * 100.0);
3160   tty->print("\n");
3161 
3162   uint total_instructions = 0, total_bundles = 0;
3163 
3164   for (uint i = 1; i <= Pipeline::_max_instrs_per_cycle; i++) {
3165     uint bundle_count   = _total_instructions_per_bundle[i];
3166     total_instructions += bundle_count * i;
3167     total_bundles      += bundle_count;
3168   }
3169 
3170   if (total_bundles > 0)
3171     tty->print("Average ILP (excluding nops) is %.2f\n",
3172                ((double)total_instructions) / ((double)total_bundles));
3173 }
3174 #endif
3175 
3176 //-----------------------init_scratch_buffer_blob------------------------------
3177 // Construct a temporary BufferBlob and cache it for this compile.
3178 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3179   // If there is already a scratch buffer blob allocated and the
3180   // constant section is big enough, use it.  Otherwise free the
3181   // current and allocate a new one.
3182   BufferBlob* blob = scratch_buffer_blob();
3183   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3184     // Use the current blob.
3185   } else {
3186     if (blob != nullptr) {
3187       BufferBlob::free(blob);
3188     }
3189 
3190     ResourceMark rm;
3191     _scratch_const_size = const_size;
3192     int size = C2Compiler::initial_code_buffer_size(const_size);
3193     if (C->has_scalarized_args()) {
3194       // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
3195       // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
3196       ciMethod* method = C->method();
3197       int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
3198       int arg_num = 0;
3199       if (!method->is_static()) {
3200         if (method->is_scalarized_arg(arg_num)) {
3201           size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
3202         }
3203         arg_num++;
3204       }
3205       for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
3206         if (method->is_scalarized_arg(arg_num)) {
3207           size += str.type()->as_inline_klass()->oop_count() * barrier_size;
3208         }
3209         arg_num++;
3210       }
3211     }
3212     blob = BufferBlob::create("Compile::scratch_buffer", size);
3213     // Record the buffer blob for next time.
3214     set_scratch_buffer_blob(blob);
3215     // Have we run out of code space?
3216     if (scratch_buffer_blob() == nullptr) {
3217       // Let CompilerBroker disable further compilations.
3218       C->record_failure("Not enough space for scratch buffer in CodeCache");
3219       return;
3220     }
3221   }
3222 
3223   // Initialize the relocation buffers
3224   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3225   set_scratch_locs_memory(locs_buf);
3226 }
3227 
3228 
3229 //-----------------------scratch_emit_size-------------------------------------
3230 // Helper function that computes size by emitting code
3231 uint PhaseOutput::scratch_emit_size(const Node* n) {
3232   // Start scratch_emit_size section.
3233   set_in_scratch_emit_size(true);
3234 
3235   // Emit into a trash buffer and count bytes emitted.
3236   // This is a pretty expensive way to compute a size,
3237   // but it works well enough if seldom used.
3238   // All common fixed-size instructions are given a size
3239   // method by the AD file.
3240   // Note that the scratch buffer blob and locs memory are
3241   // allocated at the beginning of the compile task, and
3242   // may be shared by several calls to scratch_emit_size.
3243   // The allocation of the scratch buffer blob is particularly
3244   // expensive, since it has to grab the code cache lock.
3245   BufferBlob* blob = this->scratch_buffer_blob();
3246   assert(blob != nullptr, "Initialize BufferBlob at start");
3247   assert(blob->size() > MAX_inst_size, "sanity");
3248   relocInfo* locs_buf = scratch_locs_memory();
3249   address blob_begin = blob->content_begin();
3250   address blob_end   = (address)locs_buf;
3251   assert(blob->contains(blob_end), "sanity");
3252   CodeBuffer buf(blob_begin, blob_end - blob_begin);
3253   buf.initialize_consts_size(_scratch_const_size);
3254   buf.initialize_stubs_size(MAX_stubs_size);
3255   assert(locs_buf != nullptr, "sanity");
3256   int lsize = MAX_locs_size / 3;
3257   buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
3258   buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
3259   buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
3260   // Mark as scratch buffer.
3261   buf.consts()->set_scratch_emit();
3262   buf.insts()->set_scratch_emit();
3263   buf.stubs()->set_scratch_emit();
3264 
3265   // Do the emission.
3266 
3267   Label fakeL; // Fake label for branch instructions.
3268   Label*   saveL = nullptr;
3269   uint save_bnum = 0;
3270   bool is_branch = n->is_MachBranch();
3271   C2_MacroAssembler masm(&buf);
3272   masm.bind(fakeL);
3273   if (is_branch) {
3274     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3275     n->as_MachBranch()->label_set(&fakeL, 0);
3276   }
3277   n->emit(&masm, C->regalloc());
3278 
3279   // Emitting into the scratch buffer should not fail
3280   assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3281 
3282   // Restore label.
3283   if (is_branch) {
3284     n->as_MachBranch()->label_set(saveL, save_bnum);
3285   }
3286 
3287   // End scratch_emit_size section.
3288   set_in_scratch_emit_size(false);
3289 
3290   return buf.insts_size();
3291 }
3292 
3293 void PhaseOutput::install() {
3294   if (!C->should_install_code()) {
3295     return;
3296   } else if (C->stub_function() != nullptr) {
3297     install_stub(C->stub_name());
3298   } else {
3299     install_code(C->method(),
3300                  C->entry_bci(),
3301                  CompileBroker::compiler2(),
3302                  C->has_unsafe_access(),
3303                  SharedRuntime::is_wide_vector(C->max_vector_size()));
3304   }
3305 }
3306 
3307 void PhaseOutput::install_code(ciMethod*         target,
3308                                int               entry_bci,
3309                                AbstractCompiler* compiler,
3310                                bool              has_unsafe_access,
3311                                bool              has_wide_vectors) {
3312   // Check if we want to skip execution of all compiled code.
3313   {
3314 #ifndef PRODUCT
3315     if (OptoNoExecute) {
3316       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3317       return;
3318     }
3319 #endif
3320     Compile::TracePhase tp(_t_registerMethod);
3321 
3322     if (C->is_osr_compilation()) {
3323       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3324       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3325     } else {
3326       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3327       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == CodeOffsets::no_such_entry_point) {
3328         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
3329       }
3330       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == CodeOffsets::no_such_entry_point) {
3331         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
3332       }
3333       if (_code_offsets.value(CodeOffsets::Entry) == CodeOffsets::no_such_entry_point) {
3334         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
3335       }
3336       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3337     }
3338 
3339     C->env()->register_method(target,
3340                               entry_bci,
3341                               &_code_offsets,
3342                               _orig_pc_slot_offset_in_bytes,
3343                               code_buffer(),
3344                               frame_size_in_words(),
3345                               _oop_map_set,
3346                               &_handler_table,
3347                               inc_table(),
3348                               compiler,
3349                               has_unsafe_access,
3350                               SharedRuntime::is_wide_vector(C->max_vector_size()),
3351                               C->has_monitors(),
3352                               C->has_scoped_access(),
3353                               0);
3354 
3355     if (C->log() != nullptr) { // Print code cache state into compiler log
3356       C->log()->code_cache_state();
3357     }
3358   }
3359 }
3360 void PhaseOutput::install_stub(const char* stub_name) {
3361   // Entry point will be accessed using stub_entry_point();
3362   if (code_buffer() == nullptr) {
3363     Matcher::soft_match_failure();
3364   } else {
3365     if (PrintAssembly && (WizardMode || Verbose))
3366       tty->print_cr("### Stub::%s", stub_name);
3367 
3368     if (!C->failing()) {
3369       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3370 
3371       // Make the NMethod
3372       // For now we mark the frame as never safe for profile stackwalking
3373       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
3374                                                       code_buffer(),
3375                                                       CodeOffsets::frame_never_safe,
3376                                                       // _code_offsets.value(CodeOffsets::Frame_Complete),
3377                                                       frame_size_in_words(),
3378                                                       oop_map_set(),
3379                                                       false,
3380                                                       false);
3381 
3382       if (rs == nullptr) {
3383         C->record_failure("CodeCache is full");
3384       } else {
3385         assert(rs->is_runtime_stub(), "sanity check");
3386         C->set_stub_entry_point(rs->entry_point());
3387         BlobId blob_id = StubInfo::blob(C->stub_id());
3388         AOTCodeCache::store_code_blob(*rs, AOTCodeEntry::C2Blob, blob_id);
3389       }
3390     }
3391   }
3392 }
3393 
3394 // Support for bundling info
3395 Bundle* PhaseOutput::node_bundling(const Node *n) {
3396   assert(valid_bundle_info(n), "oob");
3397   return &_node_bundling_base[n->_idx];
3398 }
3399 
3400 bool PhaseOutput::valid_bundle_info(const Node *n) {
3401   return (_node_bundling_limit > n->_idx);
3402 }
3403 
3404 //------------------------------frame_size_in_words-----------------------------
3405 // frame_slots in units of words
3406 int PhaseOutput::frame_size_in_words() const {
3407   // shift is 0 in LP32 and 1 in LP64
3408   const int shift = (LogBytesPerWord - LogBytesPerInt);
3409   int words = _frame_slots >> shift;
3410   assert( words << shift == _frame_slots, "frame size must be properly aligned in LP64" );
3411   return words;
3412 }
3413 
3414 // To bang the stack of this compiled method we use the stack size
3415 // that the interpreter would need in case of a deoptimization. This
3416 // removes the need to bang the stack in the deoptimization blob which
3417 // in turn simplifies stack overflow handling.
3418 int PhaseOutput::bang_size_in_bytes() const {
3419   return MAX2(frame_size_in_bytes() + os::extra_bang_size_in_bytes(), C->interpreter_frame_size());
3420 }
3421 
3422 //------------------------------dump_asm---------------------------------------
3423 // Dump formatted assembly
3424 #if defined(SUPPORT_OPTO_ASSEMBLY)
3425 void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) {
3426 
3427   int pc_digits = 3; // #chars required for pc
3428   int sb_chars  = 3; // #chars for "start bundle" indicator
3429   int tab_size  = 8;
3430   if (pcs != nullptr) {
3431     int max_pc = 0;
3432     for (uint i = 0; i < pc_limit; i++) {
3433       max_pc = (max_pc < pcs[i]) ? pcs[i] : max_pc;
3434     }
3435     pc_digits  = ((max_pc < 4096) ? 3 : ((max_pc < 65536) ? 4 : ((max_pc < 65536*256) ? 6 : 8))); // #chars required for pc
3436   }
3437   int prefix_len = ((pc_digits + sb_chars + tab_size - 1)/tab_size)*tab_size;
3438 
3439   bool cut_short = false;
3440   st->print_cr("#");
3441   st->print("#  ");  C->tf()->dump_on(st);  st->cr();
3442   st->print_cr("#");
3443 
3444   // For all blocks
3445   int pc = 0x0;                 // Program counter
3446   char starts_bundle = ' ';
3447   C->regalloc()->dump_frame();
3448 
3449   Node *n = nullptr;
3450   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
3451     if (VMThread::should_terminate()) {
3452       cut_short = true;
3453       break;
3454     }
3455     Block* block = C->cfg()->get_block(i);
3456     if (block->is_connector() && !Verbose) {
3457       continue;
3458     }
3459     n = block->head();
3460     if ((pcs != nullptr) && (n->_idx < pc_limit)) {
3461       pc = pcs[n->_idx];
3462       st->print("%*.*x", pc_digits, pc_digits, pc);
3463     }
3464     st->fill_to(prefix_len);
3465     block->dump_head(C->cfg(), st);
3466     if (block->is_connector()) {
3467       st->fill_to(prefix_len);
3468       st->print_cr("# Empty connector block");
3469     } else if (block->num_preds() == 2 && block->pred(1)->is_CatchProj() && block->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
3470       st->fill_to(prefix_len);
3471       st->print_cr("# Block is sole successor of call");
3472     }
3473 
3474     // For all instructions
3475     for (uint j = 0; j < block->number_of_nodes(); j++) {
3476       if (VMThread::should_terminate()) {
3477         cut_short = true;
3478         break;
3479       }
3480       n = block->get_node(j);
3481       if (valid_bundle_info(n)) {
3482         Bundle* bundle = node_bundling(n);
3483         if (bundle->starts_bundle()) {
3484           starts_bundle = '+';
3485         }
3486       }
3487 
3488       if (WizardMode) {
3489         n->dump();
3490       }
3491 
3492       if( !n->is_Region() &&    // Dont print in the Assembly
3493           !n->is_Phi() &&       // a few noisely useless nodes
3494           !n->is_Proj() &&
3495           !n->is_MachTemp() &&
3496           !n->is_SafePointScalarObject() &&
3497           !n->is_Catch() &&     // Would be nice to print exception table targets
3498           !n->is_MergeMem() &&  // Not very interesting
3499           !n->is_top() &&       // Debug info table constants
3500           !(n->is_Con() && !n->is_Mach())// Debug info table constants
3501           ) {
3502         if ((pcs != nullptr) && (n->_idx < pc_limit)) {
3503           pc = pcs[n->_idx];
3504           st->print("%*.*x", pc_digits, pc_digits, pc);
3505         } else {
3506           st->fill_to(pc_digits);
3507         }
3508         st->print(" %c ", starts_bundle);
3509         starts_bundle = ' ';
3510         st->fill_to(prefix_len);
3511         n->format(C->regalloc(), st);
3512         st->cr();
3513       }
3514 
3515       // Dump the exception table as well
3516       if( n->is_Catch() && (Verbose || WizardMode) ) {
3517         // Print the exception table for this offset
3518         _handler_table.print_subtable_for(pc);
3519       }
3520       st->bol(); // Make sure we start on a new line
3521     }
3522     st->cr(); // one empty line between blocks
3523   } // End of per-block dump
3524 
3525   if (cut_short)  st->print_cr("*** disassembly is cut short ***");
3526 }
3527 #endif
3528 
3529 #ifndef PRODUCT
3530 void PhaseOutput::print_statistics() {
3531   Scheduling::print_statistics();
3532 }
3533 #endif