src/hotspot/share/opto/output.cpp

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfo.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/compilerDirectives.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/c2/barrierSetC2.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/allocation.hpp"
  39 #include "opto/ad.hpp"
  40 #include "opto/block.hpp"
  41 #include "opto/c2compiler.hpp"
  42 #include "opto/c2_MacroAssembler.hpp"
  43 #include "opto/callnode.hpp"
  44 #include "opto/cfgnode.hpp"
  45 #include "opto/locknode.hpp"
  46 #include "opto/machnode.hpp"
  47 #include "opto/node.hpp"
  48 #include "opto/optoreg.hpp"
  49 #include "opto/output.hpp"
  50 #include "opto/regalloc.hpp"
  51 #include "opto/runtime.hpp"
  52 #include "opto/subnode.hpp"
  53 #include "opto/type.hpp"
  54 #include "runtime/handles.inline.hpp"
  55 #include "runtime/sharedRuntime.hpp"

 229     _first_block_size(0),
 230     _handler_table(),
 231     _inc_table(),
 232     _stub_list(),
 233     _oop_map_set(nullptr),
 234     _scratch_buffer_blob(nullptr),
 235     _scratch_locs_memory(nullptr),
 236     _scratch_const_size(-1),
 237     _in_scratch_emit_size(false),
 238     _frame_slots(0),
 239     _code_offsets(),
 240     _node_bundling_limit(0),
 241     _node_bundling_base(nullptr),
 242     _orig_pc_slot(0),
 243     _orig_pc_slot_offset_in_bytes(0),
 244     _buf_sizes(),
 245     _block(nullptr),
 246     _index(0) {
 247   C->set_output(this);
 248   if (C->stub_name() == nullptr) {
 249     _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
 250   }
 251 }
 252 
 253 PhaseOutput::~PhaseOutput() {
 254   C->set_output(nullptr);
 255   if (_scratch_buffer_blob != nullptr) {
 256     BufferBlob::free(_scratch_buffer_blob);
 257   }
 258 }
 259 
 260 void PhaseOutput::perform_mach_node_analysis() {
 261   // Late barrier analysis must be done after schedule and bundle;
 262   // otherwise liveness-based spilling will fail.
 263   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 264   bs->late_barrier_analysis();
 265 
 266   pd_perform_mach_node_analysis();
 267 
 268   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 269 }
 270 
 271 // Convert Nodes to instruction bits and pass off to the VM
 272 void PhaseOutput::Output() {
 273   // RootNode goes away; its block must be empty by now
 274   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 275 
 276   // The number of new nodes (mostly MachNop) is proportional to
 277   // the number of Java calls and inner loops which are aligned.
 278   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 279                             C->inner_loops()*(OptoLoopAlignment-1)),
 280                            "out of nodes before code generation" ) ) {
 281     return;
 282   }
 283   // Make sure I can find the Start Node
 284   Block *entry = C->cfg()->get_block(1);
 285   Block *broot = C->cfg()->get_root_block();
 286 
 287   const StartNode *start = entry->head()->as_Start();
 288 
 289   // Replace StartNode with prolog
 290   MachPrologNode *prolog = new MachPrologNode();
 291   entry->map_node(prolog, 0);
 292   C->cfg()->map_node_to_block(prolog, entry);
 293   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 294 
 295   // Virtual methods need an unverified entry point
 296 
 297   if( C->is_osr_compilation() ) {
 298     if( PoisonOSREntry ) {
 299       // TODO: Should use a ShouldNotReachHereNode...
 300       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 301     }
 302   } else {
 303     if( C->method() && !C->method()->flags().is_static() ) {
 304       // Insert unverified entry point
 305       C->cfg()->insert( broot, 0, new MachUEPNode() );
 306     }
 307 
 308   }
 309 
 310   // Break before main entry point
 311   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 312       (OptoBreakpoint && C->is_method_compilation())       ||
 313       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 314       (OptoBreakpointC2R && !C->method())                   ) {
 315     // checking for C->method() means that OptoBreakpoint does not apply to
 316     // runtime stubs or frame converters
 317     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 318   }
 319 
 320   // Insert epilogs before every return
 321   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 322     Block* block = C->cfg()->get_block(i);
 323     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 324       Node* m = block->end();
 325       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 326         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 327         block->add_inst(epilog);
 328         C->cfg()->map_node_to_block(epilog, block);
 329       }
 330     }
 331   }
 332 
 333   // Keeper of sizing aspects
 334   _buf_sizes = BufferSizingData();
 335 
 336   // Initialize code buffer
 337   estimate_buffer_size(_buf_sizes._const);
 338   if (C->failing()) return;
 339 
 340   // Pre-compute the length of blocks and replace
 341   // long branches with short ones if the machine supports it.
 342   // Must be done before ScheduleAndBundle due to SPARC delay slots
 343   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 344   blk_starts[0] = 0;
 345   shorten_branches(blk_starts);
 346 
 347   ScheduleAndBundle();
 348   if (C->failing()) {
 349     return;
 350   }
 351 
 352   perform_mach_node_analysis();
 353 
 354   // Complete sizing of codebuffer
 355   CodeBuffer* cb = init_buffer();
 356   if (cb == nullptr || C->failing()) {
 357     return;
 358   }
 359 
 360   BuildOopMaps();
 361 
 362   if (C->failing())  {
 363     return;
 364   }
 365 
 366   fill_buffer(cb, blk_starts);

 487     // Sum all instruction sizes to compute block size
 488     uint last_inst = block->number_of_nodes();
 489     uint blk_size = 0;
 490     for (uint j = 0; j < last_inst; j++) {
 491       _index = j;
 492       Node* nj = block->get_node(_index);
 493       // Handle machine instruction nodes
 494       if (nj->is_Mach()) {
 495         MachNode* mach = nj->as_Mach();
 496         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 497         reloc_size += mach->reloc();
 498         if (mach->is_MachCall()) {
 499           // add size information for trampoline stub
 500           // class CallStubImpl is platform-specific and defined in the *.ad files.
 501           stub_size  += CallStubImpl::size_call_trampoline();
 502           reloc_size += CallStubImpl::reloc_call_trampoline();
 503 
 504           MachCallNode *mcall = mach->as_MachCall();
 505           // This destination address is NOT PC-relative
 506 
 507           mcall->method_set((intptr_t)mcall->entry_point());
 508 
 509           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 510             stub_size  += CompiledDirectCall::to_interp_stub_size();
 511             reloc_size += CompiledDirectCall::reloc_to_interp_stub();
 512           }
 513         } else if (mach->is_MachSafePoint()) {
 514           // If call/safepoint are adjacent, account for possible
 515           // nop to disambiguate the two safepoints.
 516           // ScheduleAndBundle() can rearrange nodes in a block,
 517           // check for all offsets inside this block.
 518           if (last_call_adr >= blk_starts[i]) {
 519             blk_size += nop_size;
 520           }
 521         }
 522         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 523           // Nop is inserted between "avoid back to back" instructions.
 524           // ScheduleAndBundle() can rearrange nodes in a block,
 525           // check for all offsets inside this block.
 526           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 527             blk_size += nop_size;

 742     // New functionality:
 743     //   Assert if the local is not top. In product mode let the new node
 744     //   override the old entry.
 745     assert(local == C->top(), "LocArray collision");
 746     if (local == C->top()) {
 747       return;
 748     }
 749     array->pop();
 750   }
 751   const Type *t = local->bottom_type();
 752 
 753   // Is it a safepoint scalar object node?
 754   if (local->is_SafePointScalarObject()) {
 755     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 756 
 757     ObjectValue* sv = (ObjectValue*) sv_for_node_id(objs, spobj->_idx);
 758     if (sv == nullptr) {
 759       ciKlass* cik = t->is_oopptr()->exact_klass();
 760       assert(cik->is_instance_klass() ||
 761              cik->is_array_klass(), "Not supported allocation.");
 762       sv = new ObjectValue(spobj->_idx,
 763                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
 764       set_sv_for_object_node(objs, sv);
 765 
 766       uint first_ind = spobj->first_index(sfpt->jvms());
 767       for (uint i = 0; i < spobj->n_fields(); i++) {
 768         Node* fld_node = sfpt->in(first_ind+i);
 769         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 770       }
 771     }
 772     array->append(sv);
 773     return;
 774   } else if (local->is_SafePointScalarMerge()) {
 775     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 776     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 777 
 778     if (mv == nullptr) {
 779       GrowableArray<ScopeValue*> deps;
 780 
 781       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 782       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 783       assert(deps.length() == 1, "missing value");
 784 
 785       int selector_idx = smerge->selector_idx(sfpt->jvms());
 786       (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);

 965 bool PhaseOutput::contains_as_owner(GrowableArray<MonitorValue*> *monarray, ObjectValue *ov) const {
 966   for (int k = 0; k < monarray->length(); k++) {
 967     MonitorValue* mv = monarray->at(k);
 968     if (mv->owner() == ov) {
 969       return true;
 970     }
 971   }
 972 
 973   return false;
 974 }
 975 
 976 //--------------------------Process_OopMap_Node--------------------------------
 977 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
 978   // Handle special safepoint nodes for synchronization
 979   MachSafePointNode *sfn   = mach->as_MachSafePoint();
 980   MachCallNode      *mcall;
 981 
 982   int safepoint_pc_offset = current_offset;
 983   bool is_method_handle_invoke = false;
 984   bool return_oop = false;
 985   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
 986   bool arg_escape = false;
 987 
 988   // Add the safepoint in the DebugInfoRecorder
 989   if( !mach->is_MachCall() ) {
 990     mcall = nullptr;
 991     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
 992   } else {
 993     mcall = mach->as_MachCall();
 994 
 995     // Is the call a MethodHandle call?
 996     if (mcall->is_MachCallJava()) {
 997       if (mcall->as_MachCallJava()->_method_handle_invoke) {
 998         assert(C->has_method_handle_invokes(), "must have been set during call generation");
 999         is_method_handle_invoke = true;
1000       }
1001       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1002     }
1003 
1004     // Check if a call returns an object.
1005     if (mcall->returns_pointer()) {
1006       return_oop = true;
1007     }
1008     safepoint_pc_offset += mcall->ret_addr_offset();
1009     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1010   }
1011 
1012   // Loop over the JVMState list to add scope information
 1013   // Do not skip safepoints with a null method; they need monitor info
1014   JVMState* youngest_jvms = sfn->jvms();
1015   int max_depth = youngest_jvms->depth();
1016 
1017   // Allocate the object pool for scalar-replaced objects -- the map from
1018   // small-integer keys (which can be recorded in the local and ostack
1019   // arrays) to descriptions of the object state.
1020   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1021 
1022   // Visit scopes from oldest to youngest.
1023   for (int depth = 1; depth <= max_depth; depth++) {
1024     JVMState* jvms = youngest_jvms->of_depth(depth);
1025     int idx;
1026     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1027     // Safepoints that do not have method() set only provide oop-map and monitor info

1150     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1151     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1152 
1153     // Make method available for all Safepoints
1154     ciMethod* scope_method = method ? method : C->method();
1155     // Describe the scope here
1156     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1157     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1158     // Now we can describe the scope.
1159     methodHandle null_mh;
1160     bool rethrow_exception = false;
1161     C->debug_info()->describe_scope(
1162       safepoint_pc_offset,
1163       null_mh,
1164       scope_method,
1165       jvms->bci(),
1166       jvms->should_reexecute(),
1167       rethrow_exception,
1168       is_method_handle_invoke,
1169       return_oop,
1170       has_ea_local_in_scope,
1171       arg_escape,
1172       locvals,
1173       expvals,
1174       monvals
1175     );
1176   } // End jvms loop
1177 
1178   // Mark the end of the scope set.
1179   C->debug_info()->end_safepoint(safepoint_pc_offset);
1180 }
1181 
1182 
1183 
1184 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1185 class NonSafepointEmitter {
1186     Compile*  C;
1187     JVMState* _pending_jvms;
1188     int       _pending_offset;
1189 

1525           MachNode *nop = new MachNopNode(nops_cnt);
1526           block->insert_node(nop, j++);
1527           last_inst++;
1528           C->cfg()->map_node_to_block(nop, block);
1529           // Ensure enough space.
1530           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1531           if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1532             C->record_failure("CodeCache is full");
1533             return;
1534           }
1535           nop->emit(*cb, C->regalloc());
1536           cb->flush_bundle(true);
1537           current_offset = cb->insts_size();
1538         }
1539 
1540         bool observe_safepoint = is_sfn;
1541         // Remember the start of the last call in a basic block
1542         if (is_mcall) {
1543           MachCallNode *mcall = mach->as_MachCall();
1544 
1545           // This destination address is NOT PC-relative
1546           mcall->method_set((intptr_t)mcall->entry_point());
1547 
1548           // Save the return address
1549           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1550 
1551           observe_safepoint = mcall->guaranteed_safepoint();
1552         }
1553 
 1554         // sfn is valid whenever mcall is valid because MachCallNode inherits from MachSafePointNode
1555         if (observe_safepoint) {
1556           // Handle special safepoint nodes for synchronization
1557           if (!is_mcall) {
1558             MachSafePointNode *sfn = mach->as_MachSafePoint();
1559             // !!!!! Stubs only need an oopmap right now, so bail out
1560             if (sfn->jvms()->method() == nullptr) {
1561               // Write the oopmap directly to the code blob??!!
1562               continue;
1563             }
1564           } // End synchronization
1565 
1566           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1690       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1691         node_offsets[n->_idx] = cb->insts_size();
1692       }
1693 #endif
1694       assert(!C->failing(), "Should not reach here if failing.");
1695 
1696       // "Normal" instruction case
1697       DEBUG_ONLY(uint instr_offset = cb->insts_size());
1698       n->emit(*cb, C->regalloc());
1699       current_offset = cb->insts_size();
1700 
1701       // Above we only verified that there is enough space in the instruction section.
1702       // However, the instruction may emit stubs that cause code buffer expansion.
1703       // Bail out here if expansion failed due to a lack of code cache space.
1704       if (C->failing()) {
1705         return;
1706       }
1707 
1708       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1709              "ret_addr_offset() not within emitted code");
1710 
1711 #ifdef ASSERT
1712       uint n_size = n->size(C->regalloc());
1713       if (n_size < (current_offset-instr_offset)) {
1714         MachNode* mach = n->as_Mach();
1715         n->dump();
1716         mach->dump_format(C->regalloc(), tty);
1717         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1718         Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
1719         tty->print_cr(" ------------------- ");
1720         BufferBlob* blob = this->scratch_buffer_blob();
1721         address blob_begin = blob->content_begin();
1722         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1723         assert(false, "wrong size of mach node");
1724       }
1725 #endif
1726       non_safepoints.observe_instruction(n, current_offset);
1727 
 1728       // mcall is the last "call" that can be a safepoint;
 1729       // record it so we can see if a poll directly follows it,
 1730       // in which case we'll need a pad to make the PcDesc sites unique.

3123         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3124         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3125       }
3126     }
3127     // Do not allow defs of new derived values to float above GC
3128     // points unless the base is definitely available at the GC point.
3129 
3130     Node *m = b->get_node(i);
3131 
3132     // Add precedence edge from following safepoint to use of derived pointer
3133     if( last_safept_node != end_node &&
3134         m != last_safept_node) {
3135       for (uint k = 1; k < m->req(); k++) {
3136         const Type *t = m->in(k)->bottom_type();
3137         if( t->isa_oop_ptr() &&
3138             t->is_ptr()->offset() != 0 ) {
3139           last_safept_node->add_prec( m );
3140           break;
3141         }
3142       }
3143     }
3144 
3145     if( n->jvms() ) {           // Precedence edge from derived to safept
3146       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3147       if( b->get_node(last_safept) != last_safept_node ) {
3148         last_safept = b->find_node(last_safept_node);
3149       }
3150       for( uint j=last_safept; j > i; j-- ) {
3151         Node *mach = b->get_node(j);
3152         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3153           mach->add_prec( n );
3154       }
3155       last_safept = i;
3156       last_safept_node = m;
3157     }
3158   }
3159 
3160   if (fat_proj_seen) {
3161     // Garbage collect pinch nodes that were not consumed.
3162     // They are usually created by a fat kill MachProj for a call.

3281 }
3282 #endif
3283 
3284 //-----------------------init_scratch_buffer_blob------------------------------
3285 // Construct a temporary BufferBlob and cache it for this compile.
3286 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3287   // If there is already a scratch buffer blob allocated and the
3288   // constant section is big enough, use it.  Otherwise free the
3289   // current and allocate a new one.
3290   BufferBlob* blob = scratch_buffer_blob();
3291   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3292     // Use the current blob.
3293   } else {
3294     if (blob != nullptr) {
3295       BufferBlob::free(blob);
3296     }
3297 
3298     ResourceMark rm;
3299     _scratch_const_size = const_size;
3300     int size = C2Compiler::initial_code_buffer_size(const_size);
3301     blob = BufferBlob::create("Compile::scratch_buffer", size);
3302     // Record the buffer blob for next time.
3303     set_scratch_buffer_blob(blob);
3304     // Have we run out of code space?
3305     if (scratch_buffer_blob() == nullptr) {
 3306       // Let CompileBroker disable further compilations.
3307       C->record_failure("Not enough space for scratch buffer in CodeCache");
3308       return;
3309     }
3310   }
3311 
3312   // Initialize the relocation buffers
3313   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3314   set_scratch_locs_memory(locs_buf);
3315 }
3316 
3317 
3318 //-----------------------scratch_emit_size-------------------------------------
3319 // Helper function that computes size by emitting code
3320 uint PhaseOutput::scratch_emit_size(const Node* n) {

3351   buf.insts()->set_scratch_emit();
3352   buf.stubs()->set_scratch_emit();
3353 
3354   // Do the emission.
3355 
3356   Label fakeL; // Fake label for branch instructions.
3357   Label*   saveL = nullptr;
3358   uint save_bnum = 0;
3359   bool is_branch = n->is_MachBranch();
3360   if (is_branch) {
3361     MacroAssembler masm(&buf);
3362     masm.bind(fakeL);
3363     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3364     n->as_MachBranch()->label_set(&fakeL, 0);
3365   }
3366   n->emit(buf, C->regalloc());
3367 
3368   // Emitting into the scratch buffer should not fail
3369   assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3370 
3371   if (is_branch) // Restore label.
3372     n->as_MachBranch()->label_set(saveL, save_bnum);
3373 
3374   // End scratch_emit_size section.
3375   set_in_scratch_emit_size(false);
3376 
3377   return buf.insts_size();
3378 }
3379 
3380 void PhaseOutput::install() {
3381   if (!C->should_install_code()) {
3382     return;
3383   } else if (C->stub_function() != nullptr) {
3384     install_stub(C->stub_name());
3385   } else {
3386     install_code(C->method(),
3387                  C->entry_bci(),
3388                  CompileBroker::compiler2(),
3389                  C->has_unsafe_access(),
3390                  SharedRuntime::is_wide_vector(C->max_vector_size()),
3391                  C->rtm_state());
3392   }

3399                                bool              has_wide_vectors,
3400                                RTMState          rtm_state) {
3401   // Check if we want to skip execution of all compiled code.
3402   {
3403 #ifndef PRODUCT
3404     if (OptoNoExecute) {
3405       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3406       return;
3407     }
3408 #endif
3409     Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3410 
3411     if (C->is_osr_compilation()) {
3412       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3413       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3414     } else {
3415       if (!target->is_static()) {
3416         // The UEP of an nmethod ensures that the VEP is padded. However, the padding of the UEP is placed
3417         // before the inline cache check, so we don't have to execute any nop instructions when dispatching
3418         // through the UEP, yet we can ensure that the VEP is aligned appropriately.
3419         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3420       }
3421       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3422       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3423     }
3424 
3425     C->env()->register_method(target,
3426                                      entry_bci,
3427                                      &_code_offsets,
3428                                      _orig_pc_slot_offset_in_bytes,
3429                                      code_buffer(),
3430                                      frame_size_in_words(),
3431                                      oop_map_set(),
3432                                      &_handler_table,
3433                                      inc_table(),
3434                                      compiler,
3435                                      has_unsafe_access,
3436                                      SharedRuntime::is_wide_vector(C->max_vector_size()),
3437                                      C->has_monitors(),
3438                                      0,
3439                                      C->rtm_state());
3440 
3441     if (C->log() != nullptr) { // Print code cache state into compiler log
3442       C->log()->code_cache_state();
3443     }
3444   }
3445 }
3446 void PhaseOutput::install_stub(const char* stub_name) {
3447   // Entry point will be accessed using stub_entry_point();
3448   if (code_buffer() == nullptr) {
3449     Matcher::soft_match_failure();
3450   } else {
3451     if (PrintAssembly && (WizardMode || Verbose))
3452       tty->print_cr("### Stub::%s", stub_name);
3453 
3454     if (!C->failing()) {
3455       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3456 
3457       // Make the NMethod
3458       // For now we mark the frame as never safe for profile stackwalking
3459       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfo.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/compilerDirectives.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/gc_globals.hpp"
  37 #include "gc/shared/c2/barrierSetC2.hpp"
  38 #include "memory/allocation.inline.hpp"
  39 #include "memory/allocation.hpp"
  40 #include "opto/ad.hpp"
  41 #include "opto/block.hpp"
  42 #include "opto/c2compiler.hpp"
  43 #include "opto/c2_MacroAssembler.hpp"
  44 #include "opto/callnode.hpp"
  45 #include "opto/cfgnode.hpp"
  46 #include "opto/locknode.hpp"
  47 #include "opto/machnode.hpp"
  48 #include "opto/node.hpp"
  49 #include "opto/optoreg.hpp"
  50 #include "opto/output.hpp"
  51 #include "opto/regalloc.hpp"
  52 #include "opto/runtime.hpp"
  53 #include "opto/subnode.hpp"
  54 #include "opto/type.hpp"
  55 #include "runtime/handles.inline.hpp"
  56 #include "runtime/sharedRuntime.hpp"

 230     _first_block_size(0),
 231     _handler_table(),
 232     _inc_table(),
 233     _stub_list(),
 234     _oop_map_set(nullptr),
 235     _scratch_buffer_blob(nullptr),
 236     _scratch_locs_memory(nullptr),
 237     _scratch_const_size(-1),
 238     _in_scratch_emit_size(false),
 239     _frame_slots(0),
 240     _code_offsets(),
 241     _node_bundling_limit(0),
 242     _node_bundling_base(nullptr),
 243     _orig_pc_slot(0),
 244     _orig_pc_slot_offset_in_bytes(0),
 245     _buf_sizes(),
 246     _block(nullptr),
 247     _index(0) {
 248   C->set_output(this);
 249   if (C->stub_name() == nullptr) {
 250     int fixed_slots = C->fixed_slots();
 251     if (C->needs_stack_repair()) {
 252       fixed_slots -= 2;
 253     }
 254     // TODO 8284443 Only reserve extra slot if needed
 255     if (InlineTypeReturnedAsFields) {
 256       fixed_slots -= 2;
 257     }
 258     _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
 259   }
 260 }
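
Note on the slot arithmetic above: a stack slot is 4 bytes (VMRegImpl::stack_slot_size) and sizeof(address) is 8 on a 64-bit VM, so the saved PC spans two slots, and each of the two reserved features gives back two slots before the PC slot is placed. A standalone sketch of the computation, assuming 64-bit constants and simplified names (not VM code):

    #include <cassert>
    #include <cstdio>

    // Stand-ins for VM constants (assumption: LP64 target).
    const int stack_slot_size = 4;  // VMRegImpl::stack_slot_size
    const int address_size    = 8;  // sizeof(address)

    int orig_pc_slot(int fixed_slots, bool needs_stack_repair,
                     bool inline_type_returned_as_fields) {
      // Each reserved feature takes two slots (8 bytes) off the fixed
      // area, mirroring the adjustments in the constructor above.
      if (needs_stack_repair)             fixed_slots -= 2;
      if (inline_type_returned_as_fields) fixed_slots -= 2;
      // The saved PC occupies sizeof(address) / stack_slot_size = 2 slots.
      return fixed_slots - address_size / stack_slot_size;
    }

    int main() {
      assert(orig_pc_slot(8, true, true) == 2); // 8 - 2 - 2 - 2
      printf("%d\n", orig_pc_slot(8, true, true));
      return 0;
    }
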
 261 
 262 PhaseOutput::~PhaseOutput() {
 263   C->set_output(nullptr);
 264   if (_scratch_buffer_blob != nullptr) {
 265     BufferBlob::free(_scratch_buffer_blob);
 266   }
 267 }
 268 
 269 void PhaseOutput::perform_mach_node_analysis() {
 270   // Late barrier analysis must be done after schedule and bundle;
 271   // otherwise liveness-based spilling will fail.
 272   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 273   bs->late_barrier_analysis();
 274 
 275   pd_perform_mach_node_analysis();
 276 
 277   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 278 }
 279 
 280 // Convert Nodes to instruction bits and pass off to the VM
 281 void PhaseOutput::Output() {
 282   // RootNode goes away; its block must be empty by now
 283   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 284 
 285   // The number of new nodes (mostly MachNop) is proportional to
 286   // the number of Java calls and inner loops which are aligned.
 287   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 288                             C->inner_loops()*(OptoLoopAlignment-1)),
 289                            "out of nodes before code generation" ) ) {
 290     return;
 291   }
 292   // Make sure I can find the Start Node
 293   Block *entry = C->cfg()->get_block(1);
 294   Block *broot = C->cfg()->get_root_block();
 295 
 296   const StartNode *start = entry->head()->as_Start();
 297 
 298   // Replace StartNode with prolog
 299   Label verified_entry;
 300   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
 301   entry->map_node(prolog, 0);
 302   C->cfg()->map_node_to_block(prolog, entry);
 303   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 304 
 305   // Virtual methods need an unverified entry point
 306   if (C->is_osr_compilation()) {
 307     if (PoisonOSREntry) {
 308       // TODO: Should use a ShouldNotReachHereNode...
 309       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 310     }
 311   } else {
 312     if (C->method()) {
 313       if (C->method()->has_scalarized_args()) {
 314         // Add entry point to unpack all inline type arguments
 315         C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
 316         if (!C->method()->is_static()) {
 317           // Add verified/unverified entry points to only unpack inline type receiver at interface calls
 318           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
 319           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true,  /* receiver_only */ true));
 320           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
 321         }
 322       } else if (!C->method()->is_static()) {
 323         // Insert unverified entry point
 324         C->cfg()->insert(broot, 0, new MachUEPNode());
 325       }
 326     }
 327   }
 328 
 329   // Break before main entry point
 330   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 331       (OptoBreakpoint && C->is_method_compilation())       ||
 332       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 333       (OptoBreakpointC2R && !C->method())                   ) {
 334     // checking for C->method() means that OptoBreakpoint does not apply to
 335     // runtime stubs or frame converters
 336     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 337   }
 338 
 339   // Insert epilogs before every return
 340   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 341     Block* block = C->cfg()->get_block(i);
 342     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 343       Node* m = block->end();
 344       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 345         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 346         block->add_inst(epilog);
 347         C->cfg()->map_node_to_block(epilog, block);
 348       }
 349     }
 350   }
 351 
 352   // Keeper of sizing aspects
 353   _buf_sizes = BufferSizingData();
 354 
 355   // Initialize code buffer
 356   estimate_buffer_size(_buf_sizes._const);
 357   if (C->failing()) return;
 358 
 359   // Pre-compute the length of blocks and replace
 360   // long branches with short ones if the machine supports it.
 361   // Must be done before ScheduleAndBundle due to SPARC delay slots
 362   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 363   blk_starts[0] = 0;
 364   shorten_branches(blk_starts);
 365 
 366   if (!C->is_osr_compilation() && C->has_scalarized_args()) {
 367     // Compute the offsets of the entry points required by the inline type calling convention
 368     if (!C->method()->is_static()) {
 369       // We have entries at the beginning of the method, implemented by the first 4 nodes.
 370       // Entry                     (unverified) @ offset 0
 371       // Verified_Inline_Entry_RO
 372       // Inline_Entry              (unverified)
 373       // Verified_Inline_Entry
 374       uint offset = 0;
 375       _code_offsets.set_value(CodeOffsets::Entry, offset);
 376 
 377       offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
 378       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
 379 
 380       offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
 381       _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
 382 
 383       offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
 384       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
 385     } else {
 386       _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
 387       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
 388     }
 389   }
 390 
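
The entry points above are laid out back to back at the start of the method, so each CodeOffsets value is a running sum of the preceding MachVEPNode sizes. A minimal sketch with hypothetical node sizes (the real sizes come from size(C->regalloc())):

    #include <cstdio>

    int main() {
      // Hypothetical byte sizes of the first three MachVEPNodes, in
      // block order (assumed values, for illustration only).
      int vep_size[3] = {16, 16, 24};

      int offset = 0;
      int entry = offset;                       // Entry (unverified)
      offset += vep_size[0];
      int verified_inline_entry_ro = offset;    // Verified_Inline_Entry_RO
      offset += vep_size[1];
      int inline_entry = offset;                // Inline_Entry (unverified)
      offset += vep_size[2];
      int verified_inline_entry = offset;       // Verified_Inline_Entry

      printf("%d %d %d %d\n", entry, verified_inline_entry_ro,
             inline_entry, verified_inline_entry);
      return 0;
    }
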
 391   ScheduleAndBundle();
 392   if (C->failing()) {
 393     return;
 394   }
 395 
 396   perform_mach_node_analysis();
 397 
 398   // Complete sizing of codebuffer
 399   CodeBuffer* cb = init_buffer();
 400   if (cb == nullptr || C->failing()) {
 401     return;
 402   }
 403 
 404   BuildOopMaps();
 405 
 406   if (C->failing())  {
 407     return;
 408   }
 409 
 410   fill_buffer(cb, blk_starts);

 531     // Sum all instruction sizes to compute block size
 532     uint last_inst = block->number_of_nodes();
 533     uint blk_size = 0;
 534     for (uint j = 0; j < last_inst; j++) {
 535       _index = j;
 536       Node* nj = block->get_node(_index);
 537       // Handle machine instruction nodes
 538       if (nj->is_Mach()) {
 539         MachNode* mach = nj->as_Mach();
 540         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 541         reloc_size += mach->reloc();
 542         if (mach->is_MachCall()) {
 543           // add size information for trampoline stub
 544           // class CallStubImpl is platform-specific and defined in the *.ad files.
 545           stub_size  += CallStubImpl::size_call_trampoline();
 546           reloc_size += CallStubImpl::reloc_call_trampoline();
 547 
 548           MachCallNode *mcall = mach->as_MachCall();
 549           // This destination address is NOT PC-relative
 550 
 551           if (mcall->entry_point() != nullptr) {
 552             mcall->method_set((intptr_t)mcall->entry_point());
 553           }
 554 
 555           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 556             stub_size  += CompiledDirectCall::to_interp_stub_size();
 557             reloc_size += CompiledDirectCall::reloc_to_interp_stub();
 558           }
 559         } else if (mach->is_MachSafePoint()) {
 560           // If call/safepoint are adjacent, account for possible
 561           // nop to disambiguate the two safepoints.
 562           // ScheduleAndBundle() can rearrange nodes in a block,
 563           // check for all offsets inside this block.
 564           if (last_call_adr >= blk_starts[i]) {
 565             blk_size += nop_size;
 566           }
 567         }
 568         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 569           // Nop is inserted between "avoid back to back" instructions.
 570           // ScheduleAndBundle() can rearrange nodes in a block,
 571           // check for all offsets inside this block.
 572           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 573             blk_size += nop_size;

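The estimate above charges every aligned instruction its worst-case padding, (alignment_required() - 1) * relocInfo::addr_unit(), since final offsets are not known yet. A self-contained illustration, assuming a 1-byte address unit as on common ports:

    #include <cstdio>

    // Worst-case nop padding in front of an instruction, mirroring
    // (alignment_required() - 1) * addr_unit().
    int worst_case_padding(int alignment_required, int addr_unit) {
      return (alignment_required - 1) * addr_unit;
    }

    // Padding actually needed once the instruction's offset is known.
    int actual_padding(int offset, int alignment) {
      return (alignment - offset % alignment) % alignment;
    }

    int main() {
      printf("%d\n", worst_case_padding(16, 1)); // 15 bytes, worst case
      printf("%d\n", actual_padding(0x1c, 16));  // only 4 bytes needed here
      return 0;
    }
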
 788     // New functionality:
 789     //   Assert if the local is not top. In product mode let the new node
 790     //   override the old entry.
 791     assert(local == C->top(), "LocArray collision");
 792     if (local == C->top()) {
 793       return;
 794     }
 795     array->pop();
 796   }
 797   const Type *t = local->bottom_type();
 798 
 799   // Is it a safepoint scalar object node?
 800   if (local->is_SafePointScalarObject()) {
 801     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 802 
 803     ObjectValue* sv = (ObjectValue*) sv_for_node_id(objs, spobj->_idx);
 804     if (sv == nullptr) {
 805       ciKlass* cik = t->is_oopptr()->exact_klass();
 806       assert(cik->is_instance_klass() ||
 807              cik->is_array_klass(), "Not supported allocation.");
 808       uint first_ind = spobj->first_index(sfpt->jvms());
 809       // Nullable, scalarized inline types have an is_init input
 810       // that needs to be checked before using the field values.
 811       ScopeValue* is_init = nullptr;
 812       if (cik->is_inlinetype()) {
 813         Node* init_node = sfpt->in(first_ind++);
 814         assert(init_node != nullptr, "is_init node not found");
 815         if (!init_node->is_top()) {
 816           const TypeInt* init_type = init_node->bottom_type()->is_int();
 817           if (init_node->is_Con()) {
 818             is_init = new ConstantIntValue(init_type->get_con());
 819           } else {
 820             OptoReg::Name init_reg = C->regalloc()->get_reg_first(init_node);
 821             is_init = new_loc_value(C->regalloc(), init_reg, Location::normal);
 822           }
 823         }
 824       }
 825       sv = new ObjectValue(spobj->_idx,
 826                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), is_init);
 827       set_sv_for_object_node(objs, sv);
 828 
 829       for (uint i = 0; i < spobj->n_fields(); i++) {
 830         Node* fld_node = sfpt->in(first_ind+i);
 831         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 832       }
 833     }
 834     array->append(sv);
 835     return;
 836   } else if (local->is_SafePointScalarMerge()) {
 837     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 838     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 839 
 840     if (mv == nullptr) {
 841       GrowableArray<ScopeValue*> deps;
 842 
 843       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 844       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 845       assert(deps.length() == 1, "missing value");
 846 
 847       int selector_idx = smerge->selector_idx(sfpt->jvms());
 848       (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);

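The is_init value recorded in this hunk lets deoptimization distinguish a null inline-type local from one whose flattened fields are valid. Conceptually (a simplified sketch, not the VM's ScopeValue/ObjectValue types):

    #include <cstdio>

    // Simplified picture of a scalar-replaced, nullable inline type in
    // debug info: an is_init flag plus the flattened field values.
    struct ScalarizedValue {
      bool is_init;        // false => the local is really null
      int  field_values[2];
    };

    void reallocate_at_deopt(const ScalarizedValue& v) {
      if (!v.is_init) {
        printf("local is null, nothing to reallocate\n");
        return;
      }
      printf("reallocate object from fields %d, %d\n",
             v.field_values[0], v.field_values[1]);
    }

    int main() {
      ScalarizedValue null_local = {false, {0, 0}};
      ScalarizedValue live_local = {true, {42, 7}};
      reallocate_at_deopt(null_local);
      reallocate_at_deopt(live_local);
      return 0;
    }
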
1027 bool PhaseOutput::contains_as_owner(GrowableArray<MonitorValue*> *monarray, ObjectValue *ov) const {
1028   for (int k = 0; k < monarray->length(); k++) {
1029     MonitorValue* mv = monarray->at(k);
1030     if (mv->owner() == ov) {
1031       return true;
1032     }
1033   }
1034 
1035   return false;
1036 }
1037 
1038 //--------------------------Process_OopMap_Node--------------------------------
1039 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1040   // Handle special safepoint nodes for synchronization
1041   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1042   MachCallNode      *mcall;
1043 
1044   int safepoint_pc_offset = current_offset;
1045   bool is_method_handle_invoke = false;
1046   bool return_oop = false;
1047   bool return_scalarized = false;
1048   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1049   bool arg_escape = false;
1050 
1051   // Add the safepoint in the DebugInfoRecorder
1052   if( !mach->is_MachCall() ) {
1053     mcall = nullptr;
1054     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1055   } else {
1056     mcall = mach->as_MachCall();
1057 
1058     // Is the call a MethodHandle call?
1059     if (mcall->is_MachCallJava()) {
1060       if (mcall->as_MachCallJava()->_method_handle_invoke) {
1061         assert(C->has_method_handle_invokes(), "must have been set during call generation");
1062         is_method_handle_invoke = true;
1063       }
1064       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1065     }
1066 
1067     // Check if a call returns an object.
1068     if (mcall->returns_pointer() || mcall->returns_scalarized()) {
1069       return_oop = true;
1070     }
1071     if (mcall->returns_scalarized()) {
1072       return_scalarized = true;
1073     }
1074     safepoint_pc_offset += mcall->ret_addr_offset();
1075     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1076   }
1077 
1078   // Loop over the JVMState list to add scope information
 1079   // Do not skip safepoints with a null method; they need monitor info
1080   JVMState* youngest_jvms = sfn->jvms();
1081   int max_depth = youngest_jvms->depth();
1082 
1083   // Allocate the object pool for scalar-replaced objects -- the map from
1084   // small-integer keys (which can be recorded in the local and ostack
1085   // arrays) to descriptions of the object state.
1086   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1087 
1088   // Visit scopes from oldest to youngest.
1089   for (int depth = 1; depth <= max_depth; depth++) {
1090     JVMState* jvms = youngest_jvms->of_depth(depth);
1091     int idx;
1092     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1093     // Safepoints that do not have method() set only provide oop-map and monitor info

1216     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1217     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1218 
1219     // Make method available for all Safepoints
1220     ciMethod* scope_method = method ? method : C->method();
1221     // Describe the scope here
1222     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1223     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1224     // Now we can describe the scope.
1225     methodHandle null_mh;
1226     bool rethrow_exception = false;
1227     C->debug_info()->describe_scope(
1228       safepoint_pc_offset,
1229       null_mh,
1230       scope_method,
1231       jvms->bci(),
1232       jvms->should_reexecute(),
1233       rethrow_exception,
1234       is_method_handle_invoke,
1235       return_oop,
1236       return_scalarized,
1237       has_ea_local_in_scope,
1238       arg_escape,
1239       locvals,
1240       expvals,
1241       monvals
1242     );
1243   } // End jvms loop
1244 
1245   // Mark the end of the scope set.
1246   C->debug_info()->end_safepoint(safepoint_pc_offset);
1247 }
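
For calls, the debug info above is keyed to the return address rather than the call instruction itself, hence the ret_addr_offset() adjustment. A toy illustration with assumed offsets:

    #include <cstdio>

    int main() {
      int current_offset  = 0x40; // start of the call instruction (assumed)
      int ret_addr_offset = 5;    // e.g. a 5-byte call encoding (assumed)

      // Non-call safepoints are recorded at the instruction itself;
      // call safepoints at the PC the callee will return to.
      int safepoint_pc_offset = current_offset + ret_addr_offset;
      printf("debug info recorded at offset 0x%x\n", safepoint_pc_offset);
      return 0;
    }
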
1248 
1249 
1250 
1251 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1252 class NonSafepointEmitter {
1253     Compile*  C;
1254     JVMState* _pending_jvms;
1255     int       _pending_offset;
1256 

1592           MachNode *nop = new MachNopNode(nops_cnt);
1593           block->insert_node(nop, j++);
1594           last_inst++;
1595           C->cfg()->map_node_to_block(nop, block);
1596           // Ensure enough space.
1597           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1598           if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1599             C->record_failure("CodeCache is full");
1600             return;
1601           }
1602           nop->emit(*cb, C->regalloc());
1603           cb->flush_bundle(true);
1604           current_offset = cb->insts_size();
1605         }
1606 
1607         bool observe_safepoint = is_sfn;
1608         // Remember the start of the last call in a basic block
1609         if (is_mcall) {
1610           MachCallNode *mcall = mach->as_MachCall();
1611 
1612           if (mcall->entry_point() != nullptr) {
1613             // This destination address is NOT PC-relative
1614             mcall->method_set((intptr_t)mcall->entry_point());
1615           }
1616 
1617           // Save the return address
1618           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1619 
1620           observe_safepoint = mcall->guaranteed_safepoint();
1621         }
1622 
 1623         // sfn is valid whenever mcall is valid because MachCallNode inherits from MachSafePointNode
1624         if (observe_safepoint) {
1625           // Handle special safepoint nodes for synchronization
1626           if (!is_mcall) {
1627             MachSafePointNode *sfn = mach->as_MachSafePoint();
1628             // !!!!! Stubs only need an oopmap right now, so bail out
1629             if (sfn->jvms()->method() == nullptr) {
1630               // Write the oopmap directly to the code blob??!!
1631               continue;
1632             }
1633           } // End synchronization
1634 
1635           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1759       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1760         node_offsets[n->_idx] = cb->insts_size();
1761       }
1762 #endif
1763       assert(!C->failing(), "Should not reach here if failing.");
1764 
1765       // "Normal" instruction case
1766       DEBUG_ONLY(uint instr_offset = cb->insts_size());
1767       n->emit(*cb, C->regalloc());
1768       current_offset = cb->insts_size();
1769 
1770       // Above we only verified that there is enough space in the instruction section.
1771       // However, the instruction may emit stubs that cause code buffer expansion.
1772       // Bail out here if expansion failed due to a lack of code cache space.
1773       if (C->failing()) {
1774         return;
1775       }
1776 
1777       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1778              "ret_addr_offset() not within emitted code");
1779 #ifdef ASSERT
1780       uint n_size = n->size(C->regalloc());
1781       if (n_size < (current_offset-instr_offset)) {
1782         MachNode* mach = n->as_Mach();
1783         n->dump();
1784         mach->dump_format(C->regalloc(), tty);
1785         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1786         Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
1787         tty->print_cr(" ------------------- ");
1788         BufferBlob* blob = this->scratch_buffer_blob();
1789         address blob_begin = blob->content_begin();
1790         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1791         assert(false, "wrong size of mach node");
1792       }
1793 #endif
1794       non_safepoints.observe_instruction(n, current_offset);
1795 
 1796       // mcall is the last "call" that can be a safepoint;
 1797       // record it so we can see if a poll directly follows it,
 1798       // in which case we'll need a pad to make the PcDesc sites unique.

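The PcDesc comment above is about uniqueness: two safepoints must not share a PC offset or their PcDesc entries would collide, so a poll that lands directly on a call's return address gets a nop pad first. Sketched with assumed values:

    #include <cstdio>

    int main() {
      int last_call_return_pc = 0x45;  // PC recorded for the call safepoint
      int poll_pc             = 0x45;  // poll emitted right after the call

      int nop_size = 1;                // assumed one-byte nop
      if (poll_pc == last_call_return_pc) {
        poll_pc += nop_size;           // pad so each PcDesc offset is unique
      }
      printf("call site 0x%x, poll site 0x%x\n", last_call_return_pc, poll_pc);
      return 0;
    }
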
3191         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3192         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3193       }
3194     }
3195     // Do not allow defs of new derived values to float above GC
3196     // points unless the base is definitely available at the GC point.
3197 
3198     Node *m = b->get_node(i);
3199 
3200     // Add precedence edge from following safepoint to use of derived pointer
3201     if( last_safept_node != end_node &&
3202         m != last_safept_node) {
3203       for (uint k = 1; k < m->req(); k++) {
3204         const Type *t = m->in(k)->bottom_type();
3205         if( t->isa_oop_ptr() &&
3206             t->is_ptr()->offset() != 0 ) {
3207           last_safept_node->add_prec( m );
3208           break;
3209         }
3210       }
3211 
3212       // Do not allow a CheckCastPP node whose input is a raw pointer to
3213       // float past a safepoint.  This can occur when a buffered inline
3214       // type is allocated in a loop and the CheckCastPP from that
3215       // allocation is reused outside the loop.  If the use inside the
3216       // loop is scalarized the CheckCastPP will no longer be connected
3217       // to the loop safepoint.  See JDK-8264340.
3218       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
3219         Node *def = m->in(1);
3220         if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
3221           last_safept_node->add_prec(m);
3222         }
3223       }
3224     }
3225 
3226     if( n->jvms() ) {           // Precedence edge from derived to safept
3227       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3228       if( b->get_node(last_safept) != last_safept_node ) {
3229         last_safept = b->find_node(last_safept_node);
3230       }
3231       for( uint j=last_safept; j > i; j-- ) {
3232         Node *mach = b->get_node(j);
3233         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3234           mach->add_prec( n );
3235       }
3236       last_safept = i;
3237       last_safept_node = m;
3238     }
3239   }
3240 
3241   if (fat_proj_seen) {
3242     // Garbage collect pinch nodes that were not consumed.
3243     // They are usually created by a fat kill MachProj for a call.

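The add_prec() calls in this hunk attach precedence edges, which carry no data and exist only to keep the scheduler from moving one node past another; that is how derived-pointer uses and the raw-pointer CheckCastPP are pinned relative to their safepoint. A minimal sketch of the idea:

    #include <cstdio>
    #include <vector>

    // A node with extra "precedence" inputs that only constrain
    // scheduling order (no value flows along them).
    struct Node {
      const char* name;
      std::vector<Node*> prec; // must be scheduled before this node
    };

    bool scheduled_before(const std::vector<Node*>& order, Node* a, Node* b) {
      size_t ia = 0, ib = 0;
      for (size_t i = 0; i < order.size(); i++) {
        if (order[i] == a) ia = i;
        if (order[i] == b) ib = i;
      }
      return ia < ib;
    }

    int main() {
      Node use  = {"derived-use", {}};
      Node sfpt = {"safepoint", {}};
      sfpt.prec.push_back(&use);   // like last_safept_node->add_prec(m)

      std::vector<Node*> order = {&use, &sfpt};
      printf("constraint honored: %s\n",
             scheduled_before(order, sfpt.prec[0], &sfpt) ? "yes" : "no");
      return 0;
    }
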
3362 }
3363 #endif
3364 
3365 //-----------------------init_scratch_buffer_blob------------------------------
3366 // Construct a temporary BufferBlob and cache it for this compile.
3367 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3368   // If there is already a scratch buffer blob allocated and the
3369   // constant section is big enough, use it.  Otherwise free the
3370   // current and allocate a new one.
3371   BufferBlob* blob = scratch_buffer_blob();
3372   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3373     // Use the current blob.
3374   } else {
3375     if (blob != nullptr) {
3376       BufferBlob::free(blob);
3377     }
3378 
3379     ResourceMark rm;
3380     _scratch_const_size = const_size;
3381     int size = C2Compiler::initial_code_buffer_size(const_size);
3382     if (C->has_scalarized_args()) {
3383       // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
3384       // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
3385       ciMethod* method = C->method();
3386       int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
3387       int arg_num = 0;
3388       if (!method->is_static()) {
3389         if (method->is_scalarized_arg(arg_num)) {
3390           size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
3391         }
3392         arg_num++;
3393       }
3394       for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
3395         if (method->is_scalarized_arg(arg_num)) {
3396           size += str.type()->as_inline_klass()->oop_count() * barrier_size;
3397         }
3398         arg_num++;
3399       }
3400     }
3401     blob = BufferBlob::create("Compile::scratch_buffer", size);
3402     // Record the buffer blob for next time.
3403     set_scratch_buffer_blob(blob);
3404     // Have we run out of code space?
3405     if (scratch_buffer_blob() == nullptr) {
 3406       // Let CompileBroker disable further compilations.
3407       C->record_failure("Not enough space for scratch buffer in CodeCache");
3408       return;
3409     }
3410   }
3411 
3412   // Initialize the relocation buffers
3413   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3414   set_scratch_locs_memory(locs_buf);
3415 }
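
The bump above is charged per oop field of every scalarized argument. Under the non-ZGC debug numbers shown (7 + 37 bytes per barrier), one inline-type argument with three oop fields adds 3 * 44 = 132 bytes. The same computation as a standalone sketch:

    #include <cstdio>

    int scratch_size_bump(int oop_count, bool use_zgc, bool debug_build) {
      // Mirrors: UseZGC ? 200 : (7 DEBUG_ONLY(+ 37)) per oop field.
      int barrier_size = use_zgc ? 200 : (debug_build ? 7 + 37 : 7);
      return oop_count * barrier_size;
    }

    int main() {
      printf("%d\n", scratch_size_bump(3, false, true)); // 132
      return 0;
    }
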
3416 
3417 
3418 //-----------------------scratch_emit_size-------------------------------------
3419 // Helper function that computes size by emitting code
3420 uint PhaseOutput::scratch_emit_size(const Node* n) {

3451   buf.insts()->set_scratch_emit();
3452   buf.stubs()->set_scratch_emit();
3453 
3454   // Do the emission.
3455 
3456   Label fakeL; // Fake label for branch instructions.
3457   Label*   saveL = nullptr;
3458   uint save_bnum = 0;
3459   bool is_branch = n->is_MachBranch();
3460   if (is_branch) {
3461     MacroAssembler masm(&buf);
3462     masm.bind(fakeL);
3463     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3464     n->as_MachBranch()->label_set(&fakeL, 0);
3465   }
3466   n->emit(buf, C->regalloc());
3467 
3468   // Emitting into the scratch buffer should not fail
3469   assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3470 
3471   // Restore label.
3472   if (is_branch) {
3473     n->as_MachBranch()->label_set(saveL, save_bnum);
3474   }
3475 
3476   // End scratch_emit_size section.
3477   set_in_scratch_emit_size(false);
3478 
3479   return buf.insts_size();
3480 }
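
A branch cannot be emitted against an unbound label, so the size pass temporarily retargets it at a label bound to the start of the scratch buffer and restores the real target afterwards. The save/patch/restore shape, reduced to a sketch with simplified types:

    #include <cassert>
    #include <cstdio>

    struct Label { int pos; };

    struct Branch {
      Label* target;
      void save_label(Label** saved) { *saved = target; }
      void label_set(Label* l)       { target = l; }
    };

    int main() {
      Label real = {0x80};
      Branch br  = {&real};

      Label fake = {0};        // bound at the scratch buffer start
      Label* saved = nullptr;
      br.save_label(&saved);   // remember the real target
      br.label_set(&fake);     // emit against the fake label
      // ... size-only emission would happen here ...
      br.label_set(saved);     // restore the real target

      assert(br.target == &real);
      printf("restored target at 0x%x\n", br.target->pos);
      return 0;
    }
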
3481 
3482 void PhaseOutput::install() {
3483   if (!C->should_install_code()) {
3484     return;
3485   } else if (C->stub_function() != nullptr) {
3486     install_stub(C->stub_name());
3487   } else {
3488     install_code(C->method(),
3489                  C->entry_bci(),
3490                  CompileBroker::compiler2(),
3491                  C->has_unsafe_access(),
3492                  SharedRuntime::is_wide_vector(C->max_vector_size()),
3493                  C->rtm_state());
3494   }

3501                                bool              has_wide_vectors,
3502                                RTMState          rtm_state) {
3503   // Check if we want to skip execution of all compiled code.
3504   {
3505 #ifndef PRODUCT
3506     if (OptoNoExecute) {
3507       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3508       return;
3509     }
3510 #endif
3511     Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3512 
3513     if (C->is_osr_compilation()) {
3514       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3515       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3516     } else {
3517       if (!target->is_static()) {
3518         // The UEP of an nmethod ensures that the VEP is padded. However, the padding of the UEP is placed
3519         // before the inline cache check, so we don't have to execute any nop instructions when dispatching
3520         // through the UEP, yet we can ensure that the VEP is aligned appropriately.
3521         // TODO 8325106 Check this
3522         // _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3523       }
3524       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3525       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
3526         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
3527       }
3528       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
3529         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
3530       }
3531       if (_code_offsets.value(CodeOffsets::Entry) == -1) {
3532         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
3533       }
3534       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3535     }
3536 
3537     C->env()->register_method(target,
3538                               entry_bci,
3539                               &_code_offsets,
3540                               _orig_pc_slot_offset_in_bytes,
3541                               code_buffer(),
3542                               frame_size_in_words(),
3543                               _oop_map_set,
3544                               &_handler_table,
3545                               inc_table(),
3546                               compiler,
3547                               has_unsafe_access,
3548                               SharedRuntime::is_wide_vector(C->max_vector_size()),
3549                               C->has_monitors(),
3550                               0,
3551                               C->rtm_state());
3552 
3553     if (C->log() != nullptr) { // Print code cache state into compiler log
3554       C->log()->code_cache_state();
3555     }
3556   }
3557 }
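
When a method has no scalarized arguments, the inline entry points collapse onto the ordinary verified entry; the -1 checks above implement that defaulting (-1 meaning the offset was never set during Output()). A compact sketch:

    #include <cstdio>

    int main() {
      int first_block_size = 32;  // assumed size of the entry block

      // -1 means "never set" (see the non-OSR path in Output()).
      int entry                    = -1;
      int verified_inline_entry    = -1;
      int verified_inline_entry_ro = -1;

      // Unset inline entries fall back to the ordinary verified entry.
      if (verified_inline_entry    == -1) verified_inline_entry    = first_block_size;
      if (verified_inline_entry_ro == -1) verified_inline_entry_ro = first_block_size;
      if (entry                    == -1) entry                    = first_block_size;

      printf("%d %d %d\n", entry, verified_inline_entry, verified_inline_entry_ro);
      return 0;
    }
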
3558 void PhaseOutput::install_stub(const char* stub_name) {
3559   // Entry point will be accessed using stub_entry_point();
3560   if (code_buffer() == nullptr) {
3561     Matcher::soft_match_failure();
3562   } else {
3563     if (PrintAssembly && (WizardMode || Verbose))
3564       tty->print_cr("### Stub::%s", stub_name);
3565 
3566     if (!C->failing()) {
3567       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3568 
3569       // Make the NMethod
3570       // For now we mark the frame as never safe for profile stackwalking
3571       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,