src/hotspot/share/opto/output.cpp (old version)

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfo.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/compilerDirectives.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/c2/barrierSetC2.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/allocation.hpp"
  39 #include "opto/ad.hpp"
  40 #include "opto/block.hpp"
  41 #include "opto/c2compiler.hpp"
  42 #include "opto/c2_MacroAssembler.hpp"
  43 #include "opto/callnode.hpp"
  44 #include "opto/cfgnode.hpp"
  45 #include "opto/locknode.hpp"
  46 #include "opto/machnode.hpp"
  47 #include "opto/node.hpp"
  48 #include "opto/optoreg.hpp"
  49 #include "opto/output.hpp"
  50 #include "opto/regalloc.hpp"
  51 #include "opto/runtime.hpp"
  52 #include "opto/subnode.hpp"
  53 #include "opto/type.hpp"
  54 #include "runtime/handles.inline.hpp"
  55 #include "runtime/sharedRuntime.hpp"

 229     _first_block_size(0),
 230     _handler_table(),
 231     _inc_table(),
 232     _stub_list(),
 233     _oop_map_set(nullptr),
 234     _scratch_buffer_blob(nullptr),
 235     _scratch_locs_memory(nullptr),
 236     _scratch_const_size(-1),
 237     _in_scratch_emit_size(false),
 238     _frame_slots(0),
 239     _code_offsets(),
 240     _node_bundling_limit(0),
 241     _node_bundling_base(nullptr),
 242     _orig_pc_slot(0),
 243     _orig_pc_slot_offset_in_bytes(0),
 244     _buf_sizes(),
 245     _block(nullptr),
 246     _index(0) {
 247   C->set_output(this);
 248   if (C->stub_name() == nullptr) {
 249     _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
 250   }
 251 }
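A minimal, self-contained sketch of the slot arithmetic above, assuming an LP64 platform (sizeof(address) == 8, VMRegImpl::stack_slot_size == 4); the fixed_slots value is invented for illustration:

    #include <cstdio>

    int main() {
      const int stack_slot_size = 4;                        // stands in for VMRegImpl::stack_slot_size
      const int pc_slots = sizeof(void*) / stack_slot_size; // 8 / 4 == 2 slots hold the saved PC
      int fixed_slots  = 6;                                 // hypothetical C->fixed_slots() result
      int orig_pc_slot = fixed_slots - pc_slots;            // the PC lands in the last two fixed slots
      printf("orig_pc_slot = %d\n", orig_pc_slot);          // prints 4
      return 0;
    }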
 252 
 253 PhaseOutput::~PhaseOutput() {
 254   C->set_output(nullptr);
 255   if (_scratch_buffer_blob != nullptr) {
 256     BufferBlob::free(_scratch_buffer_blob);
 257   }
 258 }
 259 
 260 void PhaseOutput::perform_mach_node_analysis() {
 261   // Late barrier analysis must be done after schedule and bundle
 262   // Otherwise liveness based spilling will fail
 263   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 264   bs->late_barrier_analysis();
 265 
 266   pd_perform_mach_node_analysis();
 267 
 268   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 269 }
 270 
 271 // Convert Nodes to instruction bits and pass off to the VM
 272 void PhaseOutput::Output() {
 273   // RootNode goes
 274   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 275 
 276   // The number of new nodes (mostly MachNop) is proportional to
 277   // the number of java calls and inner loops which are aligned.
 278   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 279                             C->inner_loops()*(OptoLoopAlignment-1)),
 280                            "out of nodes before code generation" ) ) {
 281     return;
 282   }
 283   // Make sure I can find the Start Node
 284   Block *entry = C->cfg()->get_block(1);
 285   Block *broot = C->cfg()->get_root_block();
 286 
 287   const StartNode *start = entry->head()->as_Start();
 288 
 289   // Replace StartNode with prolog
 290   MachPrologNode *prolog = new MachPrologNode();
 291   entry->map_node(prolog, 0);
 292   C->cfg()->map_node_to_block(prolog, entry);
 293   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 294 
 295   // Virtual methods need an unverified entry point
 296 
 297   if( C->is_osr_compilation() ) {
 298     if( PoisonOSREntry ) {
 299       // TODO: Should use a ShouldNotReachHereNode...
 300       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 301     }
 302   } else {
 303     if( C->method() && !C->method()->flags().is_static() ) {
 304       // Insert unvalidated entry point
 305       C->cfg()->insert( broot, 0, new MachUEPNode() );
 306     }
 307 
 308   }
 309 
 310   // Break before main entry point
 311   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 312       (OptoBreakpoint && C->is_method_compilation())       ||
 313       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 314       (OptoBreakpointC2R && !C->method())                   ) {
 315     // checking for C->method() means that OptoBreakpoint does not apply to
 316     // runtime stubs or frame converters
 317     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 318   }
 319 
 320   // Insert epilogs before every return
 321   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 322     Block* block = C->cfg()->get_block(i);
 323     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 324       Node* m = block->end();
 325       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 326         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 327         block->add_inst(epilog);
 328         C->cfg()->map_node_to_block(epilog, block);
 329       }
 330     }
 331   }
 332 
 333   // Keeper of sizing aspects
 334   _buf_sizes = BufferSizingData();
 335 
 336   // Initialize code buffer
 337   estimate_buffer_size(_buf_sizes._const);
 338   if (C->failing()) return;
 339 
 340   // Pre-compute the length of blocks and replace
 341   // long branches with short if machine supports it.
 342   // Must be done before ScheduleAndBundle due to SPARC delay slots
 343   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 344   blk_starts[0] = 0;
 345   shorten_branches(blk_starts);
 346 
 347   ScheduleAndBundle();
 348   if (C->failing()) {
 349     return;
 350   }
 351 
 352   perform_mach_node_analysis();
 353 
 354   // Complete sizing of codebuffer
 355   CodeBuffer* cb = init_buffer();
 356   if (cb == nullptr || C->failing()) {
 357     return;
 358   }
 359 
 360   BuildOopMaps();
 361 
 362   if (C->failing())  {
 363     return;
 364   }
 365 
 366   fill_buffer(cb, blk_starts);

 487     // Sum all instruction sizes to compute block size
 488     uint last_inst = block->number_of_nodes();
 489     uint blk_size = 0;
 490     for (uint j = 0; j < last_inst; j++) {
 491       _index = j;
 492       Node* nj = block->get_node(_index);
 493       // Handle machine instruction nodes
 494       if (nj->is_Mach()) {
 495         MachNode* mach = nj->as_Mach();
 496         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 497         reloc_size += mach->reloc();
 498         if (mach->is_MachCall()) {
 499           // add size information for trampoline stub
 500           // class CallStubImpl is platform-specific and defined in the *.ad files.
 501           stub_size  += CallStubImpl::size_call_trampoline();
 502           reloc_size += CallStubImpl::reloc_call_trampoline();
 503 
 504           MachCallNode *mcall = mach->as_MachCall();
 505           // This destination address is NOT PC-relative
 506 
 507           mcall->method_set((intptr_t)mcall->entry_point());
 508 
 509           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 510             stub_size  += CompiledStaticCall::to_interp_stub_size();
 511             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 512           }
 513         } else if (mach->is_MachSafePoint()) {
 514           // If call/safepoint are adjacent, account for possible
 515           // nop to disambiguate the two safepoints.
 516           // ScheduleAndBundle() can rearrange nodes in a block,
 517           // check for all offsets inside this block.
 518           if (last_call_adr >= blk_starts[i]) {
 519             blk_size += nop_size;
 520           }
 521         }
 522         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 523           // Nop is inserted between "avoid back to back" instructions.
 524           // ScheduleAndBundle() can rearrange nodes in a block,
 525           // check for all offsets inside this block.
 526           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 527             blk_size += nop_size;

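A worked example of the worst-case padding term above, under assumed values (alignment_required() == 16 units and relocInfo::addr_unit() == 1 byte, as on x86):

    #include <cstdio>

    int main() {
      int alignment_required = 16; // hypothetical MachNode::alignment_required() result
      int addr_unit          = 1;  // relocInfo::addr_unit(); 1 byte on x86
      // An instruction that must start on a 16-unit boundary may need up to
      // 15 units of nops in front of it, so the estimate charges the worst case.
      int worst_case_padding = (alignment_required - 1) * addr_unit;
      printf("worst-case padding = %d bytes\n", worst_case_padding); // 15
      return 0;
    }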
 742     // New functionality:
 743     //   Assert if the local is not top. In product mode let the new node
 744     //   override the old entry.
 745     assert(local == C->top(), "LocArray collision");
 746     if (local == C->top()) {
 747       return;
 748     }
 749     array->pop();
 750   }
 751   const Type *t = local->bottom_type();
 752 
 753   // Is it a safepoint scalar object node?
 754   if (local->is_SafePointScalarObject()) {
 755     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 756 
 757     ObjectValue* sv = (ObjectValue*) sv_for_node_id(objs, spobj->_idx);
 758     if (sv == nullptr) {
 759       ciKlass* cik = t->is_oopptr()->exact_klass();
 760       assert(cik->is_instance_klass() ||
 761              cik->is_array_klass(), "Not supported allocation.");
 762       sv = new ObjectValue(spobj->_idx,
 763                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
 764       set_sv_for_object_node(objs, sv);
 765 
 766       uint first_ind = spobj->first_index(sfpt->jvms());
 767       for (uint i = 0; i < spobj->n_fields(); i++) {
 768         Node* fld_node = sfpt->in(first_ind+i);
 769         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 770       }
 771     }
 772     array->append(sv);
 773     return;
 774   } else if (local->is_SafePointScalarMerge()) {
 775     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 776     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 777 
 778     if (mv == nullptr) {
 779       GrowableArray<ScopeValue*> deps;
 780 
 781       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 782       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 783       assert(deps.length() == 1, "missing value");
 784 
 785       int selector_idx = smerge->selector_idx(sfpt->jvms());
 786       (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);

 965 bool PhaseOutput::contains_as_owner(GrowableArray<MonitorValue*> *monarray, ObjectValue *ov) const {
 966   for (int k = 0; k < monarray->length(); k++) {
 967     MonitorValue* mv = monarray->at(k);
 968     if (mv->owner() == ov) {
 969       return true;
 970     }
 971   }
 972 
 973   return false;
 974 }
 975 
 976 //--------------------------Process_OopMap_Node--------------------------------
 977 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
 978   // Handle special safepoint nodes for synchronization
 979   MachSafePointNode *sfn   = mach->as_MachSafePoint();
 980   MachCallNode      *mcall;
 981 
 982   int safepoint_pc_offset = current_offset;
 983   bool is_method_handle_invoke = false;
 984   bool return_oop = false;
 985   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
 986   bool arg_escape = false;
 987 
 988   // Add the safepoint in the DebugInfoRecorder
 989   if( !mach->is_MachCall() ) {
 990     mcall = nullptr;
 991     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
 992   } else {
 993     mcall = mach->as_MachCall();
 994 
 995     // Is the call a MethodHandle call?
 996     if (mcall->is_MachCallJava()) {
 997       if (mcall->as_MachCallJava()->_method_handle_invoke) {
 998         assert(C->has_method_handle_invokes(), "must have been set during call generation");
 999         is_method_handle_invoke = true;
1000       }
1001       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1002     }
1003 
1004     // Check if a call returns an object.
1005     if (mcall->returns_pointer()) {
1006       return_oop = true;
1007     }
1008     safepoint_pc_offset += mcall->ret_addr_offset();
1009     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1010   }
1011 
1012   // Loop over the JVMState list to add scope information
1013   // Do not skip safepoints with a null method, they need monitor info
1014   JVMState* youngest_jvms = sfn->jvms();
1015   int max_depth = youngest_jvms->depth();
1016 
1017   // Allocate the object pool for scalar-replaced objects -- the map from
1018   // small-integer keys (which can be recorded in the local and ostack
1019   // arrays) to descriptions of the object state.
1020   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1021 
1022   // Visit scopes from oldest to youngest.
1023   for (int depth = 1; depth <= max_depth; depth++) {
1024     JVMState* jvms = youngest_jvms->of_depth(depth);
1025     int idx;
1026     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1027     // Safepoints that do not have method() set only provide oop-map and monitor info

1126     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1127     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1128 
1129     // Make method available for all Safepoints
1130     ciMethod* scope_method = method ? method : C->method();
1131     // Describe the scope here
1132     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1133     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1134     // Now we can describe the scope.
1135     methodHandle null_mh;
1136     bool rethrow_exception = false;
1137     C->debug_info()->describe_scope(
1138       safepoint_pc_offset,
1139       null_mh,
1140       scope_method,
1141       jvms->bci(),
1142       jvms->should_reexecute(),
1143       rethrow_exception,
1144       is_method_handle_invoke,
1145       return_oop,
1146       has_ea_local_in_scope,
1147       arg_escape,
1148       locvals,
1149       expvals,
1150       monvals
1151     );
1152   } // End jvms loop
1153 
1154   // Mark the end of the scope set.
1155   C->debug_info()->end_safepoint(safepoint_pc_offset);
1156 }
1157 
1158 
1159 
1160 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1161 class NonSafepointEmitter {
1162     Compile*  C;
1163     JVMState* _pending_jvms;
1164     int       _pending_offset;
1165 

1501           MachNode *nop = new MachNopNode(nops_cnt);
1502           block->insert_node(nop, j++);
1503           last_inst++;
1504           C->cfg()->map_node_to_block(nop, block);
1505           // Ensure enough space.
1506           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1507           if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1508             C->record_failure("CodeCache is full");
1509             return;
1510           }
1511           nop->emit(*cb, C->regalloc());
1512           cb->flush_bundle(true);
1513           current_offset = cb->insts_size();
1514         }
1515 
1516         bool observe_safepoint = is_sfn;
1517         // Remember the start of the last call in a basic block
1518         if (is_mcall) {
1519           MachCallNode *mcall = mach->as_MachCall();
1520 
1521           // This destination address is NOT PC-relative
1522           mcall->method_set((intptr_t)mcall->entry_point());
1523 
1524           // Save the return address
1525           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1526 
1527           observe_safepoint = mcall->guaranteed_safepoint();
1528         }
1529 
1530         // sfn will be valid whenever mcall is valid now because of inheritance
1531         if (observe_safepoint) {
1532           // Handle special safepoint nodes for synchronization
1533           if (!is_mcall) {
1534             MachSafePointNode *sfn = mach->as_MachSafePoint();
1535             // !!!!! Stubs only need an oopmap right now, so bail out
1536             if (sfn->jvms()->method() == nullptr) {
1537               // Write the oopmap directly to the code blob??!!
1538               continue;
1539             }
1540           } // End synchronization
1541 
1542           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1666       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1667         node_offsets[n->_idx] = cb->insts_size();
1668       }
1669 #endif
1670       assert(!C->failing(), "Should not reach here if failing.");
1671 
1672       // "Normal" instruction case
1673       DEBUG_ONLY(uint instr_offset = cb->insts_size());
1674       n->emit(*cb, C->regalloc());
1675       current_offset = cb->insts_size();
1676 
1677       // Above we only verified that there is enough space in the instruction section.
1678       // However, the instruction may emit stubs that cause code buffer expansion.
1679       // Bail out here if expansion failed due to a lack of code cache space.
1680       if (C->failing()) {
1681         return;
1682       }
1683 
1684       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1685              "ret_addr_offset() not within emitted code");
1686 
1687 #ifdef ASSERT
1688       uint n_size = n->size(C->regalloc());
1689       if (n_size < (current_offset-instr_offset)) {
1690         MachNode* mach = n->as_Mach();
1691         n->dump();
1692         mach->dump_format(C->regalloc(), tty);
1693         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1694         Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
1695         tty->print_cr(" ------------------- ");
1696         BufferBlob* blob = this->scratch_buffer_blob();
1697         address blob_begin = blob->content_begin();
1698         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1699         assert(false, "wrong size of mach node");
1700       }
1701 #endif
1702       non_safepoints.observe_instruction(n, current_offset);
1703 
1704       // mcall is last "call" that can be a safepoint
1705       // record it so we can see if a poll will directly follow it
1706       // in which case we'll need a pad to make the PcDesc sites unique

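A toy illustration, not HotSpot code, of why that pad is needed: debug info is keyed by PC offset, so a call's return address and an immediately following safepoint poll would otherwise collide on the same PcDesc key:

    #include <cstdio>
    #include <map>

    int main() {
      std::map<int, const char*> pc_desc; // pc offset -> scope description
      pc_desc[40] = "call return site";
      pc_desc[40] = "safepoint poll";     // same offset: the call's entry is overwritten
      printf("entries at offset 40: %zu\n", pc_desc.size()); // 1, not 2
      // A one-byte nop moves the poll to offset 41 and keeps both sites unique.
      return 0;
    }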
3099         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3100         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3101       }
3102     }
3103     // Do not allow defs of new derived values to float above GC
3104     // points unless the base is definitely available at the GC point.
3105 
3106     Node *m = b->get_node(i);
3107 
3108     // Add precedence edge from following safepoint to use of derived pointer
3109     if( last_safept_node != end_node &&
3110         m != last_safept_node) {
3111       for (uint k = 1; k < m->req(); k++) {
3112         const Type *t = m->in(k)->bottom_type();
3113         if( t->isa_oop_ptr() &&
3114             t->is_ptr()->offset() != 0 ) {
3115           last_safept_node->add_prec( m );
3116           break;
3117         }
3118       }
3119     }
3120 
3121     if( n->jvms() ) {           // Precedence edge from derived to safept
3122       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3123       if( b->get_node(last_safept) != last_safept_node ) {
3124         last_safept = b->find_node(last_safept_node);
3125       }
3126       for( uint j=last_safept; j > i; j-- ) {
3127         Node *mach = b->get_node(j);
3128         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3129           mach->add_prec( n );
3130       }
3131       last_safept = i;
3132       last_safept_node = m;
3133     }
3134   }
3135 
3136   if (fat_proj_seen) {
3137     // Garbage collect pinch nodes that were not consumed.
3138     // They are usually created by a fat kill MachProj for a call.

3257 }
3258 #endif
3259 
3260 //-----------------------init_scratch_buffer_blob------------------------------
3261 // Construct a temporary BufferBlob and cache it for this compile.
3262 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3263   // If there is already a scratch buffer blob allocated and the
3264   // constant section is big enough, use it.  Otherwise free the
3265   // current and allocate a new one.
3266   BufferBlob* blob = scratch_buffer_blob();
3267   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3268     // Use the current blob.
3269   } else {
3270     if (blob != nullptr) {
3271       BufferBlob::free(blob);
3272     }
3273 
3274     ResourceMark rm;
3275     _scratch_const_size = const_size;
3276     int size = C2Compiler::initial_code_buffer_size(const_size);
3277     blob = BufferBlob::create("Compile::scratch_buffer", size);
3278     // Record the buffer blob for next time.
3279     set_scratch_buffer_blob(blob);
3280     // Have we run out of code space?
3281     if (scratch_buffer_blob() == nullptr) {
3282       // Let CompilerBroker disable further compilations.
3283       C->record_failure("Not enough space for scratch buffer in CodeCache");
3284       return;
3285     }
3286   }
3287 
3288   // Initialize the relocation buffers
3289   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3290   set_scratch_locs_memory(locs_buf);
3291 }
3292 
3293 
3294 //-----------------------scratch_emit_size-------------------------------------
3295 // Helper function that computes size by emitting code
3296 uint PhaseOutput::scratch_emit_size(const Node* n) {

3327   buf.insts()->set_scratch_emit();
3328   buf.stubs()->set_scratch_emit();
3329 
3330   // Do the emission.
3331 
3332   Label fakeL; // Fake label for branch instructions.
3333   Label*   saveL = nullptr;
3334   uint save_bnum = 0;
3335   bool is_branch = n->is_MachBranch();
3336   if (is_branch) {
3337     MacroAssembler masm(&buf);
3338     masm.bind(fakeL);
3339     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3340     n->as_MachBranch()->label_set(&fakeL, 0);
3341   }
3342   n->emit(buf, C->regalloc());
3343 
3344   // Emitting into the scratch buffer should not fail
3345   assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3346 
3347   if (is_branch) // Restore label.
3348     n->as_MachBranch()->label_set(saveL, save_bnum);
3349 
3350   // End scratch_emit_size section.
3351   set_in_scratch_emit_size(false);
3352 
3353   return buf.insts_size();
3354 }
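The function above sizes a node by emitting it into a throwaway buffer and reporting how many bytes came out. A minimal sketch of that measure-by-emitting idea, with a toy buffer standing in for the scratch CodeBuffer:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct ScratchBuf {                 // toy stand-in for the scratch CodeBuffer
      std::vector<uint8_t> bytes;
      void emit8(uint8_t b) { bytes.push_back(b); }
      size_t insts_size() const { return bytes.size(); }
    };

    template <typename Emitter>
    size_t scratch_emit_size_toy(Emitter emit) {
      ScratchBuf buf;        // contents are discarded; only the size matters
      emit(buf);             // run the emitter against the scratch buffer
      return buf.insts_size();
    }

    int main() {
      size_t n = scratch_emit_size_toy([](ScratchBuf& b) { b.emit8(0x90); b.emit8(0x90); });
      printf("emitted size = %zu\n", n); // 2
      return 0;
    }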
3355 
3356 void PhaseOutput::install() {
3357   if (!C->should_install_code()) {
3358     return;
3359   } else if (C->stub_function() != nullptr) {
3360     install_stub(C->stub_name());
3361   } else {
3362     install_code(C->method(),
3363                  C->entry_bci(),
3364                  CompileBroker::compiler2(),
3365                  C->has_unsafe_access(),
3366                  SharedRuntime::is_wide_vector(C->max_vector_size()),
3367                  C->rtm_state());
3368   }

3372                                int               entry_bci,
3373                                AbstractCompiler* compiler,
3374                                bool              has_unsafe_access,
3375                                bool              has_wide_vectors,
3376                                RTMState          rtm_state) {
3377   // Check if we want to skip execution of all compiled code.
3378   {
3379 #ifndef PRODUCT
3380     if (OptoNoExecute) {
3381       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3382       return;
3383     }
3384 #endif
3385     Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3386 
3387     if (C->is_osr_compilation()) {
3388       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3389       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3390     } else {
3391       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3392       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3393     }
3394 
3395     C->env()->register_method(target,
3396                                      entry_bci,
3397                                      &_code_offsets,
3398                                      _orig_pc_slot_offset_in_bytes,
3399                                      code_buffer(),
3400                                      frame_size_in_words(),
3401                                      oop_map_set(),
3402                                      &_handler_table,
3403                                      inc_table(),
3404                                      compiler,
3405                                      has_unsafe_access,
3406                                      SharedRuntime::is_wide_vector(C->max_vector_size()),
3407                                      C->has_monitors(),
3408                                      0,
3409                                      C->rtm_state());
3410 
3411     if (C->log() != nullptr) { // Print code cache state into compiler log
3412       C->log()->code_cache_state();
3413     }
3414   }
3415 }
3416 void PhaseOutput::install_stub(const char* stub_name) {
3417   // Entry point will be accessed using stub_entry_point();
3418   if (code_buffer() == nullptr) {
3419     Matcher::soft_match_failure();
3420   } else {
3421     if (PrintAssembly && (WizardMode || Verbose))
3422       tty->print_cr("### Stub::%s", stub_name);
3423 
3424     if (!C->failing()) {
3425       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3426 
3427       // Make the NMethod
3428       // For now we mark the frame as never safe for profile stackwalking
3429       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,

src/hotspot/share/opto/output.cpp (new version)

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfo.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/compilerDirectives.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/gc_globals.hpp"
  37 #include "gc/shared/c2/barrierSetC2.hpp"
  38 #include "memory/allocation.inline.hpp"
  39 #include "memory/allocation.hpp"
  40 #include "opto/ad.hpp"
  41 #include "opto/block.hpp"
  42 #include "opto/c2compiler.hpp"
  43 #include "opto/c2_MacroAssembler.hpp"
  44 #include "opto/callnode.hpp"
  45 #include "opto/cfgnode.hpp"
  46 #include "opto/locknode.hpp"
  47 #include "opto/machnode.hpp"
  48 #include "opto/node.hpp"
  49 #include "opto/optoreg.hpp"
  50 #include "opto/output.hpp"
  51 #include "opto/regalloc.hpp"
  52 #include "opto/runtime.hpp"
  53 #include "opto/subnode.hpp"
  54 #include "opto/type.hpp"
  55 #include "runtime/handles.inline.hpp"
  56 #include "runtime/sharedRuntime.hpp"

 230     _first_block_size(0),
 231     _handler_table(),
 232     _inc_table(),
 233     _stub_list(),
 234     _oop_map_set(nullptr),
 235     _scratch_buffer_blob(nullptr),
 236     _scratch_locs_memory(nullptr),
 237     _scratch_const_size(-1),
 238     _in_scratch_emit_size(false),
 239     _frame_slots(0),
 240     _code_offsets(),
 241     _node_bundling_limit(0),
 242     _node_bundling_base(nullptr),
 243     _orig_pc_slot(0),
 244     _orig_pc_slot_offset_in_bytes(0),
 245     _buf_sizes(),
 246     _block(nullptr),
 247     _index(0) {
 248   C->set_output(this);
 249   if (C->stub_name() == nullptr) {
 250     int fixed_slots = C->fixed_slots();
 251     if (C->needs_stack_repair()) {
 252       fixed_slots -= 2;
 253     }
 254     // TODO 8284443 Only reserve extra slot if needed
 255     if (InlineTypeReturnedAsFields) {
 256       fixed_slots -= 2;
 257     }
 258     _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
 259   }
 260 }
 261 
 262 PhaseOutput::~PhaseOutput() {
 263   C->set_output(nullptr);
 264   if (_scratch_buffer_blob != nullptr) {
 265     BufferBlob::free(_scratch_buffer_blob);
 266   }
 267 }
 268 
 269 void PhaseOutput::perform_mach_node_analysis() {
 270   // Late barrier analysis must be done after schedule and bundle
 271   // Otherwise liveness based spilling will fail
 272   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 273   bs->late_barrier_analysis();
 274 
 275   pd_perform_mach_node_analysis();
 276 
 277   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 278 }
 279 
 280 // Convert Nodes to instruction bits and pass off to the VM
 281 void PhaseOutput::Output() {
 282   // RootNode goes
 283   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 284 
 285   // The number of new nodes (mostly MachNop) is proportional to
 286   // the number of java calls and inner loops which are aligned.
 287   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 288                             C->inner_loops()*(OptoLoopAlignment-1)),
 289                            "out of nodes before code generation" ) ) {
 290     return;
 291   }
 292   // Make sure I can find the Start Node
 293   Block *entry = C->cfg()->get_block(1);
 294   Block *broot = C->cfg()->get_root_block();
 295 
 296   const StartNode *start = entry->head()->as_Start();
 297 
 298   // Replace StartNode with prolog
 299   Label verified_entry;
 300   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
 301   entry->map_node(prolog, 0);
 302   C->cfg()->map_node_to_block(prolog, entry);
 303   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 304 
 305   // Virtual methods need an unverified entry point
 306   if (C->is_osr_compilation()) {
 307     if (PoisonOSREntry) {
 308       // TODO: Should use a ShouldNotReachHereNode...
 309       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 310     }
 311   } else {
 312     if (C->method()) {
 313       if (C->method()->has_scalarized_args()) {
 314         // Add entry point to unpack all inline type arguments
 315         C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
 316         if (!C->method()->is_static()) {
 317           // Add verified/unverified entry points to only unpack inline type receiver at interface calls
 318           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
 319           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true,  /* receiver_only */ true));
 320           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
 321         }
 322       } else if (!C->method()->is_static()) {
 323         // Insert unvalidated entry point
 324         C->cfg()->insert(broot, 0, new MachUEPNode());
 325       }
 326     }
 327   }
 328 
 329   // Break before main entry point
 330   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 331       (OptoBreakpoint && C->is_method_compilation())       ||
 332       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 333       (OptoBreakpointC2R && !C->method())                   ) {
 334     // checking for C->method() means that OptoBreakpoint does not apply to
 335     // runtime stubs or frame converters
 336     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 337   }
 338 
 339   // Insert epilogs before every return
 340   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 341     Block* block = C->cfg()->get_block(i);
 342     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 343       Node* m = block->end();
 344       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 345         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 346         block->add_inst(epilog);
 347         C->cfg()->map_node_to_block(epilog, block);
 348       }
 349     }
 350   }
 351 
 352   // Keeper of sizing aspects
 353   _buf_sizes = BufferSizingData();
 354 
 355   // Initialize code buffer
 356   estimate_buffer_size(_buf_sizes._const);
 357   if (C->failing()) return;
 358 
 359   // Pre-compute the length of blocks and replace
 360   // long branches with short if machine supports it.
 361   // Must be done before ScheduleAndBundle due to SPARC delay slots
 362   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 363   blk_starts[0] = 0;
 364   shorten_branches(blk_starts);
 365 
 366   if (!C->is_osr_compilation() && C->has_scalarized_args()) {
 367     // Compute the offsets of the entry points required by the inline type calling convention
 368     if (!C->method()->is_static()) {
 369       // We have entries at the beginning of the method, implemented by the first 4 nodes.
 370       // Entry                     (unverified) @ offset 0
 371       // Verified_Inline_Entry_RO
 372       // Inline_Entry              (unverified)
 373       // Verified_Inline_Entry
 374       uint offset = 0;
 375       _code_offsets.set_value(CodeOffsets::Entry, offset);
 376 
 377       offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
 378       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
 379 
 380       offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
 381       _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
 382 
 383       offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
 384       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
 385     } else {
 386       _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
 387       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
 388     }
 389   }
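A minimal sketch of the offset bookkeeping above: the four entry points are laid out back to back, so each recorded offset is the running sum of the preceding stub sizes (the sizes below are invented for illustration):

    #include <cstdio>

    int main() {
      int vep_size[3] = { 16, 16, 16 };    // hypothetical MachVEPNode sizes
      int entry    = 0;                    // CodeOffsets::Entry
      int ver_ro   = entry  + vep_size[0]; // Verified_Inline_Entry_RO
      int inl      = ver_ro + vep_size[1]; // Inline_Entry
      int verified = inl    + vep_size[2]; // Verified_Inline_Entry
      printf("offsets: %d %d %d %d\n", entry, ver_ro, inl, verified); // 0 16 32 48
      return 0;
    }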
 390 
 391   ScheduleAndBundle();
 392   if (C->failing()) {
 393     return;
 394   }
 395 
 396   perform_mach_node_analysis();
 397 
 398   // Complete sizing of codebuffer
 399   CodeBuffer* cb = init_buffer();
 400   if (cb == nullptr || C->failing()) {
 401     return;
 402   }
 403 
 404   BuildOopMaps();
 405 
 406   if (C->failing())  {
 407     return;
 408   }
 409 
 410   fill_buffer(cb, blk_starts);

 531     // Sum all instruction sizes to compute block size
 532     uint last_inst = block->number_of_nodes();
 533     uint blk_size = 0;
 534     for (uint j = 0; j < last_inst; j++) {
 535       _index = j;
 536       Node* nj = block->get_node(_index);
 537       // Handle machine instruction nodes
 538       if (nj->is_Mach()) {
 539         MachNode* mach = nj->as_Mach();
 540         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 541         reloc_size += mach->reloc();
 542         if (mach->is_MachCall()) {
 543           // add size information for trampoline stub
 544           // class CallStubImpl is platform-specific and defined in the *.ad files.
 545           stub_size  += CallStubImpl::size_call_trampoline();
 546           reloc_size += CallStubImpl::reloc_call_trampoline();
 547 
 548           MachCallNode *mcall = mach->as_MachCall();
 549           // This destination address is NOT PC-relative
 550 
 551           if (mcall->entry_point() != nullptr) {
 552             mcall->method_set((intptr_t)mcall->entry_point());
 553           }
 554 
 555           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 556             stub_size  += CompiledStaticCall::to_interp_stub_size();
 557             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 558           }
 559         } else if (mach->is_MachSafePoint()) {
 560           // If call/safepoint are adjacent, account for possible
 561           // nop to disambiguate the two safepoints.
 562           // ScheduleAndBundle() can rearrange nodes in a block,
 563           // check for all offsets inside this block.
 564           if (last_call_adr >= blk_starts[i]) {
 565             blk_size += nop_size;
 566           }
 567         }
 568         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 569           // Nop is inserted between "avoid back to back" instructions.
 570           // ScheduleAndBundle() can rearrange nodes in a block,
 571           // check for all offsets inside this block.
 572           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 573             blk_size += nop_size;

 788     // New functionality:
 789     //   Assert if the local is not top. In product mode let the new node
 790     //   override the old entry.
 791     assert(local == C->top(), "LocArray collision");
 792     if (local == C->top()) {
 793       return;
 794     }
 795     array->pop();
 796   }
 797   const Type *t = local->bottom_type();
 798 
 799   // Is it a safepoint scalar object node?
 800   if (local->is_SafePointScalarObject()) {
 801     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 802 
 803     ObjectValue* sv = (ObjectValue*) sv_for_node_id(objs, spobj->_idx);
 804     if (sv == nullptr) {
 805       ciKlass* cik = t->is_oopptr()->exact_klass();
 806       assert(cik->is_instance_klass() ||
 807              cik->is_array_klass(), "Not supported allocation.");
 808       uint first_ind = spobj->first_index(sfpt->jvms());
 809       // Nullable, scalarized inline types have an is_init input
 810       // that needs to be checked before using the field values.
 811       ScopeValue* is_init = nullptr;
 812       if (cik->is_inlinetype()) {
 813         Node* init_node = sfpt->in(first_ind++);
 814         assert(init_node != nullptr, "is_init node not found");
 815         if (!init_node->is_top()) {
 816           const TypeInt* init_type = init_node->bottom_type()->is_int();
 817           if (init_node->is_Con()) {
 818             is_init = new ConstantIntValue(init_type->get_con());
 819           } else {
 820             OptoReg::Name init_reg = C->regalloc()->get_reg_first(init_node);
 821             is_init = new_loc_value(C->regalloc(), init_reg, Location::normal);
 822           }
 823         }
 824       }
 825       sv = new ObjectValue(spobj->_idx,
 826                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), is_init);
 827       set_sv_for_object_node(objs, sv);
 828 
 829       for (uint i = 0; i < spobj->n_fields(); i++) {
 830         Node* fld_node = sfpt->in(first_ind+i);
 831         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 832       }
 833     }
 834     array->append(sv);
 835     return;
 836   } else if (local->is_SafePointScalarMerge()) {
 837     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 838     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 839 
 840     if (mv == nullptr) {
 841       GrowableArray<ScopeValue*> deps;
 842 
 843       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 844       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 845       assert(deps.length() == 1, "missing value");
 846 
 847       int selector_idx = smerge->selector_idx(sfpt->jvms());
 848       (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);

1027 bool PhaseOutput::contains_as_owner(GrowableArray<MonitorValue*> *monarray, ObjectValue *ov) const {
1028   for (int k = 0; k < monarray->length(); k++) {
1029     MonitorValue* mv = monarray->at(k);
1030     if (mv->owner() == ov) {
1031       return true;
1032     }
1033   }
1034 
1035   return false;
1036 }
1037 
1038 //--------------------------Process_OopMap_Node--------------------------------
1039 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1040   // Handle special safepoint nodes for synchronization
1041   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1042   MachCallNode      *mcall;
1043 
1044   int safepoint_pc_offset = current_offset;
1045   bool is_method_handle_invoke = false;
1046   bool return_oop = false;
1047   bool return_scalarized = false;
1048   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1049   bool arg_escape = false;
1050 
1051   // Add the safepoint in the DebugInfoRecorder
1052   if( !mach->is_MachCall() ) {
1053     mcall = nullptr;
1054     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1055   } else {
1056     mcall = mach->as_MachCall();
1057 
1058     // Is the call a MethodHandle call?
1059     if (mcall->is_MachCallJava()) {
1060       if (mcall->as_MachCallJava()->_method_handle_invoke) {
1061         assert(C->has_method_handle_invokes(), "must have been set during call generation");
1062         is_method_handle_invoke = true;
1063       }
1064       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1065     }
1066 
1067     // Check if a call returns an object.
1068     if (mcall->returns_pointer() || mcall->returns_scalarized()) {
1069       return_oop = true;
1070     }
1071     if (mcall->returns_scalarized()) {
1072       return_scalarized = true;
1073     }
1074     safepoint_pc_offset += mcall->ret_addr_offset();
1075     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1076   }
1077 
1078   // Loop over the JVMState list to add scope information
1079   // Do not skip safepoints with a null method, they need monitor info
1080   JVMState* youngest_jvms = sfn->jvms();
1081   int max_depth = youngest_jvms->depth();
1082 
1083   // Allocate the object pool for scalar-replaced objects -- the map from
1084   // small-integer keys (which can be recorded in the local and ostack
1085   // arrays) to descriptions of the object state.
1086   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1087 
1088   // Visit scopes from oldest to youngest.
1089   for (int depth = 1; depth <= max_depth; depth++) {
1090     JVMState* jvms = youngest_jvms->of_depth(depth);
1091     int idx;
1092     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1093     // Safepoints that do not have method() set only provide oop-map and monitor info

1192     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1193     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1194 
1195     // Make method available for all Safepoints
1196     ciMethod* scope_method = method ? method : C->method();
1197     // Describe the scope here
1198     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1199     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1200     // Now we can describe the scope.
1201     methodHandle null_mh;
1202     bool rethrow_exception = false;
1203     C->debug_info()->describe_scope(
1204       safepoint_pc_offset,
1205       null_mh,
1206       scope_method,
1207       jvms->bci(),
1208       jvms->should_reexecute(),
1209       rethrow_exception,
1210       is_method_handle_invoke,
1211       return_oop,
1212       return_scalarized,
1213       has_ea_local_in_scope,
1214       arg_escape,
1215       locvals,
1216       expvals,
1217       monvals
1218     );
1219   } // End jvms loop
1220 
1221   // Mark the end of the scope set.
1222   C->debug_info()->end_safepoint(safepoint_pc_offset);
1223 }
1224 
1225 
1226 
1227 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1228 class NonSafepointEmitter {
1229     Compile*  C;
1230     JVMState* _pending_jvms;
1231     int       _pending_offset;
1232 

1568           MachNode *nop = new MachNopNode(nops_cnt);
1569           block->insert_node(nop, j++);
1570           last_inst++;
1571           C->cfg()->map_node_to_block(nop, block);
1572           // Ensure enough space.
1573           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1574           if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1575             C->record_failure("CodeCache is full");
1576             return;
1577           }
1578           nop->emit(*cb, C->regalloc());
1579           cb->flush_bundle(true);
1580           current_offset = cb->insts_size();
1581         }
1582 
1583         bool observe_safepoint = is_sfn;
1584         // Remember the start of the last call in a basic block
1585         if (is_mcall) {
1586           MachCallNode *mcall = mach->as_MachCall();
1587 
1588           if (mcall->entry_point() != nullptr) {
1589             // This destination address is NOT PC-relative
1590             mcall->method_set((intptr_t)mcall->entry_point());
1591           }
1592 
1593           // Save the return address
1594           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1595 
1596           observe_safepoint = mcall->guaranteed_safepoint();
1597         }
1598 
1599         // sfn will be valid whenever mcall is valid now because of inheritance
1600         if (observe_safepoint) {
1601           // Handle special safepoint nodes for synchronization
1602           if (!is_mcall) {
1603             MachSafePointNode *sfn = mach->as_MachSafePoint();
1604             // !!!!! Stubs only need an oopmap right now, so bail out
1605             if (sfn->jvms()->method() == nullptr) {
1606               // Write the oopmap directly to the code blob??!!
1607               continue;
1608             }
1609           } // End synchronization
1610 
1611           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1735       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1736         node_offsets[n->_idx] = cb->insts_size();
1737       }
1738 #endif
1739       assert(!C->failing(), "Should not reach here if failing.");
1740 
1741       // "Normal" instruction case
1742       DEBUG_ONLY(uint instr_offset = cb->insts_size());
1743       n->emit(*cb, C->regalloc());
1744       current_offset = cb->insts_size();
1745 
1746       // Above we only verified that there is enough space in the instruction section.
1747       // However, the instruction may emit stubs that cause code buffer expansion.
1748       // Bail out here if expansion failed due to a lack of code cache space.
1749       if (C->failing()) {
1750         return;
1751       }
1752 
1753       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1754              "ret_addr_offset() not within emitted code");
1755 #ifdef ASSERT
1756       uint n_size = n->size(C->regalloc());
1757       if (n_size < (current_offset-instr_offset)) {
1758         MachNode* mach = n->as_Mach();
1759         n->dump();
1760         mach->dump_format(C->regalloc(), tty);
1761         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1762         Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
1763         tty->print_cr(" ------------------- ");
1764         BufferBlob* blob = this->scratch_buffer_blob();
1765         address blob_begin = blob->content_begin();
1766         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1767         assert(false, "wrong size of mach node");
1768       }
1769 #endif
1770       non_safepoints.observe_instruction(n, current_offset);
1771 
1772       // mcall is last "call" that can be a safepoint
1773       // record it so we can see if a poll will directly follow it
1774       // in which case we'll need a pad to make the PcDesc sites unique

3167         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3168         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3169       }
3170     }
3171     // Do not allow defs of new derived values to float above GC
3172     // points unless the base is definitely available at the GC point.
3173 
3174     Node *m = b->get_node(i);
3175 
3176     // Add precedence edge from following safepoint to use of derived pointer
3177     if( last_safept_node != end_node &&
3178         m != last_safept_node) {
3179       for (uint k = 1; k < m->req(); k++) {
3180         const Type *t = m->in(k)->bottom_type();
3181         if( t->isa_oop_ptr() &&
3182             t->is_ptr()->offset() != 0 ) {
3183           last_safept_node->add_prec( m );
3184           break;
3185         }
3186       }
3187 
3188       // Do not allow a CheckCastPP node whose input is a raw pointer to
3189       // float past a safepoint.  This can occur when a buffered inline
3190       // type is allocated in a loop and the CheckCastPP from that
3191       // allocation is reused outside the loop.  If the use inside the
3192       // loop is scalarized the CheckCastPP will no longer be connected
3193       // to the loop safepoint.  See JDK-8264340.
3194       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
3195         Node *def = m->in(1);
3196         if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
3197           last_safept_node->add_prec(m);
3198         }
3199       }
3200     }
3201 
3202     if( n->jvms() ) {           // Precedence edge from derived to safept
3203       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3204       if( b->get_node(last_safept) != last_safept_node ) {
3205         last_safept = b->find_node(last_safept_node);
3206       }
3207       for( uint j=last_safept; j > i; j-- ) {
3208         Node *mach = b->get_node(j);
3209         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3210           mach->add_prec( n );
3211       }
3212       last_safept = i;
3213       last_safept_node = m;
3214     }
3215   }
3216 
3217   if (fat_proj_seen) {
3218     // Garbage collect pinch nodes that were not consumed.
3219     // They are usually created by a fat kill MachProj for a call.

3338 }
3339 #endif
3340 
3341 //-----------------------init_scratch_buffer_blob------------------------------
3342 // Construct a temporary BufferBlob and cache it for this compile.
3343 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3344   // If there is already a scratch buffer blob allocated and the
3345   // constant section is big enough, use it.  Otherwise free the
3346   // current and allocate a new one.
3347   BufferBlob* blob = scratch_buffer_blob();
3348   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3349     // Use the current blob.
3350   } else {
3351     if (blob != nullptr) {
3352       BufferBlob::free(blob);
3353     }
3354 
3355     ResourceMark rm;
3356     _scratch_const_size = const_size;
3357     int size = C2Compiler::initial_code_buffer_size(const_size);
3358     if (C->has_scalarized_args()) {
3359       // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
3360       // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
3361       ciMethod* method = C->method();
3362       int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
3363       int arg_num = 0;
3364       if (!method->is_static()) {
3365         if (method->is_scalarized_arg(arg_num)) {
3366           size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
3367         }
3368         arg_num++;
3369       }
3370       for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
3371         if (method->is_scalarized_arg(arg_num)) {
3372           size += str.type()->as_inline_klass()->oop_count() * barrier_size;
3373         }
3374         arg_num++;
3375       }
3376     }
3377     blob = BufferBlob::create("Compile::scratch_buffer", size);
3378     // Record the buffer blob for next time.
3379     set_scratch_buffer_blob(blob);
3380     // Have we run out of code space?
3381     if (scratch_buffer_blob() == nullptr) {
3382       // Let CompilerBroker disable further compilations.
3383       C->record_failure("Not enough space for scratch buffer in CodeCache");
3384       return;
3385     }
3386   }
3387 
3388   // Initialize the relocation buffers
3389   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3390   set_scratch_locs_memory(locs_buf);
3391 }
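A worked example of the extra sizing above, under the values in the code (UseZGC gives barrier_size == 200) and an invented oop count: a scalarized argument whose inline klass holds three oop fields grows the scratch buffer by 600 bytes:

    #include <cstdio>

    int main() {
      int barrier_size = 200;                      // ZGC case from the code above
      int oop_count    = 3;                        // hypothetical InlineKlass oop field count
      int extra        = oop_count * barrier_size; // bytes added for this argument
      printf("extra scratch bytes = %d\n", extra); // 600
      return 0;
    }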
3392 
3393 
3394 //-----------------------scratch_emit_size-------------------------------------
3395 // Helper function that computes size by emitting code
3396 uint PhaseOutput::scratch_emit_size(const Node* n) {

3427   buf.insts()->set_scratch_emit();
3428   buf.stubs()->set_scratch_emit();
3429 
3430   // Do the emission.
3431 
3432   Label fakeL; // Fake label for branch instructions.
3433   Label*   saveL = nullptr;
3434   uint save_bnum = 0;
3435   bool is_branch = n->is_MachBranch();
3436   if (is_branch) {
3437     MacroAssembler masm(&buf);
3438     masm.bind(fakeL);
3439     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3440     n->as_MachBranch()->label_set(&fakeL, 0);
3441   }
3442   n->emit(buf, C->regalloc());
3443 
3444   // Emitting into the scratch buffer should not fail
3445   assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3446 
3447   // Restore label.
3448   if (is_branch) {
3449     n->as_MachBranch()->label_set(saveL, save_bnum);
3450   }
3451 
3452   // End scratch_emit_size section.
3453   set_in_scratch_emit_size(false);
3454 
3455   return buf.insts_size();
3456 }
3457 
3458 void PhaseOutput::install() {
3459   if (!C->should_install_code()) {
3460     return;
3461   } else if (C->stub_function() != nullptr) {
3462     install_stub(C->stub_name());
3463   } else {
3464     install_code(C->method(),
3465                  C->entry_bci(),
3466                  CompileBroker::compiler2(),
3467                  C->has_unsafe_access(),
3468                  SharedRuntime::is_wide_vector(C->max_vector_size()),
3469                  C->rtm_state());
3470   }

3474                                int               entry_bci,
3475                                AbstractCompiler* compiler,
3476                                bool              has_unsafe_access,
3477                                bool              has_wide_vectors,
3478                                RTMState          rtm_state) {
3479   // Check if we want to skip execution of all compiled code.
3480   {
3481 #ifndef PRODUCT
3482     if (OptoNoExecute) {
3483       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3484       return;
3485     }
3486 #endif
3487     Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3488 
3489     if (C->is_osr_compilation()) {
3490       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3491       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3492     } else {
3493       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3494       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
3495         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
3496       }
3497       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
3498         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
3499       }
3500       if (_code_offsets.value(CodeOffsets::Entry) == -1) {
3501         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
3502       }
3503       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3504     }
3505 
3506     C->env()->register_method(target,
3507                               entry_bci,
3508                               &_code_offsets,
3509                               _orig_pc_slot_offset_in_bytes,
3510                               code_buffer(),
3511                               frame_size_in_words(),
3512                               _oop_map_set,
3513                               &_handler_table,
3514                               inc_table(),
3515                               compiler,
3516                               has_unsafe_access,
3517                               SharedRuntime::is_wide_vector(C->max_vector_size()),
3518                               C->has_monitors(),
3519                               0,
3520                               C->rtm_state());
3521 
3522     if (C->log() != nullptr) { // Print code cache state into compiler log
3523       C->log()->code_cache_state();
3524     }
3525   }
3526 }
3527 void PhaseOutput::install_stub(const char* stub_name) {
3528   // Entry point will be accessed using stub_entry_point();
3529   if (code_buffer() == nullptr) {
3530     Matcher::soft_match_failure();
3531   } else {
3532     if (PrintAssembly && (WizardMode || Verbose))
3533       tty->print_cr("### Stub::%s", stub_name);
3534 
3535     if (!C->failing()) {
3536       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3537 
3538       // Make the NMethod
3539       // For now we mark the frame as never safe for profile stackwalking
3540       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,