src/hotspot/share/opto/output.cpp

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfo.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/compilerDirectives.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/c2/barrierSetC2.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/allocation.hpp"
  39 #include "opto/ad.hpp"
  40 #include "opto/block.hpp"
  41 #include "opto/c2compiler.hpp"
  42 #include "opto/c2_MacroAssembler.hpp"
  43 #include "opto/callnode.hpp"
  44 #include "opto/cfgnode.hpp"
  45 #include "opto/locknode.hpp"
  46 #include "opto/machnode.hpp"
  47 #include "opto/node.hpp"
  48 #include "opto/optoreg.hpp"
  49 #include "opto/output.hpp"
  50 #include "opto/regalloc.hpp"
  51 #include "opto/runtime.hpp"
  52 #include "opto/subnode.hpp"
  53 #include "opto/type.hpp"
  54 #include "runtime/handles.inline.hpp"
  55 #include "runtime/sharedRuntime.hpp"

 224     _first_block_size(0),
 225     _handler_table(),
 226     _inc_table(),
 227     _stub_list(),
 228     _oop_map_set(nullptr),
 229     _scratch_buffer_blob(nullptr),
 230     _scratch_locs_memory(nullptr),
 231     _scratch_const_size(-1),
 232     _in_scratch_emit_size(false),
 233     _frame_slots(0),
 234     _code_offsets(),
 235     _node_bundling_limit(0),
 236     _node_bundling_base(nullptr),
 237     _orig_pc_slot(0),
 238     _orig_pc_slot_offset_in_bytes(0),
 239     _buf_sizes(),
 240     _block(nullptr),
 241     _index(0) {
 242   C->set_output(this);
 243   if (C->stub_name() == nullptr) {
 244     _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
 245   }
 246 }
 247 
 248 PhaseOutput::~PhaseOutput() {
 249   C->set_output(nullptr);
 250   if (_scratch_buffer_blob != nullptr) {
 251     BufferBlob::free(_scratch_buffer_blob);
 252   }
 253 }
 254 
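The constructor/destructor pair above brackets the lifetime of code-generation state: the constructor registers this PhaseOutput with the Compile object and reserves the original-PC slot, the destructor unregisters it and frees the scratch blob. A minimal sketch of how the driver is assumed to scope it (the actual driver lives in Compile::Code_Gen(); this outline is an approximation, not a quote):

    PhaseOutput output;      // ctor: C->set_output(this)
    output.Output();         // convert nodes to instruction bits (see below)
    if (!C->failing()) {
      output.install();      // hand the finished code to the VM
    }
    // dtor: C->set_output(nullptr); BufferBlob::free(_scratch_buffer_blob)
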
 255 void PhaseOutput::perform_mach_node_analysis() {
 256   // Late barrier analysis must be done after schedule and bundle
 257   // Otherwise liveness based spilling will fail
 258   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 259   bs->late_barrier_analysis();
 260 
 261   pd_perform_mach_node_analysis();
 262 
 263   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 264 }
 265 
 266 // Convert Nodes to instruction bits and pass off to the VM
 267 void PhaseOutput::Output() {
 268   // RootNode goes
 269   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 270 
 271   // The number of new nodes (mostly MachNop) is proportional to
 272   // the number of java calls and inner loops which are aligned.
 273   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 274                             C->inner_loops()*(OptoLoopAlignment-1)),
 275                            "out of nodes before code generation" ) ) {
 276     return;
 277   }
 278   // Make sure I can find the Start Node
 279   Block *entry = C->cfg()->get_block(1);
 280   Block *broot = C->cfg()->get_root_block();
 281 
 282   const StartNode *start = entry->head()->as_Start();
 283 
 284   // Replace StartNode with prolog
 285   MachPrologNode *prolog = new MachPrologNode();
 286   entry->map_node(prolog, 0);
 287   C->cfg()->map_node_to_block(prolog, entry);
 288   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 289 
 290   // Virtual methods need an unverified entry point
 291 
 292   if( C->is_osr_compilation() ) {
 293     if( PoisonOSREntry ) {
 294       // TODO: Should use a ShouldNotReachHereNode...
 295       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 296     }
 297   } else {
 298     if( C->method() && !C->method()->flags().is_static() ) {
 299       // Insert unvalidated entry point
 300       C->cfg()->insert( broot, 0, new MachUEPNode() );
 301     }
 302 
 303   }
 304 
 305   // Break before main entry point
 306   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 307       (OptoBreakpoint && C->is_method_compilation())       ||
 308       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 309       (OptoBreakpointC2R && !C->method())                   ) {
 310     // checking for C->method() means that OptoBreakpoint does not apply to
 311     // runtime stubs or frame converters
 312     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 313   }
 314 
 315   // Insert epilogs before every return
 316   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 317     Block* block = C->cfg()->get_block(i);
 318     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 319       Node* m = block->end();
 320       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 321         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 322         block->add_inst(epilog);
 323         C->cfg()->map_node_to_block(epilog, block);
 324       }
 325     }
 326   }
 327 
 328   // Keeper of sizing aspects
 329   _buf_sizes = BufferSizingData();
 330 
 331   // Initialize code buffer
 332   estimate_buffer_size(_buf_sizes._const);
 333   if (C->failing()) return;
 334 
 335   // Pre-compute the length of blocks and replace
 336   // long branches with short if machine supports it.
 337   // Must be done before ScheduleAndBundle due to SPARC delay slots
 338   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 339   blk_starts[0] = 0;
 340   shorten_branches(blk_starts);
 341 
 342   ScheduleAndBundle();
 343   if (C->failing()) {
 344     return;
 345   }
 346 
 347   perform_mach_node_analysis();
 348 
 349   // Complete sizing of codebuffer
 350   CodeBuffer* cb = init_buffer();
 351   if (cb == nullptr || C->failing()) {
 352     return;
 353   }
 354 
 355   BuildOopMaps();
 356 
 357   if (C->failing())  {
 358     return;
 359   }
 360 
 361   fill_buffer(cb, blk_starts);

 482     // Sum all instruction sizes to compute block size
 483     uint last_inst = block->number_of_nodes();
 484     uint blk_size = 0;
 485     for (uint j = 0; j < last_inst; j++) {
 486       _index = j;
 487       Node* nj = block->get_node(_index);
 488       // Handle machine instruction nodes
 489       if (nj->is_Mach()) {
 490         MachNode* mach = nj->as_Mach();
 491         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 492         reloc_size += mach->reloc();
 493         if (mach->is_MachCall()) {
 494           // add size information for trampoline stub
 495           // class CallStubImpl is platform-specific and defined in the *.ad files.
 496           stub_size  += CallStubImpl::size_call_trampoline();
 497           reloc_size += CallStubImpl::reloc_call_trampoline();
 498 
 499           MachCallNode *mcall = mach->as_MachCall();
 500           // This destination address is NOT PC-relative
 501 
 502           mcall->method_set((intptr_t)mcall->entry_point());
 503 
 504           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 505             stub_size  += CompiledStaticCall::to_interp_stub_size();
 506             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 507           }
 508         } else if (mach->is_MachSafePoint()) {
 509           // If call/safepoint are adjacent, account for possible
 510           // nop to disambiguate the two safepoints.
 511           // ScheduleAndBundle() can rearrange nodes in a block,
 512           // check for all offsets inside this block.
 513           if (last_call_adr >= blk_starts[i]) {
 514             blk_size += nop_size;
 515           }
 516         }
 517         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 518           // Nop is inserted between "avoid back to back" instructions.
 519           // ScheduleAndBundle() can rearrange nodes in a block,
 520           // check for all offsets inside this block.
 521           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 522             blk_size += nop_size;

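In the sizing loop above, alignment padding is charged pessimistically for every aligned node. A worked instance of that arithmetic, assuming a byte-addressed target where relocInfo::addr_unit() == 1 and a hypothetical node with a 16-byte alignment requirement:

    int alignment_required = 16;                    // hypothetical mach->alignment_required()
    int worst_case = (alignment_required - 1) * 1;  // 15 bytes charged to blk_size, as if the
                                                    // node lands one byte past a 16-byte boundary
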
 737     // New functionality:
 738     //   Assert if the local is not top. In product mode let the new node
 739     //   override the old entry.
 740     assert(local == C->top(), "LocArray collision");
 741     if (local == C->top()) {
 742       return;
 743     }
 744     array->pop();
 745   }
 746   const Type *t = local->bottom_type();
 747 
 748   // Is it a safepoint scalar object node?
 749   if (local->is_SafePointScalarObject()) {
 750     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 751 
 752     ObjectValue* sv = (ObjectValue*) sv_for_node_id(objs, spobj->_idx);
 753     if (sv == nullptr) {
 754       ciKlass* cik = t->is_oopptr()->exact_klass();
 755       assert(cik->is_instance_klass() ||
 756              cik->is_array_klass(), "Not supported allocation.");
 757       sv = new ObjectValue(spobj->_idx,
 758                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
 759       set_sv_for_object_node(objs, sv);
 760 
 761       uint first_ind = spobj->first_index(sfpt->jvms());
 762       for (uint i = 0; i < spobj->n_fields(); i++) {
 763         Node* fld_node = sfpt->in(first_ind+i);
 764         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 765       }
 766     }
 767     array->append(sv);
 768     return;
 769   } else if (local->is_SafePointScalarMerge()) {
 770     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 771     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 772 
 773     if (mv == nullptr) {
 774       GrowableArray<ScopeValue*> deps;
 775 
 776       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 777       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 778       assert(deps.length() == 1, "missing value");
 779 
 780       int selector_idx = smerge->selector_idx(sfpt->jvms());
 781     (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);

 960 bool PhaseOutput::contains_as_owner(GrowableArray<MonitorValue*> *monarray, ObjectValue *ov) const {
 961   for (int k = 0; k < monarray->length(); k++) {
 962     MonitorValue* mv = monarray->at(k);
 963     if (mv->owner() == ov) {
 964       return true;
 965     }
 966   }
 967 
 968   return false;
 969 }
 970 
 971 //--------------------------Process_OopMap_Node--------------------------------
 972 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
 973   // Handle special safepoint nodes for synchronization
 974   MachSafePointNode *sfn   = mach->as_MachSafePoint();
 975   MachCallNode      *mcall;
 976 
 977   int safepoint_pc_offset = current_offset;
 978   bool is_method_handle_invoke = false;
 979   bool return_oop = false;
 980   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
 981   bool arg_escape = false;
 982 
 983   // Add the safepoint in the DebugInfoRecorder
 984   if( !mach->is_MachCall() ) {
 985     mcall = nullptr;
 986     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
 987   } else {
 988     mcall = mach->as_MachCall();
 989 
 990     // Is the call a MethodHandle call?
 991     if (mcall->is_MachCallJava()) {
 992       if (mcall->as_MachCallJava()->_method_handle_invoke) {
 993         assert(C->has_method_handle_invokes(), "must have been set during call generation");
 994         is_method_handle_invoke = true;
 995       }
 996       arg_escape = mcall->as_MachCallJava()->_arg_escape;
 997     }
 998 
 999     // Check if a call returns an object.
1000     if (mcall->returns_pointer()) {
1001       return_oop = true;
1002     }
1003     safepoint_pc_offset += mcall->ret_addr_offset();
1004     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1005   }
1006 
1007   // Loop over the JVMState list to add scope information
1008   // Do not skip safepoints with a null method, they need monitor info
1009   JVMState* youngest_jvms = sfn->jvms();
1010   int max_depth = youngest_jvms->depth();
1011 
1012   // Allocate the object pool for scalar-replaced objects -- the map from
1013   // small-integer keys (which can be recorded in the local and ostack
1014   // arrays) to descriptions of the object state.
1015   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1016 
1017   // Visit scopes from oldest to youngest.
1018   for (int depth = 1; depth <= max_depth; depth++) {
1019     JVMState* jvms = youngest_jvms->of_depth(depth);
1020     int idx;
1021     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1022     // Safepoints that do not have method() set only provide oop-map and monitor info

1121     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1122     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1123 
1124     // Make method available for all Safepoints
1125     ciMethod* scope_method = method ? method : C->method();
1126     // Describe the scope here
1127     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1128     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1129     // Now we can describe the scope.
1130     methodHandle null_mh;
1131     bool rethrow_exception = false;
1132     C->debug_info()->describe_scope(
1133       safepoint_pc_offset,
1134       null_mh,
1135       scope_method,
1136       jvms->bci(),
1137       jvms->should_reexecute(),
1138       rethrow_exception,
1139       is_method_handle_invoke,
1140       return_oop,
1141       has_ea_local_in_scope,
1142       arg_escape,
1143       locvals,
1144       expvals,
1145       monvals
1146     );
1147   } // End jvms loop
1148 
1149   // Mark the end of the scope set.
1150   C->debug_info()->end_safepoint(safepoint_pc_offset);
1151 }
1152 
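Taken end to end, Process_OopMap_Node drives the DebugInfoRecorder through a fixed bracketing protocol. A simplified sketch using only the calls that appear above (scope-value construction elided):

    C->debug_info()->add_safepoint(safepoint_pc_offset, oop_map);  // open the PC site
    for (int depth = 1; depth <= max_depth; depth++) {
      // build locvals / expvals / monvals for the scope at this depth ...
      C->debug_info()->describe_scope(safepoint_pc_offset, /* scope arguments as above */);
    }
    C->debug_info()->end_safepoint(safepoint_pc_offset);           // close the PC site
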
1153 
1154 
1155 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1156 class NonSafepointEmitter {
1157     Compile*  C;
1158     JVMState* _pending_jvms;
1159     int       _pending_offset;
1160 

1496           MachNode *nop = new MachNopNode(nops_cnt);
1497           block->insert_node(nop, j++);
1498           last_inst++;
1499           C->cfg()->map_node_to_block(nop, block);
1500           // Ensure enough space.
1501           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1502           if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1503             C->record_failure("CodeCache is full");
1504             return;
1505           }
1506           nop->emit(*cb, C->regalloc());
1507           cb->flush_bundle(true);
1508           current_offset = cb->insts_size();
1509         }
1510 
1511         bool observe_safepoint = is_sfn;
1512         // Remember the start of the last call in a basic block
1513         if (is_mcall) {
1514           MachCallNode *mcall = mach->as_MachCall();
1515 
1516           // This destination address is NOT PC-relative
1517           mcall->method_set((intptr_t)mcall->entry_point());
1518 
1519           // Save the return address
1520           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1521 
1522           observe_safepoint = mcall->guaranteed_safepoint();
1523         }
1524 
1525         // sfn will be valid whenever mcall is valid now because of inheritance
1526         if (observe_safepoint) {
1527           // Handle special safepoint nodes for synchronization
1528           if (!is_mcall) {
1529             MachSafePointNode *sfn = mach->as_MachSafePoint();
1530             // !!!!! Stubs only need an oopmap right now, so bail out
1531             if (sfn->jvms()->method() == nullptr) {
1532               // Write the oopmap directly to the code blob??!!
1533               continue;
1534             }
1535           } // End synchronization
1536 
1537           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1661       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1662         node_offsets[n->_idx] = cb->insts_size();
1663       }
1664 #endif
1665       assert(!C->failing(), "Should not reach here if failing.");
1666 
1667       // "Normal" instruction case
1668       DEBUG_ONLY(uint instr_offset = cb->insts_size());
1669       n->emit(*cb, C->regalloc());
1670       current_offset = cb->insts_size();
1671 
1672       // Above we only verified that there is enough space in the instruction section.
1673       // However, the instruction may emit stubs that cause code buffer expansion.
1674       // Bail out here if expansion failed due to a lack of code cache space.
1675       if (C->failing()) {
1676         return;
1677       }
1678 
1679       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1680              "ret_addr_offset() not within emitted code");
1681 
1682 #ifdef ASSERT
1683       uint n_size = n->size(C->regalloc());
1684       if (n_size < (current_offset-instr_offset)) {
1685         MachNode* mach = n->as_Mach();
1686         n->dump();
1687         mach->dump_format(C->regalloc(), tty);
1688         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1689         Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
1690         tty->print_cr(" ------------------- ");
1691         BufferBlob* blob = this->scratch_buffer_blob();
1692         address blob_begin = blob->content_begin();
1693         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1694         assert(false, "wrong size of mach node");
1695       }
1696 #endif
1697       non_safepoints.observe_instruction(n, current_offset);
1698 
1699       // mcall is last "call" that can be a safepoint
1700       // record it so we can see if a poll will directly follow it
1701       // in which case we'll need a pad to make the PcDesc sites unique

3062         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3063         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3064       }
3065     }
3066     // Do not allow defs of new derived values to float above GC
3067     // points unless the base is definitely available at the GC point.
3068 
3069     Node *m = b->get_node(i);
3070 
3071     // Add precedence edge from following safepoint to use of derived pointer
3072     if( last_safept_node != end_node &&
3073         m != last_safept_node) {
3074       for (uint k = 1; k < m->req(); k++) {
3075         const Type *t = m->in(k)->bottom_type();
3076         if( t->isa_oop_ptr() &&
3077             t->is_ptr()->offset() != 0 ) {
3078           last_safept_node->add_prec( m );
3079           break;
3080         }
3081       }
3082     }
3083 
3084     if( n->jvms() ) {           // Precedence edge from derived to safept
3085       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3086       if( b->get_node(last_safept) != last_safept_node ) {
3087         last_safept = b->find_node(last_safept_node);
3088       }
3089       for( uint j=last_safept; j > i; j-- ) {
3090         Node *mach = b->get_node(j);
3091         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3092           mach->add_prec( n );
3093       }
3094       last_safept = i;
3095       last_safept_node = m;
3096     }
3097   }
3098 
3099   if (fat_proj_seen) {
3100     // Garbage collect pinch nodes that were not consumed.
3101     // They are usually created by a fat kill MachProj for a call.

3220 }
3221 #endif
3222 
3223 //-----------------------init_scratch_buffer_blob------------------------------
3224 // Construct a temporary BufferBlob and cache it for this compile.
3225 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3226   // If there is already a scratch buffer blob allocated and the
3227   // constant section is big enough, use it.  Otherwise free the
3228   // current and allocate a new one.
3229   BufferBlob* blob = scratch_buffer_blob();
3230   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3231     // Use the current blob.
3232   } else {
3233     if (blob != nullptr) {
3234       BufferBlob::free(blob);
3235     }
3236 
3237     ResourceMark rm;
3238     _scratch_const_size = const_size;
3239     int size = C2Compiler::initial_code_buffer_size(const_size);
3240     blob = BufferBlob::create("Compile::scratch_buffer", size);
3241     // Record the buffer blob for next time.
3242     set_scratch_buffer_blob(blob);
3243     // Have we run out of code space?
3244     if (scratch_buffer_blob() == nullptr) {
3245       // Let CompilerBroker disable further compilations.
3246       C->record_failure("Not enough space for scratch buffer in CodeCache");
3247       return;
3248     }
3249   }
3250 
3251   // Initialize the relocation buffers
3252   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3253   set_scratch_locs_memory(locs_buf);
3254 }
3255 
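One subtlety in the last statement above: the cast binds tighter than the subtraction, so the relocation buffer starts MAX_locs_size relocInfo entries (not bytes) below the end of the blob. Spelled out:

    relocInfo* end      = (relocInfo*) blob->content_end();
    relocInfo* locs_buf = end - MAX_locs_size;   // same address as the one-liner above
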
3256 
3257 //-----------------------scratch_emit_size-------------------------------------
3258 // Helper function that computes size by emitting code
3259 uint PhaseOutput::scratch_emit_size(const Node* n) {

3290   buf.insts()->set_scratch_emit();
3291   buf.stubs()->set_scratch_emit();
3292 
3293   // Do the emission.
3294 
3295   Label fakeL; // Fake label for branch instructions.
3296   Label*   saveL = nullptr;
3297   uint save_bnum = 0;
3298   bool is_branch = n->is_MachBranch();
3299   if (is_branch) {
3300     MacroAssembler masm(&buf);
3301     masm.bind(fakeL);
3302     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3303     n->as_MachBranch()->label_set(&fakeL, 0);
3304   }
3305   n->emit(buf, C->regalloc());
3306 
3307   // Emitting into the scratch buffer should not fail
3308   assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3309 
3310   if (is_branch) // Restore label.
3311     n->as_MachBranch()->label_set(saveL, save_bnum);
3312 
3313   // End scratch_emit_size section.
3314   set_in_scratch_emit_size(false);
3315 
3316   return buf.insts_size();
3317 }
3318 
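scratch_emit_size() is the measuring path behind node sizing: the node is emitted once into the scratch blob purely to count its bytes. A hedged sketch of the typical call site (the real wrapper is MachNode::size() in machnode.cpp; treat the exact body as an assumption):

    uint MachNode::size(PhaseRegAlloc* ra_) const {
      return ra_->C->output()->scratch_emit_size(this);  // emit to scratch, return length
    }
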
3319 void PhaseOutput::install() {
3320   if (!C->should_install_code()) {
3321     return;
3322   } else if (C->stub_function() != nullptr) {
3323     install_stub(C->stub_name());
3324   } else {
3325     install_code(C->method(),
3326                  C->entry_bci(),
3327                  CompileBroker::compiler2(),
3328                  C->has_unsafe_access(),
3329                  SharedRuntime::is_wide_vector(C->max_vector_size()),
3330                  C->rtm_state());
3331   }

3335                                int               entry_bci,
3336                                AbstractCompiler* compiler,
3337                                bool              has_unsafe_access,
3338                                bool              has_wide_vectors,
3339                                RTMState          rtm_state) {
3340   // Check if we want to skip execution of all compiled code.
3341   {
3342 #ifndef PRODUCT
3343     if (OptoNoExecute) {
3344       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3345       return;
3346     }
3347 #endif
3348     Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3349 
3350     if (C->is_osr_compilation()) {
3351       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3352       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3353     } else {
3354       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3355       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3356     }
3357 
3358     C->env()->register_method(target,
3359                                      entry_bci,
3360                                      &_code_offsets,
3361                                      _orig_pc_slot_offset_in_bytes,
3362                                      code_buffer(),
3363                                      frame_size_in_words(),
3364                                      oop_map_set(),
3365                                      &_handler_table,
3366                                      inc_table(),
3367                                      compiler,
3368                                      has_unsafe_access,
3369                                      SharedRuntime::is_wide_vector(C->max_vector_size()),
3370                                      C->has_monitors(),
3371                                      0,
3372                                      C->rtm_state());
3373 
3374     if (C->log() != nullptr) { // Print code cache state into compiler log
3375       C->log()->code_cache_state();
3376     }
3377   }
3378 }
3379 void PhaseOutput::install_stub(const char* stub_name) {
3380   // Entry point will be accessed using stub_entry_point();
3381   if (code_buffer() == nullptr) {
3382     Matcher::soft_match_failure();
3383   } else {
3384     if (PrintAssembly && (WizardMode || Verbose))
3385       tty->print_cr("### Stub::%s", stub_name);
3386 
3387     if (!C->failing()) {
3388       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3389 
3390       // Make the NMethod
3391       // For now we mark the frame as never safe for profile stackwalking
3392       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,

src/hotspot/share/opto/output.cpp (patched version)

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfo.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/compilerDirectives.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/gc_globals.hpp"
  37 #include "gc/shared/c2/barrierSetC2.hpp"
  38 #include "memory/allocation.inline.hpp"
  39 #include "memory/allocation.hpp"
  40 #include "opto/ad.hpp"
  41 #include "opto/block.hpp"
  42 #include "opto/c2compiler.hpp"
  43 #include "opto/c2_MacroAssembler.hpp"
  44 #include "opto/callnode.hpp"
  45 #include "opto/cfgnode.hpp"
  46 #include "opto/locknode.hpp"
  47 #include "opto/machnode.hpp"
  48 #include "opto/node.hpp"
  49 #include "opto/optoreg.hpp"
  50 #include "opto/output.hpp"
  51 #include "opto/regalloc.hpp"
  52 #include "opto/runtime.hpp"
  53 #include "opto/subnode.hpp"
  54 #include "opto/type.hpp"
  55 #include "runtime/handles.inline.hpp"
  56 #include "runtime/sharedRuntime.hpp"

 225     _first_block_size(0),
 226     _handler_table(),
 227     _inc_table(),
 228     _stub_list(),
 229     _oop_map_set(nullptr),
 230     _scratch_buffer_blob(nullptr),
 231     _scratch_locs_memory(nullptr),
 232     _scratch_const_size(-1),
 233     _in_scratch_emit_size(false),
 234     _frame_slots(0),
 235     _code_offsets(),
 236     _node_bundling_limit(0),
 237     _node_bundling_base(nullptr),
 238     _orig_pc_slot(0),
 239     _orig_pc_slot_offset_in_bytes(0),
 240     _buf_sizes(),
 241     _block(nullptr),
 242     _index(0) {
 243   C->set_output(this);
 244   if (C->stub_name() == nullptr) {
 245     int fixed_slots = C->fixed_slots();
 246     if (C->needs_stack_repair()) {
 247       fixed_slots -= 2;
 248     }
 249     // TODO 8284443 Only reserve extra slot if needed
 250     if (InlineTypeReturnedAsFields) {
 251       fixed_slots -= 2;
 252     }
 253     _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
 254   }
 255 }
 256 
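A worked example of the slot arithmetic above, assuming a 64-bit port where sizeof(address) == 8 and VMRegImpl::stack_slot_size == 4; the starting fixed_slots value is purely illustrative:

    int fixed_slots = C->fixed_slots();      // assume 8 for this example
    fixed_slots -= 2;                        // needs_stack_repair: skip the repair slots
    fixed_slots -= 2;                        // InlineTypeReturnedAsFields: skip the extra slot
    _orig_pc_slot = fixed_slots - (8 / 4);   // 8 - 2 - 2 - 2 = 2
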
 257 PhaseOutput::~PhaseOutput() {
 258   C->set_output(nullptr);
 259   if (_scratch_buffer_blob != nullptr) {
 260     BufferBlob::free(_scratch_buffer_blob);
 261   }
 262 }
 263 
 264 void PhaseOutput::perform_mach_node_analysis() {
 265   // Late barrier analysis must be done after schedule and bundle
 266   // Otherwise liveness based spilling will fail
 267   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 268   bs->late_barrier_analysis();
 269 
 270   pd_perform_mach_node_analysis();
 271 
 272   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 273 }
 274 
 275 // Convert Nodes to instruction bits and pass off to the VM
 276 void PhaseOutput::Output() {
 277   // RootNode goes
 278   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 279 
 280   // The number of new nodes (mostly MachNop) is proportional to
 281   // the number of java calls and inner loops which are aligned.
 282   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 283                             C->inner_loops()*(OptoLoopAlignment-1)),
 284                            "out of nodes before code generation" ) ) {
 285     return;
 286   }
 287   // Make sure I can find the Start Node
 288   Block *entry = C->cfg()->get_block(1);
 289   Block *broot = C->cfg()->get_root_block();
 290 
 291   const StartNode *start = entry->head()->as_Start();
 292 
 293   // Replace StartNode with prolog
 294   Label verified_entry;
 295   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
 296   entry->map_node(prolog, 0);
 297   C->cfg()->map_node_to_block(prolog, entry);
 298   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 299 
 300   // Virtual methods need an unverified entry point
 301   if (C->is_osr_compilation()) {
 302     if (PoisonOSREntry) {
 303       // TODO: Should use a ShouldNotReachHereNode...
 304       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 305     }
 306   } else {
 307     if (C->method()) {
 308       if (C->method()->has_scalarized_args()) {
 309         // Add entry point to unpack all inline type arguments
 310         C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
 311         if (!C->method()->is_static()) {
 312           // Add verified/unverified entry points to only unpack inline type receiver at interface calls
 313           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
 314           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true,  /* receiver_only */ true));
 315           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
 316         }
 317       } else if (!C->method()->is_static()) {
 318         // Insert unvalidated entry point
 319         C->cfg()->insert(broot, 0, new MachUEPNode());
 320       }
 321     }
 322   }
 323 
 324   // Break before main entry point
 325   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 326       (OptoBreakpoint && C->is_method_compilation())       ||
 327       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 328       (OptoBreakpointC2R && !C->method())                   ) {
 329     // checking for C->method() means that OptoBreakpoint does not apply to
 330     // runtime stubs or frame converters
 331     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 332   }
 333 
 334   // Insert epilogs before every return
 335   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 336     Block* block = C->cfg()->get_block(i);
 337     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 338       Node* m = block->end();
 339       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 340         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 341         block->add_inst(epilog);
 342         C->cfg()->map_node_to_block(epilog, block);
 343       }
 344     }
 345   }
 346 
 347   // Keeper of sizing aspects
 348   _buf_sizes = BufferSizingData();
 349 
 350   // Initialize code buffer
 351   estimate_buffer_size(_buf_sizes._const);
 352   if (C->failing()) return;
 353 
 354   // Pre-compute the length of blocks and replace
 355   // long branches with short if machine supports it.
 356   // Must be done before ScheduleAndBundle due to SPARC delay slots
 357   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 358   blk_starts[0] = 0;
 359   shorten_branches(blk_starts);
 360 
 361   if (!C->is_osr_compilation() && C->has_scalarized_args()) {
 362     // Compute the offsets of the entry points required by the inline type calling convention
 363     if (!C->method()->is_static()) {
 364       // We have entries at the beginning of the method, implemented by the first 4 nodes.
 365       // Entry                     (unverified) @ offset 0
 366       // Verified_Inline_Entry_RO
 367       // Inline_Entry              (unverified)
 368       // Verified_Inline_Entry
 369       uint offset = 0;
 370       _code_offsets.set_value(CodeOffsets::Entry, offset);
 371 
 372       offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
 373       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
 374 
 375       offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
 376       _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
 377 
 378       offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
 379       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
 380     } else {
 381       _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
 382       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
 383     }
 384   }
 385 
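With the four entry nodes in place, the recorded offsets partition the start of the method. Purely for illustration, if the first three nodes sized to 16, 12 and 12 bytes, the table would read:

    // CodeOffsets::Entry                    = 0    (unverified)
    // CodeOffsets::Verified_Inline_Entry_RO = 16
    // CodeOffsets::Inline_Entry             = 28   (unverified)
    // CodeOffsets::Verified_Inline_Entry    = 40
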
 386   ScheduleAndBundle();
 387   if (C->failing()) {
 388     return;
 389   }
 390 
 391   perform_mach_node_analysis();
 392 
 393   // Complete sizing of codebuffer
 394   CodeBuffer* cb = init_buffer();
 395   if (cb == nullptr || C->failing()) {
 396     return;
 397   }
 398 
 399   BuildOopMaps();
 400 
 401   if (C->failing())  {
 402     return;
 403   }
 404 
 405   fill_buffer(cb, blk_starts);

 526     // Sum all instruction sizes to compute block size
 527     uint last_inst = block->number_of_nodes();
 528     uint blk_size = 0;
 529     for (uint j = 0; j < last_inst; j++) {
 530       _index = j;
 531       Node* nj = block->get_node(_index);
 532       // Handle machine instruction nodes
 533       if (nj->is_Mach()) {
 534         MachNode* mach = nj->as_Mach();
 535         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 536         reloc_size += mach->reloc();
 537         if (mach->is_MachCall()) {
 538           // add size information for trampoline stub
 539           // class CallStubImpl is platform-specific and defined in the *.ad files.
 540           stub_size  += CallStubImpl::size_call_trampoline();
 541           reloc_size += CallStubImpl::reloc_call_trampoline();
 542 
 543           MachCallNode *mcall = mach->as_MachCall();
 544           // This destination address is NOT PC-relative
 545 
 546           if (mcall->entry_point() != nullptr) {
 547             mcall->method_set((intptr_t)mcall->entry_point());
 548           }
 549 
 550           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 551             stub_size  += CompiledStaticCall::to_interp_stub_size();
 552             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 553           }
 554         } else if (mach->is_MachSafePoint()) {
 555           // If call/safepoint are adjacent, account for possible
 556           // nop to disambiguate the two safepoints.
 557           // ScheduleAndBundle() can rearrange nodes in a block,
 558           // check for all offsets inside this block.
 559           if (last_call_adr >= blk_starts[i]) {
 560             blk_size += nop_size;
 561           }
 562         }
 563         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 564           // Nop is inserted between "avoid back to back" instructions.
 565           // ScheduleAndBundle() can rearrange nodes in a block,
 566           // check for all offsets inside this block.
 567           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 568             blk_size += nop_size;

 783     // New functionality:
 784     //   Assert if the local is not top. In product mode let the new node
 785     //   override the old entry.
 786     assert(local == C->top(), "LocArray collision");
 787     if (local == C->top()) {
 788       return;
 789     }
 790     array->pop();
 791   }
 792   const Type *t = local->bottom_type();
 793 
 794   // Is it a safepoint scalar object node?
 795   if (local->is_SafePointScalarObject()) {
 796     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 797 
 798     ObjectValue* sv = (ObjectValue*) sv_for_node_id(objs, spobj->_idx);
 799     if (sv == nullptr) {
 800       ciKlass* cik = t->is_oopptr()->exact_klass();
 801       assert(cik->is_instance_klass() ||
 802              cik->is_array_klass(), "Not supported allocation.");
 803       uint first_ind = spobj->first_index(sfpt->jvms());
 804       // Nullable, scalarized inline types have an is_init input
 805       // that needs to be checked before using the field values.
 806       ScopeValue* is_init = nullptr;
 807       if (cik->is_inlinetype()) {
 808         Node* init_node = sfpt->in(first_ind++);
 809         assert(init_node != nullptr, "is_init node not found");
 810         if (!init_node->is_top()) {
 811           const TypeInt* init_type = init_node->bottom_type()->is_int();
 812           if (init_node->is_Con()) {
 813             is_init = new ConstantIntValue(init_type->get_con());
 814           } else {
 815             OptoReg::Name init_reg = C->regalloc()->get_reg_first(init_node);
 816             is_init = new_loc_value(C->regalloc(), init_reg, Location::normal);
 817           }
 818         }
 819       }
 820       sv = new ObjectValue(spobj->_idx,
 821                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), is_init);
 822       set_sv_for_object_node(objs, sv);
 823 
 824       for (uint i = 0; i < spobj->n_fields(); i++) {
 825         Node* fld_node = sfpt->in(first_ind+i);
 826         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 827       }
 828     }
 829     array->append(sv);
 830     return;
 831   } else if (local->is_SafePointScalarMerge()) {
 832     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 833     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 834 
 835     if (mv == nullptr) {
 836       GrowableArray<ScopeValue*> deps;
 837 
 838       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 839       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 840       assert(deps.length() == 1, "missing value");
 841 
 842       int selector_idx = smerge->selector_idx(sfpt->jvms());
 843     (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);

1022 bool PhaseOutput::contains_as_owner(GrowableArray<MonitorValue*> *monarray, ObjectValue *ov) const {
1023   for (int k = 0; k < monarray->length(); k++) {
1024     MonitorValue* mv = monarray->at(k);
1025     if (mv->owner() == ov) {
1026       return true;
1027     }
1028   }
1029 
1030   return false;
1031 }
1032 
1033 //--------------------------Process_OopMap_Node--------------------------------
1034 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1035   // Handle special safepoint nodes for synchronization
1036   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1037   MachCallNode      *mcall;
1038 
1039   int safepoint_pc_offset = current_offset;
1040   bool is_method_handle_invoke = false;
1041   bool return_oop = false;
1042   bool return_scalarized = false;
1043   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1044   bool arg_escape = false;
1045 
1046   // Add the safepoint in the DebugInfoRecorder
1047   if( !mach->is_MachCall() ) {
1048     mcall = nullptr;
1049     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1050   } else {
1051     mcall = mach->as_MachCall();
1052 
1053     // Is the call a MethodHandle call?
1054     if (mcall->is_MachCallJava()) {
1055       if (mcall->as_MachCallJava()->_method_handle_invoke) {
1056         assert(C->has_method_handle_invokes(), "must have been set during call generation");
1057         is_method_handle_invoke = true;
1058       }
1059       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1060     }
1061 
1062     // Check if a call returns an object.
1063     if (mcall->returns_pointer() || mcall->returns_scalarized()) {
1064       return_oop = true;
1065     }
1066     if (mcall->returns_scalarized()) {
1067       return_scalarized = true;
1068     }
1069     safepoint_pc_offset += mcall->ret_addr_offset();
1070     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1071   }
1072 
1073   // Loop over the JVMState list to add scope information
1074   // Do not skip safepoints with a null method, they need monitor info
1075   JVMState* youngest_jvms = sfn->jvms();
1076   int max_depth = youngest_jvms->depth();
1077 
1078   // Allocate the object pool for scalar-replaced objects -- the map from
1079   // small-integer keys (which can be recorded in the local and ostack
1080   // arrays) to descriptions of the object state.
1081   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1082 
1083   // Visit scopes from oldest to youngest.
1084   for (int depth = 1; depth <= max_depth; depth++) {
1085     JVMState* jvms = youngest_jvms->of_depth(depth);
1086     int idx;
1087     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1088     // Safepoints that do not have method() set only provide oop-map and monitor info

1187     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1188     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1189 
1190     // Make method available for all Safepoints
1191     ciMethod* scope_method = method ? method : C->method();
1192     // Describe the scope here
1193     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1194     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1195     // Now we can describe the scope.
1196     methodHandle null_mh;
1197     bool rethrow_exception = false;
1198     C->debug_info()->describe_scope(
1199       safepoint_pc_offset,
1200       null_mh,
1201       scope_method,
1202       jvms->bci(),
1203       jvms->should_reexecute(),
1204       rethrow_exception,
1205       is_method_handle_invoke,
1206       return_oop,
1207       return_scalarized,
1208       has_ea_local_in_scope,
1209       arg_escape,
1210       locvals,
1211       expvals,
1212       monvals
1213     );
1214   } // End jvms loop
1215 
1216   // Mark the end of the scope set.
1217   C->debug_info()->end_safepoint(safepoint_pc_offset);
1218 }
1219 
1220 
1221 
1222 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1223 class NonSafepointEmitter {
1224     Compile*  C;
1225     JVMState* _pending_jvms;
1226     int       _pending_offset;
1227 

1563           MachNode *nop = new MachNopNode(nops_cnt);
1564           block->insert_node(nop, j++);
1565           last_inst++;
1566           C->cfg()->map_node_to_block(nop, block);
1567           // Ensure enough space.
1568           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1569           if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1570             C->record_failure("CodeCache is full");
1571             return;
1572           }
1573           nop->emit(*cb, C->regalloc());
1574           cb->flush_bundle(true);
1575           current_offset = cb->insts_size();
1576         }
1577 
1578         bool observe_safepoint = is_sfn;
1579         // Remember the start of the last call in a basic block
1580         if (is_mcall) {
1581           MachCallNode *mcall = mach->as_MachCall();
1582 
1583           if (mcall->entry_point() != nullptr) {
1584             // This destination address is NOT PC-relative
1585             mcall->method_set((intptr_t)mcall->entry_point());
1586           }
1587 
1588           // Save the return address
1589           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1590 
1591           observe_safepoint = mcall->guaranteed_safepoint();
1592         }
1593 
1594         // sfn will be valid whenever mcall is valid now because of inheritance
1595         if (observe_safepoint) {
1596           // Handle special safepoint nodes for synchronization
1597           if (!is_mcall) {
1598             MachSafePointNode *sfn = mach->as_MachSafePoint();
1599             // !!!!! Stubs only need an oopmap right now, so bail out
1600             if (sfn->jvms()->method() == nullptr) {
1601               // Write the oopmap directly to the code blob??!!
1602               continue;
1603             }
1604           } // End synchronization
1605 
1606           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1730       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1731         node_offsets[n->_idx] = cb->insts_size();
1732       }
1733 #endif
1734       assert(!C->failing(), "Should not reach here if failing.");
1735 
1736       // "Normal" instruction case
1737       DEBUG_ONLY(uint instr_offset = cb->insts_size());
1738       n->emit(*cb, C->regalloc());
1739       current_offset = cb->insts_size();
1740 
1741       // Above we only verified that there is enough space in the instruction section.
1742       // However, the instruction may emit stubs that cause code buffer expansion.
1743       // Bail out here if expansion failed due to a lack of code cache space.
1744       if (C->failing()) {
1745         return;
1746       }
1747 
1748       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1749              "ret_addr_offset() not within emitted code");
1750 #ifdef ASSERT
1751       uint n_size = n->size(C->regalloc());
1752       if (n_size < (current_offset-instr_offset)) {
1753         MachNode* mach = n->as_Mach();
1754         n->dump();
1755         mach->dump_format(C->regalloc(), tty);
1756         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1757         Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
1758         tty->print_cr(" ------------------- ");
1759         BufferBlob* blob = this->scratch_buffer_blob();
1760         address blob_begin = blob->content_begin();
1761         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1762         assert(false, "wrong size of mach node");
1763       }
1764 #endif
1765       non_safepoints.observe_instruction(n, current_offset);
1766 
1767       // mcall is last "call" that can be a safepoint
1768       // record it so we can see if a poll will directly follow it
1769       // in which case we'll need a pad to make the PcDesc sites unique

3130         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3131         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3132       }
3133     }
3134     // Do not allow defs of new derived values to float above GC
3135     // points unless the base is definitely available at the GC point.
3136 
3137     Node *m = b->get_node(i);
3138 
3139     // Add precedence edge from following safepoint to use of derived pointer
3140     if( last_safept_node != end_node &&
3141         m != last_safept_node) {
3142       for (uint k = 1; k < m->req(); k++) {
3143         const Type *t = m->in(k)->bottom_type();
3144         if( t->isa_oop_ptr() &&
3145             t->is_ptr()->offset() != 0 ) {
3146           last_safept_node->add_prec( m );
3147           break;
3148         }
3149       }
3150 
3151       // Do not allow a CheckCastPP node whose input is a raw pointer to
3152       // float past a safepoint.  This can occur when a buffered inline
3153       // type is allocated in a loop and the CheckCastPP from that
3154       // allocation is reused outside the loop.  If the use inside the
3155       // loop is scalarized the CheckCastPP will no longer be connected
3156       // to the loop safepoint.  See JDK-8264340.
3157       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
3158         Node *def = m->in(1);
3159         if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
3160           last_safept_node->add_prec(m);
3161         }
3162       }
3163     }
3164 
3165     if( n->jvms() ) {           // Precedence edge from derived to safept
3166       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3167       if( b->get_node(last_safept) != last_safept_node ) {
3168         last_safept = b->find_node(last_safept_node);
3169       }
3170       for( uint j=last_safept; j > i; j-- ) {
3171         Node *mach = b->get_node(j);
3172         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3173           mach->add_prec( n );
3174       }
3175       last_safept = i;
3176       last_safept_node = m;
3177     }
3178   }
3179 
3180   if (fat_proj_seen) {
3181     // Garbage collect pinch nodes that were not consumed.
3182     // They are usually created by a fat kill MachProj for a call.

3301 }
3302 #endif
3303 
3304 //-----------------------init_scratch_buffer_blob------------------------------
3305 // Construct a temporary BufferBlob and cache it for this compile.
3306 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3307   // If there is already a scratch buffer blob allocated and the
3308   // constant section is big enough, use it.  Otherwise free the
3309   // current and allocate a new one.
3310   BufferBlob* blob = scratch_buffer_blob();
3311   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3312     // Use the current blob.
3313   } else {
3314     if (blob != nullptr) {
3315       BufferBlob::free(blob);
3316     }
3317 
3318     ResourceMark rm;
3319     _scratch_const_size = const_size;
3320     int size = C2Compiler::initial_code_buffer_size(const_size);
3321     if (C->has_scalarized_args()) {
3322       // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
3323       // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
3324       ciMethod* method = C->method();
3325       int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
3326       int arg_num = 0;
3327       if (!method->is_static()) {
3328         if (method->is_scalarized_arg(arg_num)) {
3329           size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
3330         }
3331         arg_num++;
3332       }
3333       for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
3334         if (method->is_scalarized_arg(arg_num)) {
3335           size += str.type()->as_inline_klass()->oop_count() * barrier_size;
3336         }
3337         arg_num++;
3338       }
3339     }
3340     blob = BufferBlob::create("Compile::scratch_buffer", size);
3341     // Record the buffer blob for next time.
3342     set_scratch_buffer_blob(blob);
3343     // Have we run out of code space?
3344     if (scratch_buffer_blob() == nullptr) {
3345       // Let CompilerBroker disable further compilations.
3346       C->record_failure("Not enough space for scratch buffer in CodeCache");
3347       return;
3348     }
3349   }
3350 
3351   // Initialize the relocation buffers
3352   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3353   set_scratch_locs_memory(locs_buf);
3354 }
3355 
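A worked instance of the extra sizing above, for a hypothetical instance method whose scalarized inline-type receiver holds three oop fields, compiled with ZGC:

    int barrier_size = 200;        // the UseZGC branch above
    size += 3 * barrier_size;      // receiver: oop_count() == 3 -> 600 extra bytes
    // every further scalarized argument adds oop_count() * barrier_size the same way
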
3356 
3357 //-----------------------scratch_emit_size-------------------------------------
3358 // Helper function that computes size by emitting code
3359 uint PhaseOutput::scratch_emit_size(const Node* n) {

3390   buf.insts()->set_scratch_emit();
3391   buf.stubs()->set_scratch_emit();
3392 
3393   // Do the emission.
3394 
3395   Label fakeL; // Fake label for branch instructions.
3396   Label*   saveL = nullptr;
3397   uint save_bnum = 0;
3398   bool is_branch = n->is_MachBranch();
3399   if (is_branch) {
3400     MacroAssembler masm(&buf);
3401     masm.bind(fakeL);
3402     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3403     n->as_MachBranch()->label_set(&fakeL, 0);
3404   }
3405   n->emit(buf, C->regalloc());
3406 
3407   // Emitting into the scratch buffer should not fail
3408   assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3409 
3410   // Restore label.
3411   if (is_branch) {
3412     n->as_MachBranch()->label_set(saveL, save_bnum);
3413   }
3414 
3415   // End scratch_emit_size section.
3416   set_in_scratch_emit_size(false);
3417 
3418   return buf.insts_size();
3419 }
3420 
3421 void PhaseOutput::install() {
3422   if (!C->should_install_code()) {
3423     return;
3424   } else if (C->stub_function() != nullptr) {
3425     install_stub(C->stub_name());
3426   } else {
3427     install_code(C->method(),
3428                  C->entry_bci(),
3429                  CompileBroker::compiler2(),
3430                  C->has_unsafe_access(),
3431                  SharedRuntime::is_wide_vector(C->max_vector_size()),
3432                  C->rtm_state());
3433   }

3437                                int               entry_bci,
3438                                AbstractCompiler* compiler,
3439                                bool              has_unsafe_access,
3440                                bool              has_wide_vectors,
3441                                RTMState          rtm_state) {
3442   // Check if we want to skip execution of all compiled code.
3443   {
3444 #ifndef PRODUCT
3445     if (OptoNoExecute) {
3446       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3447       return;
3448     }
3449 #endif
3450     Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3451 
3452     if (C->is_osr_compilation()) {
3453       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3454       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3455     } else {
3456       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3457       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
3458         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
3459       }
3460       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
3461         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
3462       }
3463       if (_code_offsets.value(CodeOffsets::Entry) == -1) {
3464         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
3465       }
3466       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3467     }
3468 
3469     C->env()->register_method(target,
3470                               entry_bci,
3471                               &_code_offsets,
3472                               _orig_pc_slot_offset_in_bytes,
3473                               code_buffer(),
3474                               frame_size_in_words(),
3475                               _oop_map_set,
3476                               &_handler_table,
3477                               inc_table(),
3478                               compiler,
3479                               has_unsafe_access,
3480                               SharedRuntime::is_wide_vector(C->max_vector_size()),
3481                               C->has_monitors(),
3482                               0,
3483                               C->rtm_state());
3484 
3485     if (C->log() != nullptr) { // Print code cache state into compiler log
3486       C->log()->code_cache_state();
3487     }
3488   }
3489 }
3490 void PhaseOutput::install_stub(const char* stub_name) {
3491   // Entry point will be accessed using stub_entry_point();
3492   if (code_buffer() == nullptr) {
3493     Matcher::soft_match_failure();
3494   } else {
3495     if (PrintAssembly && (WizardMode || Verbose))
3496       tty->print_cr("### Stub::%s", stub_name);
3497 
3498     if (!C->failing()) {
3499       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3500 
3501       // Make the NMethod
3502       // For now we mark the frame as never safe for profile stackwalking
3503       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,