 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfo.hpp"
#include "code/debugInfoRec.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.hpp"
#include "opto/ad.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/c2_MacroAssembler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/node.hpp"
#include "opto/optoreg.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/type.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/xmlstream.hpp"

// ...
    _first_block_size(0),
    _handler_table(),
    _inc_table(),
    _stub_list(),
    _oop_map_set(nullptr),
    _scratch_buffer_blob(nullptr),
    _scratch_locs_memory(nullptr),
    _scratch_const_size(-1),
    _in_scratch_emit_size(false),
    _frame_slots(0),
    _code_offsets(),
    _node_bundling_limit(0),
    _node_bundling_base(nullptr),
    _orig_pc_slot(0),
    _orig_pc_slot_offset_in_bytes(0),
    _buf_sizes(),
    _block(nullptr),
    _index(0) {
  C->set_output(this);
  if (C->stub_name() == nullptr) {
    int fixed_slots = C->fixed_slots();
    if (C->needs_stack_repair()) {
      fixed_slots -= 2;
    }
    // TODO 8284443 Only reserve extra slot if needed
    if (InlineTypeReturnedAsFields) {
      fixed_slots -= 2;
    }
    _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
  }
}
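
// Note: on common 64-bit platforms sizeof(address) is 8 and
// VMRegImpl::stack_slot_size is 4, so the slot index computed above sits
// two 32-bit stack slots below the end of the fixed-slot area, leaving
// room to save the original PC for deoptimization.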

PhaseOutput::~PhaseOutput() {
  C->set_output(nullptr);
  if (_scratch_buffer_blob != nullptr) {
    BufferBlob::free(_scratch_buffer_blob);
  }
}

void PhaseOutput::perform_mach_node_analysis() {
  // Late barrier analysis must be done after schedule and bundle;
  // otherwise, liveness-based spilling will fail.
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->late_barrier_analysis();

  pd_perform_mach_node_analysis();

  C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
}
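
// pd_perform_mach_node_analysis() above is the platform-dependent ("pd_")
// part of this pass; each CPU backend supplies its own implementation.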

// Convert Nodes to instruction bits and pass off to the VM
void PhaseOutput::Output() {
  // RootNode goes
  assert(C->cfg()->get_root_block()->number_of_nodes() == 0, "");

  // The number of new nodes (mostly MachNop) is proportional to
  // the number of java calls and inner loops which are aligned.
  if (C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
                           C->inner_loops()*(OptoLoopAlignment-1)),
                          "out of nodes before code generation")) {
    return;
  }
  // Make sure I can find the Start Node
  Block* entry = C->cfg()->get_block(1);
  Block* broot = C->cfg()->get_root_block();

  const StartNode* start = entry->head()->as_Start();

  // Replace StartNode with prolog
  Label verified_entry;
  MachPrologNode* prolog = new MachPrologNode(&verified_entry);
  entry->map_node(prolog, 0);
  C->cfg()->map_node_to_block(prolog, entry);
  C->cfg()->unmap_node_from_block(start); // start is no longer in any block

  // Virtual methods need an unverified entry point
  if (C->is_osr_compilation()) {
    if (PoisonOSREntry) {
      // TODO: Should use a ShouldNotReachHereNode...
      C->cfg()->insert(broot, 0, new MachBreakpointNode());
    }
  } else {
    if (C->method()) {
      if (C->method()->has_scalarized_args()) {
        // Add entry point to unpack all inline type arguments
        C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
        if (!C->method()->is_static()) {
          // Add verified/unverified entry points that only unpack the inline type receiver at interface calls
          C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
          C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ true));
          C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
        }
      } else if (!C->method()->is_static()) {
        // Insert unvalidated entry point
        C->cfg()->insert(broot, 0, new MachUEPNode());
      }
    }
  }

  // Break before main entry point
  if ((C->method() && C->directive()->BreakAtExecuteOption) ||
      (OptoBreakpoint && C->is_method_compilation()) ||
      (OptoBreakpointOSR && C->is_osr_compilation()) ||
      (OptoBreakpointC2R && !C->method())) {
    // checking for C->method() means that OptoBreakpoint does not apply to
    // runtime stubs or frame converters
    C->cfg()->insert(entry, 1, new MachBreakpointNode());
  }

  // Insert epilogs before every return
  for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
    Block* block = C->cfg()->get_block(i);
    if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
      Node* m = block->end();
      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
        MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
        block->add_inst(epilog);
        C->cfg()->map_node_to_block(epilog, block);
      }
    }
  }
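
  // The flag passed to MachEpilogNode above records whether the exit is a
  // normal Op_Return; on most platforms a returning epilog is also where the
  // safepoint poll is emitted, which non-return exits do not need.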

  // Keeper of sizing aspects
  _buf_sizes = BufferSizingData();

  // Initialize code buffer
  estimate_buffer_size(_buf_sizes._const);
  if (C->failing()) return;

  // Pre-compute the length of blocks and replace long branches with short
  // ones where the machine supports it.
  // Must be done before ScheduleAndBundle due to SPARC delay slots.
  uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
  blk_starts[0] = 0;
  shorten_branches(blk_starts);

  if (!C->is_osr_compilation() && C->has_scalarized_args()) {
    // Compute the offsets of the entry points required by the inline type calling convention
    if (!C->method()->is_static()) {
      // We have entries at the beginning of the method, implemented by the first 4 nodes.
      // Entry                     (unverified) @ offset 0
      // Verified_Inline_Entry_RO
      // Inline_Entry              (unverified)
      // Verified_Inline_Entry
      uint offset = 0;
      _code_offsets.set_value(CodeOffsets::Entry, offset);

      offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
      _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);

      offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
      _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);

      offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
      _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
    } else {
      _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
      _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
    }
  }

  ScheduleAndBundle();
  if (C->failing()) {
    return;
  }

  perform_mach_node_analysis();

  // Complete sizing of code buffer
  CodeBuffer* cb = init_buffer();
  if (cb == nullptr || C->failing()) {
    return;
  }

  BuildOopMaps();

  if (C->failing()) {
    return;
  }

  C2_MacroAssembler masm(cb);
// ...

    // Sum all instruction sizes to compute block size
    uint last_inst = block->number_of_nodes();
    uint blk_size = 0;
    for (uint j = 0; j < last_inst; j++) {
      _index = j;
      Node* nj = block->get_node(_index);
      // Handle machine instruction nodes
      if (nj->is_Mach()) {
        MachNode* mach = nj->as_Mach();
        blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
        reloc_size += mach->reloc();
        if (mach->is_MachCall()) {
          // add size information for trampoline stub
          // class CallStubImpl is platform-specific and defined in the *.ad files.
          stub_size  += CallStubImpl::size_call_trampoline();
          reloc_size += CallStubImpl::reloc_call_trampoline();

          MachCallNode *mcall = mach->as_MachCall();
          // This destination address is NOT PC-relative

          if (mcall->entry_point() != nullptr) {
            mcall->method_set((intptr_t)mcall->entry_point());
          }

          if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
            stub_size  += CompiledDirectCall::to_interp_stub_size();
            reloc_size += CompiledDirectCall::reloc_to_interp_stub();
          }
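
          // The to_interp stub budgeted above is the static call stub a
          // direct Java call uses whenever the callee must run in the
          // interpreter; it is emitted into the stubs section of the buffer.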
        } else if (mach->is_MachSafePoint()) {
          // If call/safepoint are adjacent, account for possible
          // nop to disambiguate the two safepoints.
          // ScheduleAndBundle() can rearrange nodes in a block,
          // check for all offsets inside this block.
          if (last_call_adr >= blk_starts[i]) {
            blk_size += nop_size;
          }
        }
        if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
          // Nop is inserted between "avoid back to back" instructions.
          // ScheduleAndBundle() can rearrange nodes in a block,
          // check for all offsets inside this block.
          if (last_avoid_back_to_back_adr >= blk_starts[i]) {
            blk_size += nop_size;
// ...

    // New functionality:
    //   Assert if the local is not top. In product mode let the new node
    //   override the old entry.
    assert(local == C->top(), "LocArray collision");
    if (local == C->top()) {
      return;
    }
    array->pop();
  }
  const Type *t = local->bottom_type();

  // Is it a safepoint scalar object node?
  if (local->is_SafePointScalarObject()) {
    SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();

    ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
    if (sv == nullptr) {
      ciKlass* cik = t->is_oopptr()->exact_klass();
      assert(cik->is_instance_klass() ||
             cik->is_array_klass(), "Not supported allocation.");
      uint first_ind = spobj->first_index(sfpt->jvms());
      // Nullable, scalarized inline types have an is_init input
      // that needs to be checked before using the field values.
      ScopeValue* is_init = nullptr;
      if (cik->is_inlinetype()) {
        Node* init_node = sfpt->in(first_ind++);
        assert(init_node != nullptr, "is_init node not found");
        if (!init_node->is_top()) {
          const TypeInt* init_type = init_node->bottom_type()->is_int();
          if (init_node->is_Con()) {
            is_init = new ConstantIntValue(init_type->get_con());
          } else {
            OptoReg::Name init_reg = C->regalloc()->get_reg_first(init_node);
            is_init = new_loc_value(C->regalloc(), init_reg, Location::normal);
          }
        }
      }
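
      // At this point is_init is either nullptr (top input), a constant
      // (statically known initialization state), or a location value naming
      // the register or stack slot that holds the runtime is_init flag.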
      sv = new ObjectValue(spobj->_idx,
                           new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, is_init);
      set_sv_for_object_node(objs, sv);

      for (uint i = 0; i < spobj->n_fields(); i++) {
        Node* fld_node = sfpt->in(first_ind+i);
        (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
      }
    }
    array->append(sv);
    return;
  } else if (local->is_SafePointScalarMerge()) {
    SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
    ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);

    if (mv == nullptr) {
      GrowableArray<ScopeValue*> deps;

      int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
      (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
      assert(deps.length() == 1, "missing value");

      int selector_idx = smerge->selector_idx(sfpt->jvms());
      (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);
// ...

      continue;
    }

    ObjectValue* other = sv_for_node_id(objs, n->_idx);
    if (ov == other) {
      return true;
    }
  }
  return false;
}

//--------------------------Process_OopMap_Node--------------------------------
void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
  // Handle special safepoint nodes for synchronization
  MachSafePointNode *sfn = mach->as_MachSafePoint();
  MachCallNode *mcall;

  int safepoint_pc_offset = current_offset;
  bool is_method_handle_invoke = false;
  bool return_oop = false;
  bool return_scalarized = false;
  bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
  bool arg_escape = false;

  // Add the safepoint in the DebugInfoRecorder
  if (!mach->is_MachCall()) {
    mcall = nullptr;
    C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
  } else {
    mcall = mach->as_MachCall();

    // Is the call a MethodHandle call?
    if (mcall->is_MachCallJava()) {
      if (mcall->as_MachCallJava()->_method_handle_invoke) {
        assert(C->has_method_handle_invokes(), "must have been set during call generation");
        is_method_handle_invoke = true;
      }
      arg_escape = mcall->as_MachCallJava()->_arg_escape;
    }

    // Check if a call returns an object.
    if (mcall->returns_pointer() || mcall->returns_scalarized()) {
      return_oop = true;
    }
    if (mcall->returns_scalarized()) {
      return_scalarized = true;
    }
    safepoint_pc_offset += mcall->ret_addr_offset();
    C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
  }
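
  // For a call, the recorded safepoint PC is the return address
  // (current_offset + ret_addr_offset()): that is the PC found in the frame
  // while the callee executes, so debug info and oop maps are keyed to it.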

  // Loop over the JVMState list to add scope information
  // Do not skip safepoints with a null method, they need monitor info
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();

  // Allocate the object pool for scalar-replaced objects -- the map from
  // small-integer keys (which can be recorded in the local and ostack
  // arrays) to descriptions of the object state.
  GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();

  // Visit scopes from oldest to youngest.
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int idx;
    ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
    // Safepoints that do not have method() set only provide oop-map and monitor info
// ...

    DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
    DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);

    // Make method available for all Safepoints
    ciMethod* scope_method = method ? method : C->method();
    // Describe the scope here
    assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
    assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
    // Now we can describe the scope.
    methodHandle null_mh;
    bool rethrow_exception = false;
    C->debug_info()->describe_scope(
      safepoint_pc_offset,
      null_mh,
      scope_method,
      jvms->bci(),
      jvms->should_reexecute(),
      rethrow_exception,
      is_method_handle_invoke,
      return_oop,
      return_scalarized,
      has_ea_local_in_scope,
      arg_escape,
      locvals,
      expvals,
      monvals
    );
  } // End jvms loop

  // Mark the end of the scope set.
  C->debug_info()->end_safepoint(safepoint_pc_offset);
}


// A simplified version of Process_OopMap_Node, to handle non-safepoints.
class NonSafepointEmitter {
  Compile* C;
  JVMState* _pending_jvms;
  int _pending_offset;

// ...

        MachNode *nop = new MachNopNode(nops_cnt);
        block->insert_node(nop, j++);
        last_inst++;
        C->cfg()->map_node_to_block(nop, block);
        // Ensure enough space.
        masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
        if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
          C->record_failure("CodeCache is full");
          return;
        }
        nop->emit(masm, C->regalloc());
        masm->code()->flush_bundle(true);
        current_offset = masm->offset();
      }
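
      // A null blob here means the expansion request above could not be
      // satisfied (the code cache is out of space); the bailout also covers
      // the case where the CompileBroker has stopped accepting new jobs.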

      bool observe_safepoint = is_sfn;
      // Remember the start of the last call in a basic block
      if (is_mcall) {
        MachCallNode *mcall = mach->as_MachCall();

        if (mcall->entry_point() != nullptr) {
          // This destination address is NOT PC-relative
          mcall->method_set((intptr_t)mcall->entry_point());
        }

        // Save the return address
        call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();

        observe_safepoint = mcall->guaranteed_safepoint();
      }
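
      // A MachCall can opt out of safepoint bookkeeping: when
      // guaranteed_safepoint() is false, no debug info is recorded for it
      // below even though its return address was still noted above.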

      // sfn will be valid whenever mcall is valid now because of inheritance
      if (observe_safepoint) {
        // Handle special safepoint nodes for synchronization
        if (!is_mcall) {
          MachSafePointNode *sfn = mach->as_MachSafePoint();
          // !!!!! Stubs only need an oopmap right now, so bail out
          if (sfn->jvms()->method() == nullptr) {
            // Write the oopmap directly to the code blob??!!
            continue;
          }
        } // End synchronization

        non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
// ...

      if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
        node_offsets[n->_idx] = masm->offset();
      }
#endif
      assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");

      // "Normal" instruction case
      DEBUG_ONLY(uint instr_offset = masm->offset());
      n->emit(masm, C->regalloc());
      current_offset = masm->offset();

      // Above we only verified that there is enough space in the instruction section.
      // However, the instruction may emit stubs that cause code buffer expansion.
      // Bail out here if expansion failed due to a lack of code cache space.
      if (C->failing()) {
        return;
      }

      assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
             "ret_addr_offset() not within emitted code");

#ifdef ASSERT
      uint n_size = n->size(C->regalloc());
      if (n_size < (current_offset-instr_offset)) {
        MachNode* mach = n->as_Mach();
        n->dump();
        mach->dump_format(C->regalloc(), tty);
        tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
        Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
        tty->print_cr(" ------------------- ");
        BufferBlob* blob = this->scratch_buffer_blob();
        address blob_begin = blob->content_begin();
        Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
        assert(false, "wrong size of mach node");
      }
#endif
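
      // The debug-only check above catches an instruction that emitted more
      // bytes than its size() estimate (which came from scratch-buffer
      // emission); it disassembles both copies before asserting.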
      non_safepoints.observe_instruction(n, current_offset);

      // mcall is last "call" that can be a safepoint
      // record it so we can see if a poll will directly follow it
      // in which case we'll need a pad to make the PcDesc sites unique
// ...

        anti_do_use(b, n, _regalloc->get_reg_first(def));
        anti_do_use(b, n, _regalloc->get_reg_second(def));
      }
    }
    // Do not allow defs of new derived values to float above GC
    // points unless the base is definitely available at the GC point.

    Node *m = b->get_node(i);

    // Add precedence edge from following safepoint to use of derived pointer
    if (last_safept_node != end_node &&
        m != last_safept_node) {
      for (uint k = 1; k < m->req(); k++) {
        const Type *t = m->in(k)->bottom_type();
        if (t->isa_oop_ptr() &&
            t->is_ptr()->offset() != 0) {
          last_safept_node->add_prec(m);
          break;
        }
      }

      // Do not allow a CheckCastPP node whose input is a raw pointer to
      // float past a safepoint. This can occur when a buffered inline
      // type is allocated in a loop and the CheckCastPP from that
      // allocation is reused outside the loop. If the use inside the
      // loop is scalarized the CheckCastPP will no longer be connected
      // to the loop safepoint. See JDK-8264340.
      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
        Node *def = m->in(1);
        if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
          last_safept_node->add_prec(m);
        }
      }
    }

    if (n->jvms()) { // Precedence edge from derived to safept
      // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
      if (b->get_node(last_safept) != last_safept_node) {
        last_safept = b->find_node(last_safept_node);
      }
      for (uint j = last_safept; j > i; j--) {
        Node *mach = b->get_node(j);
        if (mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP) {
          mach->add_prec(n);
        }
      }
      last_safept = i;
      last_safept_node = m;
    }
  }

  if (fat_proj_seen) {
    // Garbage collect pinch nodes that were not consumed.
    // They are usually created by a fat kill MachProj for a call.
// ...

}
#endif

//-----------------------init_scratch_buffer_blob------------------------------
// Construct a temporary BufferBlob and cache it for this compile.
void PhaseOutput::init_scratch_buffer_blob(int const_size) {
  // If there is already a scratch buffer blob allocated and the
  // constant section is big enough, use it. Otherwise free the
  // current and allocate a new one.
  BufferBlob* blob = scratch_buffer_blob();
  if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
    // Use the current blob.
  } else {
    if (blob != nullptr) {
      BufferBlob::free(blob);
    }

    ResourceMark rm;
    _scratch_const_size = const_size;
    int size = C2Compiler::initial_code_buffer_size(const_size);
    if (C->has_scalarized_args()) {
      // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
      // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
      ciMethod* method = C->method();
      int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
      int arg_num = 0;
      if (!method->is_static()) {
        if (method->is_scalarized_arg(arg_num)) {
          size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
        }
        arg_num++;
      }
      for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
        if (method->is_scalarized_arg(arg_num)) {
          size += str.type()->as_inline_klass()->oop_count() * barrier_size;
        }
        arg_num++;
      }
    }
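
    // Rough budget: every oop field of a scalarized argument may need a
    // GC-barriered load when unpacking the buffered value; e.g. a receiver
    // with three oop fields adds 3 * 200 = 600 bytes to the estimate under ZGC.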
    blob = BufferBlob::create("Compile::scratch_buffer", size);
    // Record the buffer blob for next time.
    set_scratch_buffer_blob(blob);
    // Have we run out of code space?
    if (scratch_buffer_blob() == nullptr) {
      // Let CompileBroker disable further compilations.
      C->record_failure("Not enough space for scratch buffer in CodeCache");
      return;
    }
  }

  // Initialize the relocation buffers
  relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
  set_scratch_locs_memory(locs_buf);
}


//-----------------------scratch_emit_size-------------------------------------
// Helper function that computes size by emitting code
uint PhaseOutput::scratch_emit_size(const Node* n) {
// ...

  buf.insts()->set_scratch_emit();
  buf.stubs()->set_scratch_emit();

  // Do the emission.

  Label fakeL; // Fake label for branch instructions.
  Label* saveL = nullptr;
  uint save_bnum = 0;
  bool is_branch = n->is_MachBranch();
  C2_MacroAssembler masm(&buf);
  masm.bind(fakeL);
  if (is_branch) {
    n->as_MachBranch()->save_label(&saveL, &save_bnum);
    n->as_MachBranch()->label_set(&fakeL, 0);
  }
  n->emit(&masm, C->regalloc());

  // Emitting into the scratch buffer should not fail
  assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());

  // Restore label.
  if (is_branch) {
    n->as_MachBranch()->label_set(saveL, save_bnum);
  }

  // End scratch_emit_size section.
  set_in_scratch_emit_size(false);

  return buf.insts_size();
}
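
// A MachBranch needs a bound target to emit, so the scratch pass above
// temporarily redirects it to fakeL (bound at the buffer start) and restores
// the original label afterwards, leaving the node unchanged for the real
// emission.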

void PhaseOutput::install() {
  if (!C->should_install_code()) {
    return;
  } else if (C->stub_function() != nullptr) {
    install_stub(C->stub_name());
  } else {
    install_code(C->method(),
                 C->entry_bci(),
                 CompileBroker::compiler2(),
                 C->has_unsafe_access(),
                 SharedRuntime::is_wide_vector(C->max_vector_size()));
  }
}

void PhaseOutput::install_code(ciMethod* target,
                               int entry_bci,
                               AbstractCompiler* compiler,
                               bool has_unsafe_access,
                               bool has_wide_vectors) {
  // Check if we want to skip execution of all compiled code.
  {
#ifndef PRODUCT
    if (OptoNoExecute) {
      C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
      return;
    }
#endif
    Compile::TracePhase tp(_t_registerMethod);

    if (C->is_osr_compilation()) {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
    } else {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
      if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
        _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
      }
      if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
        _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
      }
      if (_code_offsets.value(CodeOffsets::Entry) == -1) {
        _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
      }
      _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
    }
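
    // The -1 values tested above are "unset" sentinels (see the "will be
    // patched later" note in Output()); any entry point that was never given
    // its own offset collapses onto the verified entry at _first_block_size.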

    C->env()->register_method(target,
                              entry_bci,
                              &_code_offsets,
                              _orig_pc_slot_offset_in_bytes,
                              code_buffer(),
                              frame_size_in_words(),
                              _oop_map_set,
                              &_handler_table,
                              inc_table(),
                              compiler,
                              has_unsafe_access,
                              SharedRuntime::is_wide_vector(C->max_vector_size()),
                              C->has_monitors(),
                              C->has_scoped_access(),
                              0);

    if (C->log() != nullptr) { // Print code cache state into compiler log
      C->log()->code_cache_state();
    }
  }
}
void PhaseOutput::install_stub(const char* stub_name) {
  // Entry point will be accessed using stub_entry_point();
  if (code_buffer() == nullptr) {
    Matcher::soft_match_failure();
  } else {
    if (PrintAssembly && (WizardMode || Verbose)) {
      tty->print_cr("### Stub::%s", stub_name);
    }

    if (!C->failing()) {
      assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");

      // Make the NMethod
      // For now we mark the frame as never safe for profile stackwalking
      RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,