16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/assembler.inline.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/debugInfo.hpp"
30 #include "code/debugInfoRec.hpp"
31 #include "compiler/compileBroker.hpp"
32 #include "compiler/compilerDirectives.hpp"
33 #include "compiler/disassembler.hpp"
34 #include "compiler/oopMap.hpp"
35 #include "gc/shared/barrierSet.hpp"
36 #include "gc/shared/c2/barrierSetC2.hpp"
37 #include "memory/allocation.inline.hpp"
38 #include "memory/allocation.hpp"
39 #include "opto/ad.hpp"
40 #include "opto/block.hpp"
41 #include "opto/c2compiler.hpp"
42 #include "opto/c2_MacroAssembler.hpp"
43 #include "opto/callnode.hpp"
44 #include "opto/cfgnode.hpp"
45 #include "opto/locknode.hpp"
46 #include "opto/machnode.hpp"
47 #include "opto/node.hpp"
48 #include "opto/optoreg.hpp"
49 #include "opto/output.hpp"
50 #include "opto/regalloc.hpp"
51 #include "opto/runtime.hpp"
52 #include "opto/subnode.hpp"
53 #include "opto/type.hpp"
54 #include "runtime/handles.inline.hpp"
55 #include "runtime/sharedRuntime.hpp"
229 _first_block_size(0),
230 _handler_table(),
231 _inc_table(),
232 _stub_list(),
233 _oop_map_set(nullptr),
234 _scratch_buffer_blob(nullptr),
235 _scratch_locs_memory(nullptr),
236 _scratch_const_size(-1),
237 _in_scratch_emit_size(false),
238 _frame_slots(0),
239 _code_offsets(),
240 _node_bundling_limit(0),
241 _node_bundling_base(nullptr),
242 _orig_pc_slot(0),
243 _orig_pc_slot_offset_in_bytes(0),
244 _buf_sizes(),
245 _block(nullptr),
246 _index(0) {
247 C->set_output(this);
248 if (C->stub_name() == nullptr) {
249 _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
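// A worked example (assuming the usual 64-bit values, sizeof(address) == 8
// and VMRegImpl::stack_slot_size == 4): the saved PC occupies 8 / 4 = 2
// stack slots, so _orig_pc_slot lands two slots below the end of the
// fixed-slot area.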
250 }
251 }
252
253 PhaseOutput::~PhaseOutput() {
254 C->set_output(nullptr);
255 if (_scratch_buffer_blob != nullptr) {
256 BufferBlob::free(_scratch_buffer_blob);
257 }
258 }
259
260 void PhaseOutput::perform_mach_node_analysis() {
261 // Late barrier analysis must be done after schedule and bundle.
262 // Otherwise, liveness-based spilling will fail.
263 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
264 bs->late_barrier_analysis();
265
266 pd_perform_mach_node_analysis();
267
268 C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
269 }
270
271 // Convert Nodes to instruction bits and pass off to the VM
272 void PhaseOutput::Output() {
273 // The RootNode's block must be empty at this point.
274 assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
275
276 // The number of new nodes (mostly MachNop) is proportional to
277 // the number of Java calls and inner loops which are aligned.
278 if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
279 C->inner_loops()*(OptoLoopAlignment-1)),
280 "out of nodes before code generation" ) ) {
281 return;
282 }
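// A rough illustration (assuming the default OptoLoopAlignment of 16):
// a method with 10 Java calls and 2 aligned inner loops budgets about
// NodeLimitFudgeFactor + 10*3 + 2*15 extra nodes for the MachNops added
// during code generation.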
283 // Make sure I can find the Start Node
284 Block *entry = C->cfg()->get_block(1);
285 Block *broot = C->cfg()->get_root_block();
286
287 const StartNode *start = entry->head()->as_Start();
288
289 // Replace StartNode with prolog
290 MachPrologNode *prolog = new MachPrologNode();
291 entry->map_node(prolog, 0);
292 C->cfg()->map_node_to_block(prolog, entry);
293 C->cfg()->unmap_node_from_block(start); // start is no longer in any block
294
295 // Virtual methods need an unverified entry point
296
297 if( C->is_osr_compilation() ) {
298 if( PoisonOSREntry ) {
299 // TODO: Should use a ShouldNotReachHereNode...
300 C->cfg()->insert( broot, 0, new MachBreakpointNode() );
301 }
302 } else {
303 if( C->method() && !C->method()->flags().is_static() ) {
304 // Insert unverified entry point
305 C->cfg()->insert( broot, 0, new MachUEPNode() );
306 }
307
308 }
309
310 // Break before main entry point
311 if ((C->method() && C->directive()->BreakAtExecuteOption) ||
312 (OptoBreakpoint && C->is_method_compilation()) ||
313 (OptoBreakpointOSR && C->is_osr_compilation()) ||
314 (OptoBreakpointC2R && !C->method()) ) {
315 // checking for C->method() means that OptoBreakpoint does not apply to
316 // runtime stubs or frame converters
317 C->cfg()->insert( entry, 1, new MachBreakpointNode() );
318 }
319
320 // Insert epilogs before every return
321 for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
322 Block* block = C->cfg()->get_block(i);
323 if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
324 Node* m = block->end();
325 if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
326 MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
327 block->add_inst(epilog);
328 C->cfg()->map_node_to_block(epilog, block);
329 }
330 }
331 }
332
333 // Keeper of sizing aspects
334 _buf_sizes = BufferSizingData();
335
336 // Initialize code buffer
337 estimate_buffer_size(_buf_sizes._const);
338 if (C->failing()) return;
339
340 // Pre-compute the length of blocks and replace
341 // long branches with short ones if the machine supports it.
342 // Must be done before ScheduleAndBundle due to SPARC delay slots.
343 uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
344 blk_starts[0] = 0;
345 shorten_branches(blk_starts);
346
347 ScheduleAndBundle();
348 if (C->failing()) {
349 return;
350 }
351
352 perform_mach_node_analysis();
353
354 // Complete sizing of codebuffer
355 CodeBuffer* cb = init_buffer();
356 if (cb == nullptr || C->failing()) {
357 return;
358 }
359
360 BuildOopMaps();
361
362 if (C->failing()) {
363 return;
364 }
365
366 C2_MacroAssembler masm(cb);
488 // Sum all instruction sizes to compute block size
489 uint last_inst = block->number_of_nodes();
490 uint blk_size = 0;
491 for (uint j = 0; j < last_inst; j++) {
492 _index = j;
493 Node* nj = block->get_node(_index);
494 // Handle machine instruction nodes
495 if (nj->is_Mach()) {
496 MachNode* mach = nj->as_Mach();
497 blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
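// For example, a node that must start on a 16-address-unit boundary may
// need up to (16 - 1) * relocInfo::addr_unit() bytes of padding; the
// estimate always charges this worst case.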
498 reloc_size += mach->reloc();
499 if (mach->is_MachCall()) {
500 // add size information for trampoline stub
501 // class CallStubImpl is platform-specific and defined in the *.ad files.
502 stub_size += CallStubImpl::size_call_trampoline();
503 reloc_size += CallStubImpl::reloc_call_trampoline();
504
505 MachCallNode *mcall = mach->as_MachCall();
506 // This destination address is NOT PC-relative
507
508 mcall->method_set((intptr_t)mcall->entry_point());
509
510 if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
511 stub_size += CompiledDirectCall::to_interp_stub_size();
512 reloc_size += CompiledDirectCall::reloc_to_interp_stub();
513 }
514 } else if (mach->is_MachSafePoint()) {
515 // If call/safepoint are adjacent, account for possible
516 // nop to disambiguate the two safepoints.
517 // ScheduleAndBundle() can rearrange nodes in a block,
518 // check for all offsets inside this block.
519 if (last_call_adr >= blk_starts[i]) {
520 blk_size += nop_size;
521 }
522 }
523 if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
524 // Nop is inserted between "avoid back to back" instructions.
525 // ScheduleAndBundle() can rearrange nodes in a block,
526 // check for all offsets inside this block.
527 if (last_avoid_back_to_back_adr >= blk_starts[i]) {
528 blk_size += nop_size;
743 // New functionality:
744 // Assert if the local is not top. In product mode let the new node
745 // override the old entry.
746 assert(local == C->top(), "LocArray collision");
747 if (local == C->top()) {
748 return;
749 }
750 array->pop();
751 }
752 const Type *t = local->bottom_type();
753
754 // Is it a safepoint scalar object node?
755 if (local->is_SafePointScalarObject()) {
756 SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
757
758 ObjectValue* sv = (ObjectValue*) sv_for_node_id(objs, spobj->_idx);
759 if (sv == nullptr) {
760 ciKlass* cik = t->is_oopptr()->exact_klass();
761 assert(cik->is_instance_klass() ||
762 cik->is_array_klass(), "Not supported allocation.");
763 sv = new ObjectValue(spobj->_idx,
764 new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
765 set_sv_for_object_node(objs, sv);
766
767 uint first_ind = spobj->first_index(sfpt->jvms());
768 for (uint i = 0; i < spobj->n_fields(); i++) {
769 Node* fld_node = sfpt->in(first_ind+i);
770 (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
771 }
772 }
773 array->append(sv);
774 return;
775 } else if (local->is_SafePointScalarMerge()) {
776 SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
777 ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
778
779 if (mv == nullptr) {
780 GrowableArray<ScopeValue*> deps;
781
782 int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
783 (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
784 assert(deps.length() == 1, "missing value");
785
786 int selector_idx = smerge->selector_idx(sfpt->jvms());
787 (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);
966 bool PhaseOutput::contains_as_owner(GrowableArray<MonitorValue*> *monarray, ObjectValue *ov) const {
967 for (int k = 0; k < monarray->length(); k++) {
968 MonitorValue* mv = monarray->at(k);
969 if (mv->owner() == ov) {
970 return true;
971 }
972 }
973
974 return false;
975 }
976
977 //--------------------------Process_OopMap_Node--------------------------------
978 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
979 // Handle special safepoint nodes for synchronization
980 MachSafePointNode *sfn = mach->as_MachSafePoint();
981 MachCallNode *mcall;
982
983 int safepoint_pc_offset = current_offset;
984 bool is_method_handle_invoke = false;
985 bool return_oop = false;
986 bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
987 bool arg_escape = false;
988
989 // Add the safepoint in the DebugInfoRecorder
990 if( !mach->is_MachCall() ) {
991 mcall = nullptr;
992 C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
993 } else {
994 mcall = mach->as_MachCall();
995
996 // Is the call a MethodHandle call?
997 if (mcall->is_MachCallJava()) {
998 if (mcall->as_MachCallJava()->_method_handle_invoke) {
999 assert(C->has_method_handle_invokes(), "must have been set during call generation");
1000 is_method_handle_invoke = true;
1001 }
1002 arg_escape = mcall->as_MachCallJava()->_arg_escape;
1003 }
1004
1005 // Check if a call returns an object.
1006 if (mcall->returns_pointer()) {
1007 return_oop = true;
1008 }
1009 safepoint_pc_offset += mcall->ret_addr_offset();
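// Debug info for a call is keyed by its return address rather than the
// call instruction itself: with hypothetical offsets, a call emitted at
// offset 100 whose ret_addr_offset() is 5 records its safepoint at 105.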
1010 C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1011 }
1012
1013 // Loop over the JVMState list to add scope information
1014 // Do not skip safepoints with a null method, they need monitor info
1015 JVMState* youngest_jvms = sfn->jvms();
1016 int max_depth = youngest_jvms->depth();
1017
1018 // Allocate the object pool for scalar-replaced objects -- the map from
1019 // small-integer keys (which can be recorded in the local and ostack
1020 // arrays) to descriptions of the object state.
1021 GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1022
1023 // Visit scopes from oldest to youngest.
1024 for (int depth = 1; depth <= max_depth; depth++) {
1025 JVMState* jvms = youngest_jvms->of_depth(depth);
1026 int idx;
1027 ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1028 // Safepoints that do not have method() set only provide oop-map and monitor info
1151 DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1152 DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1153
1154 // Make method available for all Safepoints
1155 ciMethod* scope_method = method ? method : C->method();
1156 // Describe the scope here
1157 assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1158 assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1159 // Now we can describe the scope.
1160 methodHandle null_mh;
1161 bool rethrow_exception = false;
1162 C->debug_info()->describe_scope(
1163 safepoint_pc_offset,
1164 null_mh,
1165 scope_method,
1166 jvms->bci(),
1167 jvms->should_reexecute(),
1168 rethrow_exception,
1169 is_method_handle_invoke,
1170 return_oop,
1171 has_ea_local_in_scope,
1172 arg_escape,
1173 locvals,
1174 expvals,
1175 monvals
1176 );
1177 } // End jvms loop
1178
1179 // Mark the end of the scope set.
1180 C->debug_info()->end_safepoint(safepoint_pc_offset);
1181 }
1182
1183
1184
1185 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1186 class NonSafepointEmitter {
1187 Compile* C;
1188 JVMState* _pending_jvms;
1189 int _pending_offset;
1190
1525 MachNode *nop = new MachNopNode(nops_cnt);
1526 block->insert_node(nop, j++);
1527 last_inst++;
1528 C->cfg()->map_node_to_block(nop, block);
1529 // Ensure enough space.
1530 masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1531 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1532 C->record_failure("CodeCache is full");
1533 return;
1534 }
1535 nop->emit(masm, C->regalloc());
1536 masm->code()->flush_bundle(true);
1537 current_offset = masm->offset();
1538 }
1539
1540 bool observe_safepoint = is_sfn;
1541 // Remember the start of the last call in a basic block
1542 if (is_mcall) {
1543 MachCallNode *mcall = mach->as_MachCall();
1544
1545 // This destination address is NOT PC-relative
1546 mcall->method_set((intptr_t)mcall->entry_point());
1547
1548 // Save the return address
1549 call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1550
1551 observe_safepoint = mcall->guaranteed_safepoint();
1552 }
1553
1554 // Because MachCallNode inherits from MachSafePointNode, sfn is valid whenever mcall is valid.
1555 if (observe_safepoint) {
1556 // Handle special safepoint nodes for synchronization
1557 if (!is_mcall) {
1558 MachSafePointNode *sfn = mach->as_MachSafePoint();
1559 // !!!!! Stubs only need an oopmap right now, so bail out
1560 if (sfn->jvms()->method() == nullptr) {
1561 // Write the oopmap directly to the code blob??!!
1562 continue;
1563 }
1564 } // End synchronization
1565
1566 non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1690 if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1691 node_offsets[n->_idx] = masm->offset();
1692 }
1693 #endif
1694 assert(!C->failing(), "Should not reach here if failing.");
1695
1696 // "Normal" instruction case
1697 DEBUG_ONLY(uint instr_offset = masm->offset());
1698 n->emit(masm, C->regalloc());
1699 current_offset = masm->offset();
1700
1701 // Above we only verified that there is enough space in the instruction section.
1702 // However, the instruction may emit stubs that cause code buffer expansion.
1703 // Bail out here if expansion failed due to a lack of code cache space.
1704 if (C->failing()) {
1705 return;
1706 }
1707
1708 assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1709 "ret_addr_offset() not within emitted code");
1710
1711 #ifdef ASSERT
1712 uint n_size = n->size(C->regalloc());
1713 if (n_size < (current_offset-instr_offset)) {
1714 MachNode* mach = n->as_Mach();
1715 n->dump();
1716 mach->dump_format(C->regalloc(), tty);
1717 tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1718 Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
1719 tty->print_cr(" ------------------- ");
1720 BufferBlob* blob = this->scratch_buffer_blob();
1721 address blob_begin = blob->content_begin();
1722 Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1723 assert(false, "wrong size of mach node");
1724 }
1725 #endif
1726 non_safepoints.observe_instruction(n, current_offset);
1727
1728 // mcall is the last "call" that can be a safepoint;
1729 // record it so we can tell whether a poll directly follows it,
1730 // in which case we'll need a pad to make the PcDesc sites unique.
3124 anti_do_use( b, n, _regalloc->get_reg_first(def) );
3125 anti_do_use( b, n, _regalloc->get_reg_second(def) );
3126 }
3127 }
3128 // Do not allow defs of new derived values to float above GC
3129 // points unless the base is definitely available at the GC point.
3130
3131 Node *m = b->get_node(i);
3132
3133 // Add precedence edge from following safepoint to use of derived pointer
3134 if( last_safept_node != end_node &&
3135 m != last_safept_node) {
3136 for (uint k = 1; k < m->req(); k++) {
3137 const Type *t = m->in(k)->bottom_type();
3138 if( t->isa_oop_ptr() &&
3139 t->is_ptr()->offset() != 0 ) {
3140 last_safept_node->add_prec( m );
3141 break;
3142 }
3143 }
3144 }
3145
3146 if( n->jvms() ) { // Precedence edge from derived to safept
3147 // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3148 if( b->get_node(last_safept) != last_safept_node ) {
3149 last_safept = b->find_node(last_safept_node);
3150 }
3151 for( uint j=last_safept; j > i; j-- ) {
3152 Node *mach = b->get_node(j);
3153 if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3154 mach->add_prec( n );
3155 }
3156 last_safept = i;
3157 last_safept_node = m;
3158 }
3159 }
3160
3161 if (fat_proj_seen) {
3162 // Garbage collect pinch nodes that were not consumed.
3163 // They are usually created by a fat kill MachProj for a call.
3282 }
3283 #endif
3284
3285 //-----------------------init_scratch_buffer_blob------------------------------
3286 // Construct a temporary BufferBlob and cache it for this compile.
3287 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3288 // If there is already a scratch buffer blob allocated and the
3289 // constant section is big enough, use it. Otherwise free the
3290 // current and allocate a new one.
3291 BufferBlob* blob = scratch_buffer_blob();
3292 if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3293 // Use the current blob.
3294 } else {
3295 if (blob != nullptr) {
3296 BufferBlob::free(blob);
3297 }
3298
3299 ResourceMark rm;
3300 _scratch_const_size = const_size;
3301 int size = C2Compiler::initial_code_buffer_size(const_size);
3302 blob = BufferBlob::create("Compile::scratch_buffer", size);
3303 // Record the buffer blob for next time.
3304 set_scratch_buffer_blob(blob);
3305 // Have we run out of code space?
3306 if (scratch_buffer_blob() == nullptr) {
3307 // Let CompileBroker disable further compilations.
3308 C->record_failure("Not enough space for scratch buffer in CodeCache");
3309 return;
3310 }
3311 }
3312
3313 // Initialize the relocation buffers
3314 relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3315 set_scratch_locs_memory(locs_buf);
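// The scratch relocation area is carved from the tail of the blob:
// locs_buf covers the last MAX_locs_size relocInfo entries, i.e. the range
// [content_end() - MAX_locs_size * sizeof(relocInfo), content_end()).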
3316 }
3317
3318
3319 //-----------------------scratch_emit_size-------------------------------------
3320 // Helper function that computes size by emitting code
3321 uint PhaseOutput::scratch_emit_size(const Node* n) {
3352 buf.insts()->set_scratch_emit();
3353 buf.stubs()->set_scratch_emit();
3354
3355 // Do the emission.
3356
3357 Label fakeL; // Fake label for branch instructions.
3358 Label* saveL = nullptr;
3359 uint save_bnum = 0;
3360 bool is_branch = n->is_MachBranch();
3361 C2_MacroAssembler masm(&buf);
3362 masm.bind(fakeL);
3363 if (is_branch) {
3364 n->as_MachBranch()->save_label(&saveL, &save_bnum);
3365 n->as_MachBranch()->label_set(&fakeL, 0);
3366 }
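// Binding fakeL up front gives any branch a resolved target, so emitting
// purely to measure size never trips over an unbound label; the real
// label and block number were saved above and are restored below.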
3367 n->emit(&masm, C->regalloc());
3368
3369 // Emitting into the scratch buffer should not fail
3370 assert(!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3371
3372 if (is_branch) // Restore label.
3373 n->as_MachBranch()->label_set(saveL, save_bnum);
3374
3375 // End scratch_emit_size section.
3376 set_in_scratch_emit_size(false);
3377
3378 return buf.insts_size();
3379 }
3380
3381 void PhaseOutput::install() {
3382 if (!C->should_install_code()) {
3383 return;
3384 } else if (C->stub_function() != nullptr) {
3385 install_stub(C->stub_name());
3386 } else {
3387 install_code(C->method(),
3388 C->entry_bci(),
3389 CompileBroker::compiler2(),
3390 C->has_unsafe_access(),
3391 SharedRuntime::is_wide_vector(C->max_vector_size()));
3392 }
3393 }
3394
3395 void PhaseOutput::install_code(ciMethod* target,
3396 int entry_bci,
3397 AbstractCompiler* compiler,
3398 bool has_unsafe_access,
3399 bool has_wide_vectors) {
3400 // Check if we want to skip execution of all compiled code.
3401 {
3402 #ifndef PRODUCT
3403 if (OptoNoExecute) {
3404 C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
3405 return;
3406 }
3407 #endif
3408 Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3409
3410 if (C->is_osr_compilation()) {
3411 _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3412 _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3413 } else {
3414 if (!target->is_static()) {
3415 // The UEP of an nmethod ensures that the VEP is padded. However, the padding of the UEP is placed
3416 // before the inline cache check, so we don't have to execute any nop instructions when dispatching
3417 // through the UEP, yet we can ensure that the VEP is aligned appropriately.
3418 _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3419 }
3420 _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3421 _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3422 }
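// Resulting entry-point layout for a normal compile, where S is the size
// of the first (prolog) block:
//   OSR_Entry:            0 (unused here)
//   Entry (UEP):          S - MacroAssembler::ic_check_size()  (non-static methods only)
//   Verified_Entry (VEP): S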
3423
3424 C->env()->register_method(target,
3425 entry_bci,
3426 &_code_offsets,
3427 _orig_pc_slot_offset_in_bytes,
3428 code_buffer(),
3429 frame_size_in_words(),
3430 oop_map_set(),
3431 &_handler_table,
3432 inc_table(),
3433 compiler,
3434 has_unsafe_access,
3435 SharedRuntime::is_wide_vector(C->max_vector_size()),
3436 C->has_monitors(),
3437 0);
3438
3439 if (C->log() != nullptr) { // Print code cache state into compiler log
3440 C->log()->code_cache_state();
3441 }
3442 }
3443 }
3444 void PhaseOutput::install_stub(const char* stub_name) {
3445 // Entry point will be accessed using stub_entry_point();
3446 if (code_buffer() == nullptr) {
3447 Matcher::soft_match_failure();
3448 } else {
3449 if (PrintAssembly && (WizardMode || Verbose))
3450 tty->print_cr("### Stub::%s", stub_name);
3451
3452 if (!C->failing()) {
3453 assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3454
3455 // Make the NMethod
3456 // For now we mark the frame as never safe for profile stackwalking
3457 RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/assembler.inline.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/debugInfo.hpp"
30 #include "code/debugInfoRec.hpp"
31 #include "compiler/compileBroker.hpp"
32 #include "compiler/compilerDirectives.hpp"
33 #include "compiler/disassembler.hpp"
34 #include "compiler/oopMap.hpp"
35 #include "gc/shared/barrierSet.hpp"
36 #include "gc/shared/gc_globals.hpp"
37 #include "gc/shared/c2/barrierSetC2.hpp"
38 #include "memory/allocation.inline.hpp"
39 #include "memory/allocation.hpp"
40 #include "opto/ad.hpp"
41 #include "opto/block.hpp"
42 #include "opto/c2compiler.hpp"
43 #include "opto/c2_MacroAssembler.hpp"
44 #include "opto/callnode.hpp"
45 #include "opto/cfgnode.hpp"
46 #include "opto/locknode.hpp"
47 #include "opto/machnode.hpp"
48 #include "opto/node.hpp"
49 #include "opto/optoreg.hpp"
50 #include "opto/output.hpp"
51 #include "opto/regalloc.hpp"
52 #include "opto/runtime.hpp"
53 #include "opto/subnode.hpp"
54 #include "opto/type.hpp"
55 #include "runtime/handles.inline.hpp"
56 #include "runtime/sharedRuntime.hpp"
230 _first_block_size(0),
231 _handler_table(),
232 _inc_table(),
233 _stub_list(),
234 _oop_map_set(nullptr),
235 _scratch_buffer_blob(nullptr),
236 _scratch_locs_memory(nullptr),
237 _scratch_const_size(-1),
238 _in_scratch_emit_size(false),
239 _frame_slots(0),
240 _code_offsets(),
241 _node_bundling_limit(0),
242 _node_bundling_base(nullptr),
243 _orig_pc_slot(0),
244 _orig_pc_slot_offset_in_bytes(0),
245 _buf_sizes(),
246 _block(nullptr),
247 _index(0) {
248 C->set_output(this);
249 if (C->stub_name() == nullptr) {
250 int fixed_slots = C->fixed_slots();
251 if (C->needs_stack_repair()) {
252 fixed_slots -= 2;
253 }
254 // TODO 8284443 Only reserve extra slot if needed
255 if (InlineTypeReturnedAsFields) {
256 fixed_slots -= 2;
257 }
258 _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
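// A worked example (assuming sizeof(address) == 8 and 4-byte stack slots):
// if needs_stack_repair() and InlineTypeReturnedAsFields each reserve
// 2 slots, a method with C->fixed_slots() == F saves the original PC at
// slot F - 2 - 2 - 2 = F - 6.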
259 }
260 }
261
262 PhaseOutput::~PhaseOutput() {
263 C->set_output(nullptr);
264 if (_scratch_buffer_blob != nullptr) {
265 BufferBlob::free(_scratch_buffer_blob);
266 }
267 }
268
269 void PhaseOutput::perform_mach_node_analysis() {
270 // Late barrier analysis must be done after schedule and bundle.
271 // Otherwise, liveness-based spilling will fail.
272 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
273 bs->late_barrier_analysis();
274
275 pd_perform_mach_node_analysis();
276
277 C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
278 }
279
280 // Convert Nodes to instruction bits and pass off to the VM
281 void PhaseOutput::Output() {
282 // The RootNode's block must be empty at this point.
283 assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
284
285 // The number of new nodes (mostly MachNop) is proportional to
286 // the number of Java calls and inner loops which are aligned.
287 if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
288 C->inner_loops()*(OptoLoopAlignment-1)),
289 "out of nodes before code generation" ) ) {
290 return;
291 }
292 // Make sure I can find the Start Node
293 Block *entry = C->cfg()->get_block(1);
294 Block *broot = C->cfg()->get_root_block();
295
296 const StartNode *start = entry->head()->as_Start();
297
298 // Replace StartNode with prolog
299 Label verified_entry;
300 MachPrologNode* prolog = new MachPrologNode(&verified_entry);
301 entry->map_node(prolog, 0);
302 C->cfg()->map_node_to_block(prolog, entry);
303 C->cfg()->unmap_node_from_block(start); // start is no longer in any block
304
305 // Virtual methods need an unverified entry point
306 if (C->is_osr_compilation()) {
307 if (PoisonOSREntry) {
308 // TODO: Should use a ShouldNotReachHereNode...
309 C->cfg()->insert( broot, 0, new MachBreakpointNode() );
310 }
311 } else {
312 if (C->method()) {
313 if (C->method()->has_scalarized_args()) {
314 // Add entry point to unpack all inline type arguments
315 C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
316 if (!C->method()->is_static()) {
317 // Add verified/unverified entry points to only unpack inline type receiver at interface calls
318 C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
319 C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ true));
320 C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
321 }
322 } else if (!C->method()->is_static()) {
323 // Insert unverified entry point
324 C->cfg()->insert(broot, 0, new MachUEPNode());
325 }
326 }
327 }
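// For a non-static method with scalarized arguments, the four inserts at
// index 0 above leave broot ordered as: unverified receiver-only entry,
// verified receiver-only entry, unverified all-args entry, verified
// all-args entry. The entry-point offsets computed after shorten_branches()
// below rely on exactly this order.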
328
329 // Break before main entry point
330 if ((C->method() && C->directive()->BreakAtExecuteOption) ||
331 (OptoBreakpoint && C->is_method_compilation()) ||
332 (OptoBreakpointOSR && C->is_osr_compilation()) ||
333 (OptoBreakpointC2R && !C->method()) ) {
334 // checking for C->method() means that OptoBreakpoint does not apply to
335 // runtime stubs or frame converters
336 C->cfg()->insert( entry, 1, new MachBreakpointNode() );
337 }
338
339 // Insert epilogs before every return
340 for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
341 Block* block = C->cfg()->get_block(i);
342 if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
343 Node* m = block->end();
344 if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
345 MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
346 block->add_inst(epilog);
347 C->cfg()->map_node_to_block(epilog, block);
348 }
349 }
350 }
351
352 // Keeper of sizing aspects
353 _buf_sizes = BufferSizingData();
354
355 // Initialize code buffer
356 estimate_buffer_size(_buf_sizes._const);
357 if (C->failing()) return;
358
359 // Pre-compute the length of blocks and replace
360 // long branches with short ones if the machine supports it.
361 // Must be done before ScheduleAndBundle due to SPARC delay slots.
362 uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
363 blk_starts[0] = 0;
364 shorten_branches(blk_starts);
365
366 if (!C->is_osr_compilation() && C->has_scalarized_args()) {
367 // Compute the offsets of the entry points required by the inline type calling convention
368 if (!C->method()->is_static()) {
369 // We have entries at the beginning of the method, implemented by the first 4 nodes.
370 // Entry (unverified) @ offset 0
371 // Verified_Inline_Entry_RO
372 // Inline_Entry (unverified)
373 // Verified_Inline_Entry
374 uint offset = 0;
375 _code_offsets.set_value(CodeOffsets::Entry, offset);
376
377 offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
378 _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
379
380 offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
381 _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
382
383 offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
384 _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
385 } else {
386 _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
387 _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
388 }
389 }
390
391 ScheduleAndBundle();
392 if (C->failing()) {
393 return;
394 }
395
396 perform_mach_node_analysis();
397
398 // Complete sizing of codebuffer
399 CodeBuffer* cb = init_buffer();
400 if (cb == nullptr || C->failing()) {
401 return;
402 }
403
404 BuildOopMaps();
405
406 if (C->failing()) {
407 return;
408 }
409
410 C2_MacroAssembler masm(cb);
532 // Sum all instruction sizes to compute block size
533 uint last_inst = block->number_of_nodes();
534 uint blk_size = 0;
535 for (uint j = 0; j < last_inst; j++) {
536 _index = j;
537 Node* nj = block->get_node(_index);
538 // Handle machine instruction nodes
539 if (nj->is_Mach()) {
540 MachNode* mach = nj->as_Mach();
541 blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
542 reloc_size += mach->reloc();
543 if (mach->is_MachCall()) {
544 // add size information for trampoline stub
545 // class CallStubImpl is platform-specific and defined in the *.ad files.
546 stub_size += CallStubImpl::size_call_trampoline();
547 reloc_size += CallStubImpl::reloc_call_trampoline();
548
549 MachCallNode *mcall = mach->as_MachCall();
550 // This destination address is NOT PC-relative
551
552 if (mcall->entry_point() != nullptr) {
553 mcall->method_set((intptr_t)mcall->entry_point());
554 }
555
556 if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
557 stub_size += CompiledDirectCall::to_interp_stub_size();
558 reloc_size += CompiledDirectCall::reloc_to_interp_stub();
559 }
560 } else if (mach->is_MachSafePoint()) {
561 // If call/safepoint are adjacent, account for possible
562 // nop to disambiguate the two safepoints.
563 // ScheduleAndBundle() can rearrange nodes in a block,
564 // check for all offsets inside this block.
565 if (last_call_adr >= blk_starts[i]) {
566 blk_size += nop_size;
567 }
568 }
569 if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
570 // Nop is inserted between "avoid back to back" instructions.
571 // ScheduleAndBundle() can rearrange nodes in a block,
572 // check for all offsets inside this block.
573 if (last_avoid_back_to_back_adr >= blk_starts[i]) {
574 blk_size += nop_size;
789 // New functionality:
790 // Assert if the local is not top. In product mode let the new node
791 // override the old entry.
792 assert(local == C->top(), "LocArray collision");
793 if (local == C->top()) {
794 return;
795 }
796 array->pop();
797 }
798 const Type *t = local->bottom_type();
799
800 // Is it a safepoint scalar object node?
801 if (local->is_SafePointScalarObject()) {
802 SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
803
804 ObjectValue* sv = (ObjectValue*) sv_for_node_id(objs, spobj->_idx);
805 if (sv == nullptr) {
806 ciKlass* cik = t->is_oopptr()->exact_klass();
807 assert(cik->is_instance_klass() ||
808 cik->is_array_klass(), "Not supported allocation.");
809 uint first_ind = spobj->first_index(sfpt->jvms());
810 // Nullable, scalarized inline types have an is_init input
811 // that needs to be checked before using the field values.
812 ScopeValue* is_init = nullptr;
813 if (cik->is_inlinetype()) {
814 Node* init_node = sfpt->in(first_ind++);
815 assert(init_node != nullptr, "is_init node not found");
816 if (!init_node->is_top()) {
817 const TypeInt* init_type = init_node->bottom_type()->is_int();
818 if (init_node->is_Con()) {
819 is_init = new ConstantIntValue(init_type->get_con());
820 } else {
821 OptoReg::Name init_reg = C->regalloc()->get_reg_first(init_node);
822 is_init = new_loc_value(C->regalloc(), init_reg, Location::normal);
823 }
824 }
825 }
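// Example: for a nullable, scalarized inline type whose is_init input is
// the constant 1, deoptimization sees ConstantIntValue(1), meaning the
// object is non-null and the recorded field values are meaningful.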
826 sv = new ObjectValue(spobj->_idx,
827 new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, is_init);
828 set_sv_for_object_node(objs, sv);
829
830 for (uint i = 0; i < spobj->n_fields(); i++) {
831 Node* fld_node = sfpt->in(first_ind+i);
832 (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
833 }
834 }
835 array->append(sv);
836 return;
837 } else if (local->is_SafePointScalarMerge()) {
838 SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
839 ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
840
841 if (mv == nullptr) {
842 GrowableArray<ScopeValue*> deps;
843
844 int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
845 (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
846 assert(deps.length() == 1, "missing value");
847
848 int selector_idx = smerge->selector_idx(sfpt->jvms());
849 (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);
1028 bool PhaseOutput::contains_as_owner(GrowableArray<MonitorValue*> *monarray, ObjectValue *ov) const {
1029 for (int k = 0; k < monarray->length(); k++) {
1030 MonitorValue* mv = monarray->at(k);
1031 if (mv->owner() == ov) {
1032 return true;
1033 }
1034 }
1035
1036 return false;
1037 }
1038
1039 //--------------------------Process_OopMap_Node--------------------------------
1040 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1041 // Handle special safepoint nodes for synchronization
1042 MachSafePointNode *sfn = mach->as_MachSafePoint();
1043 MachCallNode *mcall;
1044
1045 int safepoint_pc_offset = current_offset;
1046 bool is_method_handle_invoke = false;
1047 bool return_oop = false;
1048 bool return_scalarized = false;
1049 bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1050 bool arg_escape = false;
1051
1052 // Add the safepoint in the DebugInfoRecorder
1053 if( !mach->is_MachCall() ) {
1054 mcall = nullptr;
1055 C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1056 } else {
1057 mcall = mach->as_MachCall();
1058
1059 // Is the call a MethodHandle call?
1060 if (mcall->is_MachCallJava()) {
1061 if (mcall->as_MachCallJava()->_method_handle_invoke) {
1062 assert(C->has_method_handle_invokes(), "must have been set during call generation");
1063 is_method_handle_invoke = true;
1064 }
1065 arg_escape = mcall->as_MachCallJava()->_arg_escape;
1066 }
1067
1068 // Check if a call returns an object.
1069 if (mcall->returns_pointer() || mcall->returns_scalarized()) {
1070 return_oop = true;
1071 }
1072 if (mcall->returns_scalarized()) {
1073 return_scalarized = true;
1074 }
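// Note: a scalarized return is also flagged as return_oop above, since the
// callee may hand back a buffered oop rather than scalarized fields
// (presumably decided only at runtime).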
1075 safepoint_pc_offset += mcall->ret_addr_offset();
1076 C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1077 }
1078
1079 // Loop over the JVMState list to add scope information
1080 // Do not skip safepoints with a null method, they need monitor info
1081 JVMState* youngest_jvms = sfn->jvms();
1082 int max_depth = youngest_jvms->depth();
1083
1084 // Allocate the object pool for scalar-replaced objects -- the map from
1085 // small-integer keys (which can be recorded in the local and ostack
1086 // arrays) to descriptions of the object state.
1087 GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1088
1089 // Visit scopes from oldest to youngest.
1090 for (int depth = 1; depth <= max_depth; depth++) {
1091 JVMState* jvms = youngest_jvms->of_depth(depth);
1092 int idx;
1093 ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1094 // Safepoints that do not have method() set only provide oop-map and monitor info
1217 DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1218 DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1219
1220 // Make method available for all Safepoints
1221 ciMethod* scope_method = method ? method : C->method();
1222 // Describe the scope here
1223 assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1224 assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1225 // Now we can describe the scope.
1226 methodHandle null_mh;
1227 bool rethrow_exception = false;
1228 C->debug_info()->describe_scope(
1229 safepoint_pc_offset,
1230 null_mh,
1231 scope_method,
1232 jvms->bci(),
1233 jvms->should_reexecute(),
1234 rethrow_exception,
1235 is_method_handle_invoke,
1236 return_oop,
1237 return_scalarized,
1238 has_ea_local_in_scope,
1239 arg_escape,
1240 locvals,
1241 expvals,
1242 monvals
1243 );
1244 } // End jvms loop
1245
1246 // Mark the end of the scope set.
1247 C->debug_info()->end_safepoint(safepoint_pc_offset);
1248 }
1249
1250
1251
1252 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1253 class NonSafepointEmitter {
1254 Compile* C;
1255 JVMState* _pending_jvms;
1256 int _pending_offset;
1257
1592 MachNode *nop = new MachNopNode(nops_cnt);
1593 block->insert_node(nop, j++);
1594 last_inst++;
1595 C->cfg()->map_node_to_block(nop, block);
1596 // Ensure enough space.
1597 masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1598 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1599 C->record_failure("CodeCache is full");
1600 return;
1601 }
1602 nop->emit(masm, C->regalloc());
1603 masm->code()->flush_bundle(true);
1604 current_offset = masm->offset();
1605 }
1606
1607 bool observe_safepoint = is_sfn;
1608 // Remember the start of the last call in a basic block
1609 if (is_mcall) {
1610 MachCallNode *mcall = mach->as_MachCall();
1611
1612 if (mcall->entry_point() != nullptr) {
1613 // This destination address is NOT PC-relative
1614 mcall->method_set((intptr_t)mcall->entry_point());
1615 }
1616
1617 // Save the return address
1618 call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1619
1620 observe_safepoint = mcall->guaranteed_safepoint();
1621 }
1622
1623 // Because MachCallNode inherits from MachSafePointNode, sfn is valid whenever mcall is valid.
1624 if (observe_safepoint) {
1625 // Handle special safepoint nodes for synchronization
1626 if (!is_mcall) {
1627 MachSafePointNode *sfn = mach->as_MachSafePoint();
1628 // !!!!! Stubs only need an oopmap right now, so bail out
1629 if (sfn->jvms()->method() == nullptr) {
1630 // Write the oopmap directly to the code blob??!!
1631 continue;
1632 }
1633 } // End synchronization
1634
1635 non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1759 if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1760 node_offsets[n->_idx] = masm->offset();
1761 }
1762 #endif
1763 assert(!C->failing(), "Should not reach here if failing.");
1764
1765 // "Normal" instruction case
1766 DEBUG_ONLY(uint instr_offset = masm->offset());
1767 n->emit(masm, C->regalloc());
1768 current_offset = masm->offset();
1769
1770 // Above we only verified that there is enough space in the instruction section.
1771 // However, the instruction may emit stubs that cause code buffer expansion.
1772 // Bail out here if expansion failed due to a lack of code cache space.
1773 if (C->failing()) {
1774 return;
1775 }
1776
1777 assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1778 "ret_addr_offset() not within emitted code");
1779 #ifdef ASSERT
1780 uint n_size = n->size(C->regalloc());
1781 if (n_size < (current_offset-instr_offset)) {
1782 MachNode* mach = n->as_Mach();
1783 n->dump();
1784 mach->dump_format(C->regalloc(), tty);
1785 tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1786 Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
1787 tty->print_cr(" ------------------- ");
1788 BufferBlob* blob = this->scratch_buffer_blob();
1789 address blob_begin = blob->content_begin();
1790 Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1791 assert(false, "wrong size of mach node");
1792 }
1793 #endif
1794 non_safepoints.observe_instruction(n, current_offset);
1795
1796 // mcall is the last "call" that can be a safepoint;
1797 // record it so we can tell whether a poll directly follows it,
1798 // in which case we'll need a pad to make the PcDesc sites unique.
3192 anti_do_use( b, n, _regalloc->get_reg_first(def) );
3193 anti_do_use( b, n, _regalloc->get_reg_second(def) );
3194 }
3195 }
3196 // Do not allow defs of new derived values to float above GC
3197 // points unless the base is definitely available at the GC point.
3198
3199 Node *m = b->get_node(i);
3200
3201 // Add precedence edge from following safepoint to use of derived pointer
3202 if( last_safept_node != end_node &&
3203 m != last_safept_node) {
3204 for (uint k = 1; k < m->req(); k++) {
3205 const Type *t = m->in(k)->bottom_type();
3206 if( t->isa_oop_ptr() &&
3207 t->is_ptr()->offset() != 0 ) {
3208 last_safept_node->add_prec( m );
3209 break;
3210 }
3211 }
3212
3213 // Do not allow a CheckCastPP node whose input is a raw pointer to
3214 // float past a safepoint. This can occur when a buffered inline
3215 // type is allocated in a loop and the CheckCastPP from that
3216 // allocation is reused outside the loop. If the use inside the
3217 // loop is scalarized the CheckCastPP will no longer be connected
3218 // to the loop safepoint. See JDK-8264340.
3219 if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
3220 Node *def = m->in(1);
3221 if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
3222 last_safept_node->add_prec(m);
3223 }
3224 }
3225 }
3226
3227 if( n->jvms() ) { // Precedence edge from derived to safept
3228 // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3229 if( b->get_node(last_safept) != last_safept_node ) {
3230 last_safept = b->find_node(last_safept_node);
3231 }
3232 for( uint j=last_safept; j > i; j-- ) {
3233 Node *mach = b->get_node(j);
3234 if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3235 mach->add_prec( n );
3236 }
3237 last_safept = i;
3238 last_safept_node = m;
3239 }
3240 }
3241
3242 if (fat_proj_seen) {
3243 // Garbage collect pinch nodes that were not consumed.
3244 // They are usually created by a fat kill MachProj for a call.
3363 }
3364 #endif
3365
3366 //-----------------------init_scratch_buffer_blob------------------------------
3367 // Construct a temporary BufferBlob and cache it for this compile.
3368 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3369 // If there is already a scratch buffer blob allocated and the
3370 // constant section is big enough, use it. Otherwise free the
3371 // current and allocate a new one.
3372 BufferBlob* blob = scratch_buffer_blob();
3373 if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3374 // Use the current blob.
3375 } else {
3376 if (blob != nullptr) {
3377 BufferBlob::free(blob);
3378 }
3379
3380 ResourceMark rm;
3381 _scratch_const_size = const_size;
3382 int size = C2Compiler::initial_code_buffer_size(const_size);
3383 if (C->has_scalarized_args()) {
3384 // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
3385 // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
3386 ciMethod* method = C->method();
3387 int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
3388 int arg_num = 0;
3389 if (!method->is_static()) {
3390 if (method->is_scalarized_arg(arg_num)) {
3391 size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
3392 }
3393 arg_num++;
3394 }
3395 for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
3396 if (method->is_scalarized_arg(arg_num)) {
3397 size += str.type()->as_inline_klass()->oop_count() * barrier_size;
3398 }
3399 arg_num++;
3400 }
3401 }
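// A rough worked example (product build, not using ZGC, so barrier_size
// is 7): a scalarized receiver whose inline klass holds 3 oop fields adds
// 3 * 7 = 21 bytes to the estimate; debug builds charge another 37 bytes
// per oop.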
3402 blob = BufferBlob::create("Compile::scratch_buffer", size);
3403 // Record the buffer blob for next time.
3404 set_scratch_buffer_blob(blob);
3405 // Have we run out of code space?
3406 if (scratch_buffer_blob() == nullptr) {
3407 // Let CompileBroker disable further compilations.
3408 C->record_failure("Not enough space for scratch buffer in CodeCache");
3409 return;
3410 }
3411 }
3412
3413 // Initialize the relocation buffers
3414 relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3415 set_scratch_locs_memory(locs_buf);
3416 }
3417
3418
3419 //-----------------------scratch_emit_size-------------------------------------
3420 // Helper function that computes size by emitting code
3421 uint PhaseOutput::scratch_emit_size(const Node* n) {
3452 buf.insts()->set_scratch_emit();
3453 buf.stubs()->set_scratch_emit();
3454
3455 // Do the emission.
3456
3457 Label fakeL; // Fake label for branch instructions.
3458 Label* saveL = nullptr;
3459 uint save_bnum = 0;
3460 bool is_branch = n->is_MachBranch();
3461 C2_MacroAssembler masm(&buf);
3462 masm.bind(fakeL);
3463 if (is_branch) {
3464 n->as_MachBranch()->save_label(&saveL, &save_bnum);
3465 n->as_MachBranch()->label_set(&fakeL, 0);
3466 }
3467 n->emit(&masm, C->regalloc());
3468
3469 // Emitting into the scratch buffer should not fail
3470 assert(!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3471
3472 // Restore label.
3473 if (is_branch) {
3474 n->as_MachBranch()->label_set(saveL, save_bnum);
3475 }
3476
3477 // End scratch_emit_size section.
3478 set_in_scratch_emit_size(false);
3479
3480 return buf.insts_size();
3481 }
3482
3483 void PhaseOutput::install() {
3484 if (!C->should_install_code()) {
3485 return;
3486 } else if (C->stub_function() != nullptr) {
3487 install_stub(C->stub_name());
3488 } else {
3489 install_code(C->method(),
3490 C->entry_bci(),
3491 CompileBroker::compiler2(),
3492 C->has_unsafe_access(),
3493 SharedRuntime::is_wide_vector(C->max_vector_size()));
3494 }
3495 }
3496
3497 void PhaseOutput::install_code(ciMethod* target,
3498 int entry_bci,
3499 AbstractCompiler* compiler,
3500 bool has_unsafe_access,
3501 bool has_wide_vectors) {
3502 // Check if we want to skip execution of all compiled code.
3503 {
3504 #ifndef PRODUCT
3505 if (OptoNoExecute) {
3506 C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
3507 return;
3508 }
3509 #endif
3510 Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3511
3512 if (C->is_osr_compilation()) {
3513 _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3514 _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3515 } else {
3516 _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3517 if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
3518 _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
3519 }
3520 if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
3521 _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
3522 }
3523 if (_code_offsets.value(CodeOffsets::Entry) == -1) {
3524 _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
3525 }
3526 _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3527 }
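// Any entry offset still at its -1 sentinel (left there earlier when the
// corresponding inline-type entry point was not emitted separately) falls
// back to the verified entry at _first_block_size.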
3528
3529 C->env()->register_method(target,
3530 entry_bci,
3531 &_code_offsets,
3532 _orig_pc_slot_offset_in_bytes,
3533 code_buffer(),
3534 frame_size_in_words(),
3535 _oop_map_set,
3536 &_handler_table,
3537 inc_table(),
3538 compiler,
3539 has_unsafe_access,
3540 SharedRuntime::is_wide_vector(C->max_vector_size()),
3541 C->has_monitors(),
3542 0);
3543
3544 if (C->log() != nullptr) { // Print code cache state into compiler log
3545 C->log()->code_cache_state();
3546 }
3547 }
3548 }
3549 void PhaseOutput::install_stub(const char* stub_name) {
3550 // Entry point will be accessed using stub_entry_point();
3551 if (code_buffer() == nullptr) {
3552 Matcher::soft_match_failure();
3553 } else {
3554 if (PrintAssembly && (WizardMode || Verbose))
3555 tty->print_cr("### Stub::%s", stub_name);
3556
3557 if (!C->failing()) {
3558 assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3559
3560 // Make the NMethod
3561 // For now we mark the frame as never safe for profile stackwalking
3562 RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,