 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfo.hpp"
#include "code/debugInfoRec.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/allocation.hpp"
#include "opto/ad.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/c2_MacroAssembler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/node.hpp"
#include "opto/optoreg.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
    _first_block_size(0),
    _handler_table(),
    _inc_table(),
    _stub_list(),
    _oop_map_set(nullptr),
    _scratch_buffer_blob(nullptr),
    _scratch_locs_memory(nullptr),
    _scratch_const_size(-1),
    _in_scratch_emit_size(false),
    _frame_slots(0),
    _code_offsets(),
    _node_bundling_limit(0),
    _node_bundling_base(nullptr),
    _orig_pc_slot(0),
    _orig_pc_slot_offset_in_bytes(0),
    _buf_sizes(),
    _block(nullptr),
    _index(0) {
  C->set_output(this);
  if (C->stub_name() == nullptr) {
    int fixed_slots = C->fixed_slots();
    if (C->needs_stack_repair()) {
      fixed_slots -= 2;
    }
    // TODO 8284443 Only reserve extra slot if needed
    if (InlineTypeReturnedAsFields) {
      fixed_slots -= 2;
    }
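    // The saved original PC occupies one address-sized slot at the top of
    // the fixed slots; sizeof(address) / VMRegImpl::stack_slot_size is
    // 2 slots on a 64-bit VM where stack slots are 4 bytes wide.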
    _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
  }
}

PhaseOutput::~PhaseOutput() {
  C->set_output(nullptr);
  if (_scratch_buffer_blob != nullptr) {
    BufferBlob::free(_scratch_buffer_blob);
  }
}

void PhaseOutput::perform_mach_node_analysis() {
  // Late barrier analysis must be done after schedule and bundle;
  // otherwise liveness-based spilling will fail.
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->late_barrier_analysis();

  pd_perform_mach_node_analysis();

  C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
}

// Convert Nodes to instruction bits and pass off to the VM
void PhaseOutput::Output() {
  // The RootNode goes away; its block must be empty by now.
  assert(C->cfg()->get_root_block()->number_of_nodes() == 0, "");

  // The number of new nodes (mostly MachNop) is proportional to
  // the number of Java calls and inner loops which are aligned.
  if (C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
                           C->inner_loops()*(OptoLoopAlignment-1)),
                          "out of nodes before code generation")) {
    return;
  }
  // Make sure I can find the Start Node
  Block *entry = C->cfg()->get_block(1);
  Block *broot = C->cfg()->get_root_block();

  const StartNode *start = entry->head()->as_Start();

  // Replace StartNode with prolog
  Label verified_entry;
  MachPrologNode* prolog = new MachPrologNode(&verified_entry);
  entry->map_node(prolog, 0);
  C->cfg()->map_node_to_block(prolog, entry);
  C->cfg()->unmap_node_from_block(start); // start is no longer in any block
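  // Note: verified_entry is bound within the prolog so that the entry
  // points inserted below can transfer control to the verified entry once
  // their entry-specific work (if any) is done.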

  // Virtual methods need an unverified entry point
  if (C->is_osr_compilation()) {
    if (PoisonOSREntry) {
      // TODO: Should use a ShouldNotReachHereNode...
      C->cfg()->insert(broot, 0, new MachBreakpointNode());
    }
  } else {
    if (C->method()) {
      if (C->method()->has_scalarized_args()) {
        // Add entry point to unpack all inline type arguments
        C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
        if (!C->method()->is_static()) {
          // Add verified/unverified entry points to only unpack inline type receiver at interface calls
          C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
          C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true,  /* receiver_only */ true));
          C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
        }
      } else if (!C->method()->is_static()) {
        // Insert unvalidated entry point
        C->cfg()->insert(broot, 0, new MachUEPNode());
      }
    }
  }

  // Break before main entry point
  if ((C->method() && C->directive()->BreakAtExecuteOption) ||
      (OptoBreakpoint && C->is_method_compilation())        ||
      (OptoBreakpointOSR && C->is_osr_compilation())        ||
      (OptoBreakpointC2R && !C->method())) {
    // checking for C->method() means that OptoBreakpoint does not apply to
    // runtime stubs or frame converters
    C->cfg()->insert(entry, 1, new MachBreakpointNode());
  }

  // Insert epilogs before every return
  for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
    Block* block = C->cfg()->get_block(i);
    if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
      Node* m = block->end();
      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
        MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
        block->add_inst(epilog);
        C->cfg()->map_node_to_block(epilog, block);
      }
    }
  }

  // Keeper of sizing aspects
  _buf_sizes = BufferSizingData();

  // Initialize code buffer
  estimate_buffer_size(_buf_sizes._const);
  if (C->failing()) return;

  // Pre-compute the length of blocks and replace
  // long branches with short if machine supports it.
  // Must be done before ScheduleAndBundle due to SPARC delay slots
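  // blk_starts[i] will hold the code start offset of block i;
  // entry 0 is 0 and shorten_branches() fills in the rest.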
  uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
  blk_starts[0] = 0;
  shorten_branches(blk_starts);

  if (!C->is_osr_compilation() && C->has_scalarized_args()) {
    // Compute the offsets of the entry points required by the inline type calling convention
    if (!C->method()->is_static()) {
      // We have entries at the beginning of the method, implemented by the first 4 nodes.
      // Entry                     (unverified) @ offset 0
      // Verified_Inline_Entry_RO
      // Inline_Entry              (unverified)
      // Verified_Inline_Entry
      uint offset = 0;
      _code_offsets.set_value(CodeOffsets::Entry, offset);

      offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
      _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);

      offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
      _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);

      offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
      _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
    } else {
      _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
      _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
    }
  }

  ScheduleAndBundle();
  if (C->failing()) {
    return;
  }

  perform_mach_node_analysis();

  // Complete sizing of code buffer
  CodeBuffer* cb = init_buffer();
  if (cb == nullptr || C->failing()) {
    return;
  }

  BuildOopMaps();

  if (C->failing()) {
    return;
  }

  C2_MacroAssembler masm(cb);
    // Sum all instruction sizes to compute block size
    uint last_inst = block->number_of_nodes();
    uint blk_size = 0;
    for (uint j = 0; j < last_inst; j++) {
      _index = j;
      Node* nj = block->get_node(_index);
      // Handle machine instruction nodes
      if (nj->is_Mach()) {
        MachNode* mach = nj->as_Mach();
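        // Worst-case padding: an instruction that must be aligned on an
        // N-address-unit boundary may need up to N-1 units of nops before it.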
        blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
        reloc_size += mach->reloc();
        if (mach->is_MachCall()) {
          // add size information for trampoline stub
          // class CallStubImpl is platform-specific and defined in the *.ad files.
          stub_size  += CallStubImpl::size_call_trampoline();
          reloc_size += CallStubImpl::reloc_call_trampoline();

          MachCallNode *mcall = mach->as_MachCall();
          // This destination address is NOT PC-relative

          if (mcall->entry_point() != nullptr) {
            mcall->method_set((intptr_t)mcall->entry_point());
          }

          if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
            stub_size  += CompiledDirectCall::to_interp_stub_size();
            reloc_size += CompiledDirectCall::reloc_to_interp_stub();
          }
        } else if (mach->is_MachSafePoint()) {
          // If call/safepoint are adjacent, account for possible
          // nop to disambiguate the two safepoints.
          // ScheduleAndBundle() can rearrange nodes in a block,
          // check for all offsets inside this block.
          if (last_call_adr >= blk_starts[i]) {
            blk_size += nop_size;
          }
        }
        if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
          // Nop is inserted between "avoid back to back" instructions.
          // ScheduleAndBundle() can rearrange nodes in a block,
          // check for all offsets inside this block.
          if (last_avoid_back_to_back_adr >= blk_starts[i]) {
            blk_size += nop_size;
    // New functionality:
    //   Assert if the local is not top. In product mode let the new node
    //   override the old entry.
    assert(local == C->top(), "LocArray collision");
    if (local == C->top()) {
      return;
    }
    array->pop();
  }
  const Type *t = local->bottom_type();

  // Is it a safepoint scalar object node?
  if (local->is_SafePointScalarObject()) {
    SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();

    ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
    if (sv == nullptr) {
      ciKlass* cik = t->is_oopptr()->exact_klass();
      assert(cik->is_instance_klass() ||
             cik->is_array_klass(), "Not supported allocation.");
      uint first_ind = spobj->first_index(sfpt->jvms());
      // Nullable, scalarized inline types have an is_init input
      // that needs to be checked before using the field values.
      ScopeValue* is_init = nullptr;
      if (cik->is_inlinetype()) {
        Node* init_node = sfpt->in(first_ind++);
        assert(init_node != nullptr, "is_init node not found");
        if (!init_node->is_top()) {
          const TypeInt* init_type = init_node->bottom_type()->is_int();
          if (init_node->is_Con()) {
            is_init = new ConstantIntValue(init_type->get_con());
          } else {
            OptoReg::Name init_reg = C->regalloc()->get_reg_first(init_node);
            is_init = new_loc_value(C->regalloc(), init_reg, Location::normal);
          }
        }
      }
      sv = new ObjectValue(spobj->_idx,
                           new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, is_init);
      set_sv_for_object_node(objs, sv);

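      // Recursively describe each field value; a field may itself be a
      // scalar-replaced object that gets recorded in 'objs'.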
      for (uint i = 0; i < spobj->n_fields(); i++) {
        Node* fld_node = sfpt->in(first_ind+i);
        (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
      }
    }
    array->append(sv);
    return;
  } else if (local->is_SafePointScalarMerge()) {
    SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
    ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);

    if (mv == nullptr) {
      GrowableArray<ScopeValue*> deps;

      int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
      (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
      assert(deps.length() == 1, "missing value");

      int selector_idx = smerge->selector_idx(sfpt->jvms());
      (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);
      continue;
    }

    ObjectValue* other = sv_for_node_id(objs, n->_idx);
    if (ov == other) {
      return true;
    }
  }
  return false;
}

//--------------------------Process_OopMap_Node--------------------------------
void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
  // Handle special safepoint nodes for synchronization
  MachSafePointNode *sfn = mach->as_MachSafePoint();
  MachCallNode *mcall;

  int safepoint_pc_offset = current_offset;
  bool is_method_handle_invoke = false;
  bool return_oop = false;
  bool return_scalarized = false;
  bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
  bool arg_escape = false;

  // Add the safepoint in the DebugInfoRecorder
  if (!mach->is_MachCall()) {
    mcall = nullptr;
    C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
  } else {
    mcall = mach->as_MachCall();

    // Is the call a MethodHandle call?
    if (mcall->is_MachCallJava()) {
      if (mcall->as_MachCallJava()->_method_handle_invoke) {
        assert(C->has_method_handle_invokes(), "must have been set during call generation");
        is_method_handle_invoke = true;
      }
      arg_escape = mcall->as_MachCallJava()->_arg_escape;
    }

    // Check if a call returns an object.
    if (mcall->returns_pointer() || mcall->returns_scalarized()) {
      return_oop = true;
    }
    if (mcall->returns_scalarized()) {
      return_scalarized = true;
    }
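    // For calls, debug info is recorded at the return address rather than
    // at the call instruction itself, hence the shift by ret_addr_offset().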
    safepoint_pc_offset += mcall->ret_addr_offset();
    C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
  }

  // Loop over the JVMState list to add scope information
  // Do not skip safepoints with a null method, they need monitor info
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();

  // Allocate the object pool for scalar-replaced objects -- the map from
  // small-integer keys (which can be recorded in the local and ostack
  // arrays) to descriptions of the object state.
  GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();

  // Visit scopes from oldest to youngest.
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int idx;
    ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
    // Safepoints that do not have method() set only provide oop-map and monitor info
    DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
    DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);

    // Make method available for all Safepoints
    ciMethod* scope_method = method ? method : C->method();
    // Now we can describe the scope.
    assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
    assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
    methodHandle null_mh;
    bool rethrow_exception = false;
    C->debug_info()->describe_scope(
      safepoint_pc_offset,
      null_mh,
      scope_method,
      jvms->bci(),
      jvms->should_reexecute(),
      rethrow_exception,
      is_method_handle_invoke,
      return_oop,
      return_scalarized,
      has_ea_local_in_scope,
      arg_escape,
      locvals,
      expvals,
      monvals
    );
  } // End jvms loop

  // Mark the end of the scope set.
  C->debug_info()->end_safepoint(safepoint_pc_offset);
}


// A simplified version of Process_OopMap_Node, to handle non-safepoints.
class NonSafepointEmitter {
  Compile* C;
  JVMState* _pending_jvms;
  int _pending_offset;

        MachNode *nop = new MachNopNode(nops_cnt);
        block->insert_node(nop, j++);
        last_inst++;
        C->cfg()->map_node_to_block(nop, block);
        // Ensure enough space.
        masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
        if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
          C->record_failure("CodeCache is full");
          return;
        }
        nop->emit(masm, C->regalloc());
        masm->code()->flush_bundle(true);
        current_offset = masm->offset();
      }

      bool observe_safepoint = is_sfn;
      // Remember the start of the last call in a basic block
      if (is_mcall) {
        MachCallNode *mcall = mach->as_MachCall();

        if (mcall->entry_point() != nullptr) {
          // This destination address is NOT PC-relative
          mcall->method_set((intptr_t)mcall->entry_point());
        }

        // Save the return address
        call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();

        observe_safepoint = mcall->guaranteed_safepoint();
      }

      // sfn is valid whenever mcall is valid because MachCallNode
      // inherits from MachSafePointNode
      if (observe_safepoint) {
        // Handle special safepoint nodes for synchronization
        if (!is_mcall) {
          MachSafePointNode *sfn = mach->as_MachSafePoint();
          // !!!!! Stubs only need an oopmap right now, so bail out
          if (sfn->jvms()->method() == nullptr) {
            // Write the oopmap directly to the code blob??!!
            continue;
          }
        } // End synchronization

        non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
      if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
        node_offsets[n->_idx] = masm->offset();
      }
#endif
      assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");

      // "Normal" instruction case
      DEBUG_ONLY(uint instr_offset = masm->offset());
      n->emit(masm, C->regalloc());
      current_offset = masm->offset();

      // Above we only verified that there is enough space in the instruction section.
      // However, the instruction may emit stubs that cause code buffer expansion.
      // Bail out here if expansion failed due to a lack of code cache space.
      if (C->failing()) {
        return;
      }

      assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
             "ret_addr_offset() not within emitted code");

#ifdef ASSERT
      uint n_size = n->size(C->regalloc());
      if (n_size < (current_offset-instr_offset)) {
        MachNode* mach = n->as_Mach();
        n->dump();
        mach->dump_format(C->regalloc(), tty);
        tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
        Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
        tty->print_cr(" ------------------- ");
        BufferBlob* blob = this->scratch_buffer_blob();
        address blob_begin = blob->content_begin();
        Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
        assert(false, "wrong size of mach node");
      }
#endif
      non_safepoints.observe_instruction(n, current_offset);

      // mcall is the last "call" that can be a safepoint. Record it so we
      // can see if a poll will directly follow it, in which case we'll
      // need a pad to make the PcDesc sites unique.
        anti_do_use(b, n, _regalloc->get_reg_first(def));
        anti_do_use(b, n, _regalloc->get_reg_second(def));
      }
    }
    // Do not allow defs of new derived values to float above GC
    // points unless the base is definitely available at the GC point.

    Node *m = b->get_node(i);

    // Add precedence edge from following safepoint to use of derived pointer
    if (last_safept_node != end_node &&
        m != last_safept_node) {
      for (uint k = 1; k < m->req(); k++) {
        const Type *t = m->in(k)->bottom_type();
        if (t->isa_oop_ptr() &&
            t->is_ptr()->offset() != 0) {
          last_safept_node->add_prec(m);
          break;
        }
      }

      // Do not allow a CheckCastPP node whose input is a raw pointer to
      // float past a safepoint. This can occur when a buffered inline
      // type is allocated in a loop and the CheckCastPP from that
      // allocation is reused outside the loop. If the use inside the
      // loop is scalarized the CheckCastPP will no longer be connected
      // to the loop safepoint. See JDK-8264340.
      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
        Node *def = m->in(1);
        if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
          last_safept_node->add_prec(m);
        }
      }
    }

    if (n->jvms()) { // Precedence edge from derived to safept
      // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
      if (b->get_node(last_safept) != last_safept_node) {
        last_safept = b->find_node(last_safept_node);
      }
      for (uint j = last_safept; j > i; j--) {
        Node *mach = b->get_node(j);
        if (mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP)
          mach->add_prec(n);
      }
      last_safept = i;
      last_safept_node = m;
    }
  }

  if (fat_proj_seen) {
    // Garbage collect pinch nodes that were not consumed.
    // They are usually created by a fat kill MachProj for a call.
}
#endif

//-----------------------init_scratch_buffer_blob------------------------------
// Construct a temporary BufferBlob and cache it for this compile.
void PhaseOutput::init_scratch_buffer_blob(int const_size) {
  // If there is already a scratch buffer blob allocated and the
  // constant section is big enough, use it. Otherwise free the
  // current and allocate a new one.
  BufferBlob* blob = scratch_buffer_blob();
  if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
    // Use the current blob.
  } else {
    if (blob != nullptr) {
      BufferBlob::free(blob);
    }

    ResourceMark rm;
    _scratch_const_size = const_size;
    int size = C2Compiler::initial_code_buffer_size(const_size);
    if (C->has_scalarized_args()) {
      // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
      // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
      ciMethod* method = C->method();
      int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
      int arg_num = 0;
      if (!method->is_static()) {
        if (method->is_scalarized_arg(arg_num)) {
          size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
        }
        arg_num++;
      }
      for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
        if (method->is_scalarized_arg(arg_num)) {
          size += str.type()->as_inline_klass()->oop_count() * barrier_size;
        }
        arg_num++;
      }
    }
    blob = BufferBlob::create("Compile::scratch_buffer", size);
    // Record the buffer blob for next time.
    set_scratch_buffer_blob(blob);
    // Have we run out of code space?
    if (scratch_buffer_blob() == nullptr) {
      // Let CompileBroker disable further compilations.
      C->record_failure("Not enough space for scratch buffer in CodeCache");
      return;
    }
  }

  // Initialize the relocation buffers
  relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
  set_scratch_locs_memory(locs_buf);
}

//-----------------------scratch_emit_size-------------------------------------
// Helper function that computes size by emitting code
uint PhaseOutput::scratch_emit_size(const Node* n) {
  buf.insts()->set_scratch_emit();
  buf.stubs()->set_scratch_emit();

  // Do the emission.

  Label fakeL; // Fake label for branch instructions.
  Label* saveL = nullptr;
  uint save_bnum = 0;
  bool is_branch = n->is_MachBranch();
  C2_MacroAssembler masm(&buf);
  masm.bind(fakeL);
  if (is_branch) {
    n->as_MachBranch()->save_label(&saveL, &save_bnum);
    n->as_MachBranch()->label_set(&fakeL, 0);
  }
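  // With fakeL bound at the current buffer position, a branch node has a
  // resolvable target and can be emitted purely for size measurement.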
  n->emit(&masm, C->regalloc());

  // Emitting into the scratch buffer should not fail
  assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());

  // Restore label.
  if (is_branch) {
    n->as_MachBranch()->label_set(saveL, save_bnum);
  }

  // End scratch_emit_size section.
  set_in_scratch_emit_size(false);

  return buf.insts_size();
}

void PhaseOutput::install() {
  if (!C->should_install_code()) {
    return;
  } else if (C->stub_function() != nullptr) {
    install_stub(C->stub_name());
  } else {
    install_code(C->method(),
                 C->entry_bci(),
                 CompileBroker::compiler2(),
                 C->has_unsafe_access(),
                 SharedRuntime::is_wide_vector(C->max_vector_size()));
  }
}

void PhaseOutput::install_code(ciMethod* target,
                               int entry_bci,
                               AbstractCompiler* compiler,
                               bool has_unsafe_access,
                               bool has_wide_vectors) {
  // Check if we want to skip execution of all compiled code.
  {
#ifndef PRODUCT
    if (OptoNoExecute) {
      C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
      return;
    }
#endif
    Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);

    if (C->is_osr_compilation()) {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
    } else {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
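      // Entry offsets that were not computed during Output() are still -1;
      // fall back to the verified entry offset for them.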
      if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
        _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
      }
      if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
        _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
      }
      if (_code_offsets.value(CodeOffsets::Entry) == -1) {
        _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
      }
      _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
    }

    C->env()->register_method(target,
                              entry_bci,
                              &_code_offsets,
                              _orig_pc_slot_offset_in_bytes,
                              code_buffer(),
                              frame_size_in_words(),
                              _oop_map_set,
                              &_handler_table,
                              inc_table(),
                              compiler,
                              has_unsafe_access,
                              SharedRuntime::is_wide_vector(C->max_vector_size()),
                              C->has_monitors(),
                              C->has_scoped_access(),
                              0);

    if (C->log() != nullptr) { // Print code cache state into compiler log
      C->log()->code_cache_state();
    }
  }
}

void PhaseOutput::install_stub(const char* stub_name) {
  // Entry point will be accessed using stub_entry_point();
  if (code_buffer() == nullptr) {
    Matcher::soft_match_failure();
  } else {
    if (PrintAssembly && (WizardMode || Verbose)) {
      tty->print_cr("### Stub::%s", stub_name);
    }

    if (!C->failing()) {
      assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");

      // Make the NMethod
      // For now we mark the frame as never safe for profile stackwalking
      RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,