16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/assembler.inline.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/debugInfo.hpp"
30 #include "code/debugInfoRec.hpp"
31 #include "compiler/compileBroker.hpp"
32 #include "compiler/compilerDirectives.hpp"
33 #include "compiler/disassembler.hpp"
34 #include "compiler/oopMap.hpp"
35 #include "gc/shared/barrierSet.hpp"
36 #include "gc/shared/c2/barrierSetC2.hpp"
37 #include "memory/allocation.inline.hpp"
38 #include "memory/allocation.hpp"
39 #include "opto/ad.hpp"
40 #include "opto/block.hpp"
41 #include "opto/c2compiler.hpp"
42 #include "opto/c2_MacroAssembler.hpp"
43 #include "opto/callnode.hpp"
44 #include "opto/cfgnode.hpp"
45 #include "opto/locknode.hpp"
46 #include "opto/machnode.hpp"
47 #include "opto/node.hpp"
48 #include "opto/optoreg.hpp"
49 #include "opto/output.hpp"
50 #include "opto/regalloc.hpp"
51 #include "opto/runtime.hpp"
52 #include "opto/subnode.hpp"
53 #include "opto/type.hpp"
54 #include "runtime/handles.inline.hpp"
55 #include "runtime/sharedRuntime.hpp"
229 _first_block_size(0),
230 _handler_table(),
231 _inc_table(),
232 _stub_list(),
233 _oop_map_set(nullptr),
234 _scratch_buffer_blob(nullptr),
235 _scratch_locs_memory(nullptr),
236 _scratch_const_size(-1),
237 _in_scratch_emit_size(false),
238 _frame_slots(0),
239 _code_offsets(),
240 _node_bundling_limit(0),
241 _node_bundling_base(nullptr),
242 _orig_pc_slot(0),
243 _orig_pc_slot_offset_in_bytes(0),
244 _buf_sizes(),
245 _block(nullptr),
246 _index(0) {
247 C->set_output(this);
248 if (C->stub_name() == nullptr) {
249 _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
250 }
251 }
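
// Editorial sketch (not part of this file): a hedged restatement of the slot
// arithmetic above. Assuming a 64-bit address and HotSpot's 4-byte stack
// slots (VMRegImpl::stack_slot_size), the saved original PC occupies the last
// two 32-bit slots of the fixed-slot area.
static int example_orig_pc_slot(int fixed_slots) {
  const int slot_size = 4;                                // assumed stack slot size
  const int pc_slots  = (int)(sizeof(void*) / slot_size); // 2 slots on a 64-bit VM
  return fixed_slots - pc_slots;                          // e.g. 6 fixed slots -> slot 4
}
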
252
253 PhaseOutput::~PhaseOutput() {
254 C->set_output(nullptr);
255 if (_scratch_buffer_blob != nullptr) {
256 BufferBlob::free(_scratch_buffer_blob);
257 }
258 }
259
260 void PhaseOutput::perform_mach_node_analysis() {
261 // Late barrier analysis must be done after schedule-and-bundle;
262 // otherwise liveness-based spilling will fail.
263 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
264 bs->late_barrier_analysis();
265
266 pd_perform_mach_node_analysis();
267
268 C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
269 }
270
271 // Convert Nodes to instruction bits and pass off to the VM
272 void PhaseOutput::Output() {
273 // The RootNode's block must be empty at this point
274 assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
275
276 // The number of new nodes (mostly MachNop) is proportional to
277 // the number of java calls and aligned inner loops.
278 if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
279 C->inner_loops()*(OptoLoopAlignment-1)),
280 "out of nodes before code generation" ) ) {
281 return;
282 }
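
// Editorial sketch: a hedged worked example of the node budget above, with
// assumed flag defaults (NodeLimitFudgeFactor == 2000, OptoLoopAlignment == 16).
// A method with 10 java calls and 3 aligned inner loops must leave headroom for
// 2000 + 10*3 + 3*(16-1) = 2075 extra nodes, mostly MachNops for padding.
static unsigned example_node_headroom(unsigned java_calls, unsigned inner_loops) {
  const unsigned fudge      = 2000; // assumed NodeLimitFudgeFactor
  const unsigned loop_align = 16;   // assumed OptoLoopAlignment
  return fudge + java_calls * 3 + inner_loops * (loop_align - 1);
}
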
283 // Make sure I can find the Start Node
284 Block *entry = C->cfg()->get_block(1);
285 Block *broot = C->cfg()->get_root_block();
286
287 const StartNode *start = entry->head()->as_Start();
288
289 // Replace StartNode with prolog
290 MachPrologNode *prolog = new MachPrologNode();
291 entry->map_node(prolog, 0);
292 C->cfg()->map_node_to_block(prolog, entry);
293 C->cfg()->unmap_node_from_block(start); // start is no longer in any block
294
295 // Virtual methods need an unverified entry point
296
297 if( C->is_osr_compilation() ) {
298 if( PoisonOSREntry ) {
299 // TODO: Should use a ShouldNotReachHereNode...
300 C->cfg()->insert( broot, 0, new MachBreakpointNode() );
301 }
302 } else {
303 if( C->method() && !C->method()->flags().is_static() ) {
304 // Insert unverified entry point
305 C->cfg()->insert( broot, 0, new MachUEPNode() );
306 }
307
308 }
309
310 // Break before main entry point
311 if ((C->method() && C->directive()->BreakAtExecuteOption) ||
312 (OptoBreakpoint && C->is_method_compilation()) ||
313 (OptoBreakpointOSR && C->is_osr_compilation()) ||
314 (OptoBreakpointC2R && !C->method()) ) {
315 // checking for C->method() means that OptoBreakpoint does not apply to
316 // runtime stubs or frame converters
317 C->cfg()->insert( entry, 1, new MachBreakpointNode() );
318 }
319
320 // Insert epilogs before every return
321 for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
322 Block* block = C->cfg()->get_block(i);
323 if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
324 Node* m = block->end();
325 if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
326 MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
327 block->add_inst(epilog);
328 C->cfg()->map_node_to_block(epilog, block);
329 }
330 }
331 }
332
333 // Keeper of sizing aspects
334 _buf_sizes = BufferSizingData();
335
336 // Initialize code buffer
337 estimate_buffer_size(_buf_sizes._const);
338 if (C->failing()) return;
339
340 // Pre-compute the length of blocks and replace
341 // long branches with short if machine supports it.
342 // Must be done before ScheduleAndBundle due to SPARC delay slots
343 uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
344 blk_starts[0] = 0;
345 shorten_branches(blk_starts);
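
// Editorial sketch: the per-branch decision shorten_branches() makes, hedged.
// blk_starts[] is a running sum of estimated block sizes; a branch may use the
// short encoding only if its displacement fits the short form's signed range.
// The 8-bit range here is an illustrative assumption; the real width comes
// from the matcher/AD files.
static bool example_fits_short_branch(int branch_offset, int target_offset) {
  int disp = target_offset - branch_offset;
  return disp >= -128 && disp <= 127; // assumed 8-bit signed displacement
}
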
346
347 ScheduleAndBundle();
348 if (C->failing()) {
349 return;
350 }
351
352 perform_mach_node_analysis();
353
354 // Complete sizing of the code buffer
355 CodeBuffer* cb = init_buffer();
356 if (cb == nullptr || C->failing()) {
357 return;
358 }
359
360 BuildOopMaps();
361
362 if (C->failing()) {
363 return;
364 }
365
366 C2_MacroAssembler masm(cb);
488 // Sum all instruction sizes to compute block size
489 uint last_inst = block->number_of_nodes();
490 uint blk_size = 0;
491 for (uint j = 0; j < last_inst; j++) {
492 _index = j;
493 Node* nj = block->get_node(_index);
494 // Handle machine instruction nodes
495 if (nj->is_Mach()) {
496 MachNode* mach = nj->as_Mach();
497 blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
498 reloc_size += mach->reloc();
499 if (mach->is_MachCall()) {
500 // add size information for trampoline stub
501 // class CallStubImpl is platform-specific and defined in the *.ad files.
502 stub_size += CallStubImpl::size_call_trampoline();
503 reloc_size += CallStubImpl::reloc_call_trampoline();
504
505 MachCallNode *mcall = mach->as_MachCall();
506 // This destination address is NOT PC-relative
507
508 mcall->method_set((intptr_t)mcall->entry_point());
509
510 if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
511 stub_size += CompiledDirectCall::to_interp_stub_size();
512 reloc_size += CompiledDirectCall::reloc_to_interp_stub();
513 }
514 } else if (mach->is_MachSafePoint()) {
515 // If call/safepoint are adjacent, account for a possible
516 // nop to disambiguate the two safepoints.
517 // ScheduleAndBundle() can rearrange nodes in a block,
518 // so check all offsets inside this block.
519 if (last_call_adr >= blk_starts[i]) {
520 blk_size += nop_size;
521 }
522 }
523 if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
524 // A nop is inserted between "avoid back to back" instructions.
525 // ScheduleAndBundle() can rearrange nodes in a block,
526 // so check all offsets inside this block.
527 if (last_avoid_back_to_back_adr >= blk_starts[i]) {
528 blk_size += nop_size;
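
// Editorial sketch: a hedged summary of the sizing loop above. Every Mach node
// contributes its worst-case alignment padding, and calls additionally reserve
// trampoline stub space. The constants below are hypothetical stand-ins for
// relocInfo::addr_unit() and CallStubImpl::size_call_trampoline().
static void example_account_node(unsigned align_required, bool is_call,
                                 unsigned& blk_size, unsigned& stub_size) {
  const unsigned addr_unit     = 1;  // assumed relocInfo::addr_unit()
  const unsigned trampoline_sz = 16; // hypothetical trampoline stub size
  blk_size += (align_required - 1) * addr_unit; // worst-case padding
  if (is_call) {
    stub_size += trampoline_sz;
  }
}
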
743 // New functionality:
744 // Assert if the local is not top. In product mode let the new node
745 // override the old entry.
746 assert(local == C->top(), "LocArray collision");
747 if (local == C->top()) {
748 return;
749 }
750 array->pop();
751 }
752 const Type *t = local->bottom_type();
753
754 // Is it a safepoint scalar object node?
755 if (local->is_SafePointScalarObject()) {
756 SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
757
758 ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
759 if (sv == nullptr) {
760 ciKlass* cik = t->is_oopptr()->exact_klass();
761 assert(cik->is_instance_klass() ||
762 cik->is_array_klass(), "Not supported allocation.");
763 sv = new ObjectValue(spobj->_idx,
764 new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
765 set_sv_for_object_node(objs, sv);
766
767 uint first_ind = spobj->first_index(sfpt->jvms());
768 for (uint i = 0; i < spobj->n_fields(); i++) {
769 Node* fld_node = sfpt->in(first_ind+i);
770 (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
771 }
772 }
773 array->append(sv);
774 return;
775 } else if (local->is_SafePointScalarMerge()) {
776 SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
777 ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
778
779 if (mv == nullptr) {
780 GrowableArray<ScopeValue*> deps;
781
782 int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
783 (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
784 assert(deps.length() == 1, "missing value");
785
786 int selector_idx = smerge->selector_idx(sfpt->jvms());
787 (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);
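
// Editorial sketch: the object-pool protocol FillLocArray relies on, hedged
// with simplified stand-in types. Scope values for scalar-replaced objects are
// keyed by node _idx and created at most once, so several locals (and merge
// candidates) can share a single ObjectValue description.
struct ExampleObjectValue { int node_id; ExampleObjectValue* next; };
static ExampleObjectValue* example_sv_for_node_id(ExampleObjectValue*& head, int node_id) {
  for (ExampleObjectValue* ov = head; ov != nullptr; ov = ov->next) {
    if (ov->node_id == node_id) return ov; // already described: reuse it
  }
  ExampleObjectValue* ov = new ExampleObjectValue{node_id, head};
  head = ov; // register so later references share this description
  return ov;
}
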
994 continue;
995 }
996
997 ObjectValue* other = sv_for_node_id(objs, n->_idx);
998 if (ov == other) {
999 return true;
1000 }
1001 }
1002 return false;
1003 }
1004
1005 //--------------------------Process_OopMap_Node--------------------------------
1006 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1007 // Handle special safepoint nodes for synchronization
1008 MachSafePointNode *sfn = mach->as_MachSafePoint();
1009 MachCallNode *mcall;
1010
1011 int safepoint_pc_offset = current_offset;
1012 bool is_method_handle_invoke = false;
1013 bool return_oop = false;
1014 bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1015 bool arg_escape = false;
1016
1017 // Add the safepoint in the DebugInfoRecorder
1018 if( !mach->is_MachCall() ) {
1019 mcall = nullptr;
1020 C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1021 } else {
1022 mcall = mach->as_MachCall();
1023
1024 // Is the call a MethodHandle call?
1025 if (mcall->is_MachCallJava()) {
1026 if (mcall->as_MachCallJava()->_method_handle_invoke) {
1027 assert(C->has_method_handle_invokes(), "must have been set during call generation");
1028 is_method_handle_invoke = true;
1029 }
1030 arg_escape = mcall->as_MachCallJava()->_arg_escape;
1031 }
1032
1033 // Check if a call returns an object.
1034 if (mcall->returns_pointer()) {
1035 return_oop = true;
1036 }
1037 safepoint_pc_offset += mcall->ret_addr_offset();
1038 C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1039 }
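
// Editorial sketch: a hedged restatement of the offset logic above. For a call,
// debug info is recorded at the return address (call start + ret_addr_offset());
// for any other safepoint it is recorded at the instruction itself.
static int example_safepoint_pc(int current_offset, bool is_call, int ret_addr_offset) {
  return is_call ? current_offset + ret_addr_offset : current_offset;
}
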
1040
1041 // Loop over the JVMState list to add scope information
1042 // Do not skip safepoints with a null method; they need monitor info
1043 JVMState* youngest_jvms = sfn->jvms();
1044 int max_depth = youngest_jvms->depth();
1045
1046 // Allocate the object pool for scalar-replaced objects -- the map from
1047 // small-integer keys (which can be recorded in the local and ostack
1048 // arrays) to descriptions of the object state.
1049 GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1050
1051 // Visit scopes from oldest to youngest.
1052 for (int depth = 1; depth <= max_depth; depth++) {
1053 JVMState* jvms = youngest_jvms->of_depth(depth);
1054 int idx;
1055 ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1056 // Safepoints that do not have method() set only provide oop-map and monitor info
1195 DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1196 DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1197
1198 // Make method available for all Safepoints
1199 ciMethod* scope_method = method ? method : C->method();
1200 // Describe the scope here
1201 assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1202 assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1203 // Now we can describe the scope.
1204 methodHandle null_mh;
1205 bool rethrow_exception = false;
1206 C->debug_info()->describe_scope(
1207 safepoint_pc_offset,
1208 null_mh,
1209 scope_method,
1210 jvms->bci(),
1211 jvms->should_reexecute(),
1212 rethrow_exception,
1213 is_method_handle_invoke,
1214 return_oop,
1215 has_ea_local_in_scope,
1216 arg_escape,
1217 locvals,
1218 expvals,
1219 monvals
1220 );
1221 } // End jvms loop
1222
1223 // Mark the end of the scope set.
1224 C->debug_info()->end_safepoint(safepoint_pc_offset);
1225 }
1226
1227
1228
1229 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1230 class NonSafepointEmitter {
1231 Compile* C;
1232 JVMState* _pending_jvms;
1233 int _pending_offset;
1234
1569 MachNode *nop = new MachNopNode(nops_cnt);
1570 block->insert_node(nop, j++);
1571 last_inst++;
1572 C->cfg()->map_node_to_block(nop, block);
1573 // Ensure enough space.
1574 masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1575 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1576 C->record_failure("CodeCache is full");
1577 return;
1578 }
1579 nop->emit(masm, C->regalloc());
1580 masm->code()->flush_bundle(true);
1581 current_offset = masm->offset();
1582 }
1583
1584 bool observe_safepoint = is_sfn;
1585 // Remember the start of the last call in a basic block
1586 if (is_mcall) {
1587 MachCallNode *mcall = mach->as_MachCall();
1588
1589 // This destination address is NOT PC-relative
1590 mcall->method_set((intptr_t)mcall->entry_point());
1591
1592 // Save the return address
1593 call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1594
1595 observe_safepoint = mcall->guaranteed_safepoint();
1596 }
1597
1598 // sfn is valid whenever mcall is valid because MachCallNode inherits from MachSafePointNode
1599 if (observe_safepoint) {
1600 // Handle special safepoint nodes for synchronization
1601 if (!is_mcall) {
1602 MachSafePointNode *sfn = mach->as_MachSafePoint();
1603 // !!!!! Stubs only need an oopmap right now, so bail out
1604 if (sfn->jvms()->method() == nullptr) {
1605 // Write the oopmap directly to the code blob??!!
1606 continue;
1607 }
1608 } // End synchronization
1609
1610 non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1711 if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1712 node_offsets[n->_idx] = masm->offset();
1713 }
1714 #endif
1715 assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1716
1717 // "Normal" instruction case
1718 DEBUG_ONLY(uint instr_offset = masm->offset());
1719 n->emit(masm, C->regalloc());
1720 current_offset = masm->offset();
1721
1722 // Above we only verified that there is enough space in the instruction section.
1723 // However, the instruction may emit stubs that cause code buffer expansion.
1724 // Bail out here if expansion failed due to a lack of code cache space.
1725 if (C->failing()) {
1726 return;
1727 }
1728
1729 assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1730 "ret_addr_offset() not within emitted code");
1731
1732 #ifdef ASSERT
1733 uint n_size = n->size(C->regalloc());
1734 if (n_size < (current_offset-instr_offset)) {
1735 MachNode* mach = n->as_Mach();
1736 n->dump();
1737 mach->dump_format(C->regalloc(), tty);
1738 tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1739 Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
1740 tty->print_cr(" ------------------- ");
1741 BufferBlob* blob = this->scratch_buffer_blob();
1742 address blob_begin = blob->content_begin();
1743 Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1744 assert(false, "wrong size of mach node");
1745 }
1746 #endif
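
// Editorial sketch: the invariant the ASSERT block above enforces, hedged. The
// size a node declares via size() must cover what emit() actually produced,
// because the earlier sizing passes budgeted the code buffer from the declared
// value; under-declaring would overflow those budgets.
static bool example_size_declaration_ok(unsigned declared_size, unsigned emitted_bytes) {
  return declared_size >= emitted_bytes;
}
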
1747 non_safepoints.observe_instruction(n, current_offset);
1748
1749 // mcall is the last "call" that can be a safepoint.
1750 // Record it so we can see if a poll will directly follow it,
1751 // in which case we'll need a pad to make the PcDesc sites unique.
3147 anti_do_use( b, n, _regalloc->get_reg_first(def) );
3148 anti_do_use( b, n, _regalloc->get_reg_second(def) );
3149 }
3150 }
3151 // Do not allow defs of new derived values to float above GC
3152 // points unless the base is definitely available at the GC point.
3153
3154 Node *m = b->get_node(i);
3155
3156 // Add precedence edge from following safepoint to use of derived pointer
3157 if( last_safept_node != end_node &&
3158 m != last_safept_node) {
3159 for (uint k = 1; k < m->req(); k++) {
3160 const Type *t = m->in(k)->bottom_type();
3161 if( t->isa_oop_ptr() &&
3162 t->is_ptr()->offset() != 0 ) {
3163 last_safept_node->add_prec( m );
3164 break;
3165 }
3166 }
3167 }
3168
3169 if( n->jvms() ) { // Precedence edge from derived to safept
3170 // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3171 if( b->get_node(last_safept) != last_safept_node ) {
3172 last_safept = b->find_node(last_safept_node);
3173 }
3174 for( uint j=last_safept; j > i; j-- ) {
3175 Node *mach = b->get_node(j);
3176 if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3177 mach->add_prec( n );
3178 }
3179 last_safept = i;
3180 last_safept_node = m;
3181 }
3182 }
3183
3184 if (fat_proj_seen) {
3185 // Garbage collect pinch nodes that were not consumed.
3186 // They are usually created by a fat kill MachProj for a call.
3305 }
3306 #endif
3307
3308 //-----------------------init_scratch_buffer_blob------------------------------
3309 // Construct a temporary BufferBlob and cache it for this compile.
3310 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3311 // If there is already a scratch buffer blob allocated and the
3312 // constant section is big enough, use it. Otherwise free the
3313 // current and allocate a new one.
3314 BufferBlob* blob = scratch_buffer_blob();
3315 if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3316 // Use the current blob.
3317 } else {
3318 if (blob != nullptr) {
3319 BufferBlob::free(blob);
3320 }
3321
3322 ResourceMark rm;
3323 _scratch_const_size = const_size;
3324 int size = C2Compiler::initial_code_buffer_size(const_size);
3325 blob = BufferBlob::create("Compile::scratch_buffer", size);
3326 // Record the buffer blob for next time.
3327 set_scratch_buffer_blob(blob);
3328 // Have we run out of code space?
3329 if (scratch_buffer_blob() == nullptr) {
3330 // Let CompileBroker disable further compilations.
3331 C->record_failure("Not enough space for scratch buffer in CodeCache");
3332 return;
3333 }
3334 }
3335
3336 // Initialize the relocation buffers
3337 relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3338 set_scratch_locs_memory(locs_buf);
3339 }
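
// Editorial sketch: the caching policy above, hedged and generalized. One
// scratch blob is kept per compile; it is reallocated only when a larger
// constant section is requested, and allocation failure is left for the
// caller to detect (the code cache may be full).
static void* example_get_scratch(void*& cached, int& cached_size, int needed_size,
                                 void* (*alloc)(int), void (*release)(void*)) {
  if (cached != nullptr && needed_size <= cached_size) {
    return cached;           // current blob is big enough: reuse it
  }
  if (cached != nullptr) {
    release(cached);         // too small: free before reallocating
  }
  cached_size = needed_size;
  cached = alloc(needed_size);
  return cached;             // may be null if the code cache is full
}
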
3340
3341
3342 //-----------------------scratch_emit_size-------------------------------------
3343 // Helper function that computes size by emitting code
3344 uint PhaseOutput::scratch_emit_size(const Node* n) {
3375 buf.insts()->set_scratch_emit();
3376 buf.stubs()->set_scratch_emit();
3377
3378 // Do the emission.
3379
3380 Label fakeL; // Fake label for branch instructions.
3381 Label* saveL = nullptr;
3382 uint save_bnum = 0;
3383 bool is_branch = n->is_MachBranch();
3384 C2_MacroAssembler masm(&buf);
3385 masm.bind(fakeL);
3386 if (is_branch) {
3387 n->as_MachBranch()->save_label(&saveL, &save_bnum);
3388 n->as_MachBranch()->label_set(&fakeL, 0);
3389 }
3390 n->emit(&masm, C->regalloc());
3391
3392 // Emitting into the scratch buffer should not fail
3393 assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3394
3395 if (is_branch) // Restore label.
3396 n->as_MachBranch()->label_set(saveL, save_bnum);
3397
3398 // End scratch_emit_size section.
3399 set_in_scratch_emit_size(false);
3400
3401 return buf.insts_size();
3402 }
3403
3404 void PhaseOutput::install() {
3405 if (!C->should_install_code()) {
3406 return;
3407 } else if (C->stub_function() != nullptr) {
3408 install_stub(C->stub_name());
3409 } else {
3410 install_code(C->method(),
3411 C->entry_bci(),
3412 CompileBroker::compiler2(),
3413 C->has_unsafe_access(),
3414 SharedRuntime::is_wide_vector(C->max_vector_size()));
3415 }
3416 }
3417
3418 void PhaseOutput::install_code(ciMethod* target,
3419 int entry_bci,
3420 AbstractCompiler* compiler,
3421 bool has_unsafe_access,
3422 bool has_wide_vectors) {
3423 // Check if we want to skip execution of all compiled code.
3424 {
3425 #ifndef PRODUCT
3426 if (OptoNoExecute) {
3427 C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
3428 return;
3429 }
3430 #endif
3431 Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3432
3433 if (C->is_osr_compilation()) {
3434 _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3435 _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3436 } else {
3437 if (!target->is_static()) {
3438 // The UEP of an nmethod ensures that the VEP is padded. However, the padding of the UEP is placed
3439 // before the inline cache check, so we don't have to execute any nop instructions when dispatching
3440 // through the UEP, yet we can ensure that the VEP is aligned appropriately.
3441 _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3442 }
3443 _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3444 _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3445 }
3446
3447 C->env()->register_method(target,
3448 entry_bci,
3449 &_code_offsets,
3450 _orig_pc_slot_offset_in_bytes,
3451 code_buffer(),
3452 frame_size_in_words(),
3453 oop_map_set(),
3454 &_handler_table,
3455 inc_table(),
3456 compiler,
3457 has_unsafe_access,
3458 SharedRuntime::is_wide_vector(C->max_vector_size()),
3459 C->has_monitors(),
3460 C->has_scoped_access(),
3461 0);
3462
3463 if (C->log() != nullptr) { // Print code cache state into compiler log
3464 C->log()->code_cache_state();
3465 }
3466 }
3467 }
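
// Editorial sketch: the entry-point layout recorded above for a normal
// (non-OSR) compile of a virtual method, hedged. The unverified entry point
// sits MacroAssembler::ic_check_size() bytes before the verified entry, which
// starts at _first_block_size.
static int example_uep_offset(int first_block_size, int ic_check_size) {
  return first_block_size - ic_check_size; // CodeOffsets::Entry
}
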
3468 void PhaseOutput::install_stub(const char* stub_name) {
3469 // Entry point will be accessed using stub_entry_point();
3470 if (code_buffer() == nullptr) {
3471 Matcher::soft_match_failure();
3472 } else {
3473 if (PrintAssembly && (WizardMode || Verbose))
3474 tty->print_cr("### Stub::%s", stub_name);
3475
3476 if (!C->failing()) {
3477 assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3478
3479 // Make the NMethod
3480 // For now we mark the frame as never safe for profile stackwalking
3481 RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/assembler.inline.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/debugInfo.hpp"
30 #include "code/debugInfoRec.hpp"
31 #include "compiler/compileBroker.hpp"
32 #include "compiler/compilerDirectives.hpp"
33 #include "compiler/disassembler.hpp"
34 #include "compiler/oopMap.hpp"
35 #include "gc/shared/barrierSet.hpp"
36 #include "gc/shared/gc_globals.hpp"
37 #include "gc/shared/c2/barrierSetC2.hpp"
38 #include "memory/allocation.inline.hpp"
39 #include "memory/allocation.hpp"
40 #include "opto/ad.hpp"
41 #include "opto/block.hpp"
42 #include "opto/c2compiler.hpp"
43 #include "opto/c2_MacroAssembler.hpp"
44 #include "opto/callnode.hpp"
45 #include "opto/cfgnode.hpp"
46 #include "opto/locknode.hpp"
47 #include "opto/machnode.hpp"
48 #include "opto/node.hpp"
49 #include "opto/optoreg.hpp"
50 #include "opto/output.hpp"
51 #include "opto/regalloc.hpp"
52 #include "opto/runtime.hpp"
53 #include "opto/subnode.hpp"
54 #include "opto/type.hpp"
55 #include "runtime/handles.inline.hpp"
56 #include "runtime/sharedRuntime.hpp"
230 _first_block_size(0),
231 _handler_table(),
232 _inc_table(),
233 _stub_list(),
234 _oop_map_set(nullptr),
235 _scratch_buffer_blob(nullptr),
236 _scratch_locs_memory(nullptr),
237 _scratch_const_size(-1),
238 _in_scratch_emit_size(false),
239 _frame_slots(0),
240 _code_offsets(),
241 _node_bundling_limit(0),
242 _node_bundling_base(nullptr),
243 _orig_pc_slot(0),
244 _orig_pc_slot_offset_in_bytes(0),
245 _buf_sizes(),
246 _block(nullptr),
247 _index(0) {
248 C->set_output(this);
249 if (C->stub_name() == nullptr) {
250 int fixed_slots = C->fixed_slots();
251 if (C->needs_stack_repair()) {
252 fixed_slots -= 2;
253 }
254 // TODO 8284443 Only reserve extra slot if needed
255 if (InlineTypeReturnedAsFields) {
256 fixed_slots -= 2;
257 }
258 _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
259 }
260 }
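
// Editorial sketch: a hedged restatement of the Valhalla adjustment above.
// Stack repair and scalarized inline-type returns each reserve two extra fixed
// slots, which are excluded before the saved-PC slot is placed at the end of
// the remaining fixed area (4-byte stack slots assumed).
static int example_valhalla_orig_pc_slot(int fixed_slots, bool needs_stack_repair,
                                         bool inline_type_returned_as_fields) {
  if (needs_stack_repair)             fixed_slots -= 2;
  if (inline_type_returned_as_fields) fixed_slots -= 2; // see TODO 8284443 above
  return fixed_slots - (int)(sizeof(void*) / 4);        // assumed slot size of 4
}
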
261
262 PhaseOutput::~PhaseOutput() {
263 C->set_output(nullptr);
264 if (_scratch_buffer_blob != nullptr) {
265 BufferBlob::free(_scratch_buffer_blob);
266 }
267 }
268
269 void PhaseOutput::perform_mach_node_analysis() {
270 // Late barrier analysis must be done after schedule-and-bundle;
271 // otherwise liveness-based spilling will fail.
272 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
273 bs->late_barrier_analysis();
274
275 pd_perform_mach_node_analysis();
276
277 C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
278 }
279
280 // Convert Nodes to instruction bits and pass off to the VM
281 void PhaseOutput::Output() {
282 // The RootNode's block must be empty at this point
283 assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
284
285 // The number of new nodes (mostly MachNop) is proportional to
286 // the number of java calls and aligned inner loops.
287 if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
288 C->inner_loops()*(OptoLoopAlignment-1)),
289 "out of nodes before code generation" ) ) {
290 return;
291 }
292 // Make sure I can find the Start Node
293 Block *entry = C->cfg()->get_block(1);
294 Block *broot = C->cfg()->get_root_block();
295
296 const StartNode *start = entry->head()->as_Start();
297
298 // Replace StartNode with prolog
299 Label verified_entry;
300 MachPrologNode* prolog = new MachPrologNode(&verified_entry);
301 entry->map_node(prolog, 0);
302 C->cfg()->map_node_to_block(prolog, entry);
303 C->cfg()->unmap_node_from_block(start); // start is no longer in any block
304
305 // Virtual methods need an unverified entry point
306 if (C->is_osr_compilation()) {
307 if (PoisonOSREntry) {
308 // TODO: Should use a ShouldNotReachHereNode...
309 C->cfg()->insert( broot, 0, new MachBreakpointNode() );
310 }
311 } else {
312 if (C->method()) {
313 if (C->method()->has_scalarized_args()) {
314 // Add entry point to unpack all inline type arguments
315 C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
316 if (!C->method()->is_static()) {
317 // Add verified/unverified entry points to only unpack inline type receiver at interface calls
318 C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
319 C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ true));
320 C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
321 }
322 } else if (!C->method()->is_static()) {
323 // Insert unverified entry point
324 C->cfg()->insert(broot, 0, new MachUEPNode());
325 }
326 }
327 }
328
329 // Break before main entry point
330 if ((C->method() && C->directive()->BreakAtExecuteOption) ||
331 (OptoBreakpoint && C->is_method_compilation()) ||
332 (OptoBreakpointOSR && C->is_osr_compilation()) ||
333 (OptoBreakpointC2R && !C->method()) ) {
334 // checking for C->method() means that OptoBreakpoint does not apply to
335 // runtime stubs or frame converters
336 C->cfg()->insert( entry, 1, new MachBreakpointNode() );
337 }
338
339 // Insert epilogs before every return
340 for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
341 Block* block = C->cfg()->get_block(i);
342 if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
343 Node* m = block->end();
344 if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
345 MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
346 block->add_inst(epilog);
347 C->cfg()->map_node_to_block(epilog, block);
348 }
349 }
350 }
351
352 // Keeper of sizing aspects
353 _buf_sizes = BufferSizingData();
354
355 // Initialize code buffer
356 estimate_buffer_size(_buf_sizes._const);
357 if (C->failing()) return;
358
359 // Pre-compute the length of blocks and replace
360 // long branches with short if machine supports it.
361 // Must be done before ScheduleAndBundle due to SPARC delay slots
362 uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
363 blk_starts[0] = 0;
364 shorten_branches(blk_starts);
365
366 if (!C->is_osr_compilation() && C->has_scalarized_args()) {
367 // Compute the offsets of the entry points required by the inline type calling convention
368 if (!C->method()->is_static()) {
369 // We have entries at the beginning of the method, implemented by the first 4 nodes.
370 // Entry (unverified) @ offset 0
371 // Verified_Inline_Entry_RO
372 // Inline_Entry (unverified)
373 // Verified_Inline_Entry
374 uint offset = 0;
375 _code_offsets.set_value(CodeOffsets::Entry, offset);
376
377 offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
378 _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
379
380 offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
381 _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
382
383 offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
384 _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
385 } else {
386 _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
387 _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
388 }
389 }
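
// Editorial sketch: the offset computation above, hedged. The four inline-type
// entry points are the first nodes of the root block, so each recorded offset
// is the running sum of the sizes of the entry nodes emitted before it.
static void example_entry_offsets(const unsigned sizes[3], unsigned offsets[4]) {
  offsets[0] = 0;                     // Entry (unverified)
  offsets[1] = offsets[0] + sizes[0]; // Verified_Inline_Entry_RO
  offsets[2] = offsets[1] + sizes[1]; // Inline_Entry (unverified)
  offsets[3] = offsets[2] + sizes[2]; // Verified_Inline_Entry
}
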
390
391 ScheduleAndBundle();
392 if (C->failing()) {
393 return;
394 }
395
396 perform_mach_node_analysis();
397
398 // Complete sizing of the code buffer
399 CodeBuffer* cb = init_buffer();
400 if (cb == nullptr || C->failing()) {
401 return;
402 }
403
404 BuildOopMaps();
405
406 if (C->failing()) {
407 return;
408 }
409
410 C2_MacroAssembler masm(cb);
532 // Sum all instruction sizes to compute block size
533 uint last_inst = block->number_of_nodes();
534 uint blk_size = 0;
535 for (uint j = 0; j < last_inst; j++) {
536 _index = j;
537 Node* nj = block->get_node(_index);
538 // Handle machine instruction nodes
539 if (nj->is_Mach()) {
540 MachNode* mach = nj->as_Mach();
541 blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
542 reloc_size += mach->reloc();
543 if (mach->is_MachCall()) {
544 // add size information for trampoline stub
545 // class CallStubImpl is platform-specific and defined in the *.ad files.
546 stub_size += CallStubImpl::size_call_trampoline();
547 reloc_size += CallStubImpl::reloc_call_trampoline();
548
549 MachCallNode *mcall = mach->as_MachCall();
550 // This destination address is NOT PC-relative
551
552 if (mcall->entry_point() != nullptr) {
553 mcall->method_set((intptr_t)mcall->entry_point());
554 }
555
556 if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
557 stub_size += CompiledDirectCall::to_interp_stub_size();
558 reloc_size += CompiledDirectCall::reloc_to_interp_stub();
559 }
560 } else if (mach->is_MachSafePoint()) {
561 // If call/safepoint are adjacent, account for a possible
562 // nop to disambiguate the two safepoints.
563 // ScheduleAndBundle() can rearrange nodes in a block,
564 // so check all offsets inside this block.
565 if (last_call_adr >= blk_starts[i]) {
566 blk_size += nop_size;
567 }
568 }
569 if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
570 // A nop is inserted between "avoid back to back" instructions.
571 // ScheduleAndBundle() can rearrange nodes in a block,
572 // so check all offsets inside this block.
573 if (last_avoid_back_to_back_adr >= blk_starts[i]) {
574 blk_size += nop_size;
789 // New functionality:
790 // Assert if the local is not top. In product mode let the new node
791 // override the old entry.
792 assert(local == C->top(), "LocArray collision");
793 if (local == C->top()) {
794 return;
795 }
796 array->pop();
797 }
798 const Type *t = local->bottom_type();
799
800 // Is it a safepoint scalar object node?
801 if (local->is_SafePointScalarObject()) {
802 SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
803
804 ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
805 if (sv == nullptr) {
806 ciKlass* cik = t->is_oopptr()->exact_klass();
807 assert(cik->is_instance_klass() ||
808 cik->is_array_klass(), "Not supported allocation.");
809 uint first_ind = spobj->first_index(sfpt->jvms());
810 // Nullable, scalarized inline types have an is_init input
811 // that needs to be checked before using the field values.
812 ScopeValue* is_init = nullptr;
813 if (cik->is_inlinetype()) {
814 Node* init_node = sfpt->in(first_ind++);
815 assert(init_node != nullptr, "is_init node not found");
816 if (!init_node->is_top()) {
817 const TypeInt* init_type = init_node->bottom_type()->is_int();
818 if (init_node->is_Con()) {
819 is_init = new ConstantIntValue(init_type->get_con());
820 } else {
821 OptoReg::Name init_reg = C->regalloc()->get_reg_first(init_node);
822 is_init = new_loc_value(C->regalloc(), init_reg, Location::normal);
823 }
824 }
825 }
826 sv = new ObjectValue(spobj->_idx,
827 new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, is_init);
828 set_sv_for_object_node(objs, sv);
829
830 for (uint i = 0; i < spobj->n_fields(); i++) {
831 Node* fld_node = sfpt->in(first_ind+i);
832 (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
833 }
834 }
835 array->append(sv);
836 return;
837 } else if (local->is_SafePointScalarMerge()) {
838 SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
839 ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
840
841 if (mv == nullptr) {
842 GrowableArray<ScopeValue*> deps;
843
844 int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
845 (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
846 assert(deps.length() == 1, "missing value");
847
848 int selector_idx = smerge->selector_idx(sfpt->jvms());
849 (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);
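
// Editorial sketch: how the is_init input above is classified, hedged with
// simplified inputs. A constant is_init becomes a ConstantIntValue; otherwise
// its register/stack location from the register allocator is recorded; a top
// input leaves no is_init value at all.
enum class ExampleInitKind { Missing, Constant, Location };
static ExampleInitKind example_is_init_kind(bool input_is_top, bool input_is_con) {
  if (input_is_top) return ExampleInitKind::Missing;
  return input_is_con ? ExampleInitKind::Constant  // ConstantIntValue(get_con())
                      : ExampleInitKind::Location; // new_loc_value(...)
}
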
1056 continue;
1057 }
1058
1059 ObjectValue* other = sv_for_node_id(objs, n->_idx);
1060 if (ov == other) {
1061 return true;
1062 }
1063 }
1064 return false;
1065 }
1066
1067 //--------------------------Process_OopMap_Node--------------------------------
1068 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1069 // Handle special safepoint nodes for synchronization
1070 MachSafePointNode *sfn = mach->as_MachSafePoint();
1071 MachCallNode *mcall;
1072
1073 int safepoint_pc_offset = current_offset;
1074 bool is_method_handle_invoke = false;
1075 bool return_oop = false;
1076 bool return_scalarized = false;
1077 bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1078 bool arg_escape = false;
1079
1080 // Add the safepoint in the DebugInfoRecorder
1081 if( !mach->is_MachCall() ) {
1082 mcall = nullptr;
1083 C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1084 } else {
1085 mcall = mach->as_MachCall();
1086
1087 // Is the call a MethodHandle call?
1088 if (mcall->is_MachCallJava()) {
1089 if (mcall->as_MachCallJava()->_method_handle_invoke) {
1090 assert(C->has_method_handle_invokes(), "must have been set during call generation");
1091 is_method_handle_invoke = true;
1092 }
1093 arg_escape = mcall->as_MachCallJava()->_arg_escape;
1094 }
1095
1096 // Check if a call returns an object.
1097 if (mcall->returns_pointer() || mcall->returns_scalarized()) {
1098 return_oop = true;
1099 }
1100 if (mcall->returns_scalarized()) {
1101 return_scalarized = true;
1102 }
1103 safepoint_pc_offset += mcall->ret_addr_offset();
1104 C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1105 }
1106
1107 // Loop over the JVMState list to add scope information
1108 // Do not skip safepoints with a null method; they need monitor info
1109 JVMState* youngest_jvms = sfn->jvms();
1110 int max_depth = youngest_jvms->depth();
1111
1112 // Allocate the object pool for scalar-replaced objects -- the map from
1113 // small-integer keys (which can be recorded in the local and ostack
1114 // arrays) to descriptions of the object state.
1115 GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1116
1117 // Visit scopes from oldest to youngest.
1118 for (int depth = 1; depth <= max_depth; depth++) {
1119 JVMState* jvms = youngest_jvms->of_depth(depth);
1120 int idx;
1121 ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1122 // Safepoints that do not have method() set only provide oop-map and monitor info
1261 DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1262 DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1263
1264 // Make method available for all Safepoints
1265 ciMethod* scope_method = method ? method : C->method();
1266 // Describe the scope here
1267 assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1268 assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1269 // Now we can describe the scope.
1270 methodHandle null_mh;
1271 bool rethrow_exception = false;
1272 C->debug_info()->describe_scope(
1273 safepoint_pc_offset,
1274 null_mh,
1275 scope_method,
1276 jvms->bci(),
1277 jvms->should_reexecute(),
1278 rethrow_exception,
1279 is_method_handle_invoke,
1280 return_oop,
1281 return_scalarized,
1282 has_ea_local_in_scope,
1283 arg_escape,
1284 locvals,
1285 expvals,
1286 monvals
1287 );
1288 } // End jvms loop
1289
1290 // Mark the end of the scope set.
1291 C->debug_info()->end_safepoint(safepoint_pc_offset);
1292 }
1293
1294
1295
1296 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1297 class NonSafepointEmitter {
1298 Compile* C;
1299 JVMState* _pending_jvms;
1300 int _pending_offset;
1301
1636 MachNode *nop = new MachNopNode(nops_cnt);
1637 block->insert_node(nop, j++);
1638 last_inst++;
1639 C->cfg()->map_node_to_block(nop, block);
1640 // Ensure enough space.
1641 masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1642 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1643 C->record_failure("CodeCache is full");
1644 return;
1645 }
1646 nop->emit(masm, C->regalloc());
1647 masm->code()->flush_bundle(true);
1648 current_offset = masm->offset();
1649 }
1650
1651 bool observe_safepoint = is_sfn;
1652 // Remember the start of the last call in a basic block
1653 if (is_mcall) {
1654 MachCallNode *mcall = mach->as_MachCall();
1655
1656 if (mcall->entry_point() != nullptr) {
1657 // This destination address is NOT PC-relative
1658 mcall->method_set((intptr_t)mcall->entry_point());
1659 }
1660
1661 // Save the return address
1662 call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1663
1664 observe_safepoint = mcall->guaranteed_safepoint();
1665 }
1666
1667 // sfn is valid whenever mcall is valid because MachCallNode inherits from MachSafePointNode
1668 if (observe_safepoint) {
1669 // Handle special safepoint nodes for synchronization
1670 if (!is_mcall) {
1671 MachSafePointNode *sfn = mach->as_MachSafePoint();
1672 // !!!!! Stubs only need an oopmap right now, so bail out
1673 if (sfn->jvms()->method() == nullptr) {
1674 // Write the oopmap directly to the code blob??!!
1675 continue;
1676 }
1677 } // End synchronization
1678
1679 non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1780 if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1781 node_offsets[n->_idx] = masm->offset();
1782 }
1783 #endif
1784 assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1785
1786 // "Normal" instruction case
1787 DEBUG_ONLY(uint instr_offset = masm->offset());
1788 n->emit(masm, C->regalloc());
1789 current_offset = masm->offset();
1790
1791 // Above we only verified that there is enough space in the instruction section.
1792 // However, the instruction may emit stubs that cause code buffer expansion.
1793 // Bail out here if expansion failed due to a lack of code cache space.
1794 if (C->failing()) {
1795 return;
1796 }
1797
1798 assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1799 "ret_addr_offset() not within emitted code");
1800 #ifdef ASSERT
1801 uint n_size = n->size(C->regalloc());
1802 if (n_size < (current_offset-instr_offset)) {
1803 MachNode* mach = n->as_Mach();
1804 n->dump();
1805 mach->dump_format(C->regalloc(), tty);
1806 tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1807 Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
1808 tty->print_cr(" ------------------- ");
1809 BufferBlob* blob = this->scratch_buffer_blob();
1810 address blob_begin = blob->content_begin();
1811 Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1812 assert(false, "wrong size of mach node");
1813 }
1814 #endif
1815 non_safepoints.observe_instruction(n, current_offset);
1816
1817 // mcall is the last "call" that can be a safepoint.
1818 // Record it so we can see if a poll will directly follow it,
1819 // in which case we'll need a pad to make the PcDesc sites unique.
3215 anti_do_use( b, n, _regalloc->get_reg_first(def) );
3216 anti_do_use( b, n, _regalloc->get_reg_second(def) );
3217 }
3218 }
3219 // Do not allow defs of new derived values to float above GC
3220 // points unless the base is definitely available at the GC point.
3221
3222 Node *m = b->get_node(i);
3223
3224 // Add precedence edge from following safepoint to use of derived pointer
3225 if( last_safept_node != end_node &&
3226 m != last_safept_node) {
3227 for (uint k = 1; k < m->req(); k++) {
3228 const Type *t = m->in(k)->bottom_type();
3229 if( t->isa_oop_ptr() &&
3230 t->is_ptr()->offset() != 0 ) {
3231 last_safept_node->add_prec( m );
3232 break;
3233 }
3234 }
3235
3236 // Do not allow a CheckCastPP node whose input is a raw pointer to
3237 // float past a safepoint. This can occur when a buffered inline
3238 // type is allocated in a loop and the CheckCastPP from that
3239 // allocation is reused outside the loop. If the use inside the
3240 // loop is scalarized the CheckCastPP will no longer be connected
3241 // to the loop safepoint. See JDK-8264340.
3242 if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
3243 Node *def = m->in(1);
3244 if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
3245 last_safept_node->add_prec(m);
3246 }
3247 }
3248 }
3249
3250 if( n->jvms() ) { // Precedence edge from derived to safept
3251 // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3252 if( b->get_node(last_safept) != last_safept_node ) {
3253 last_safept = b->find_node(last_safept_node);
3254 }
3255 for( uint j=last_safept; j > i; j-- ) {
3256 Node *mach = b->get_node(j);
3257 if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3258 mach->add_prec( n );
3259 }
3260 last_safept = i;
3261 last_safept_node = m;
3262 }
3263 }
3264
3265 if (fat_proj_seen) {
3266 // Garbage collect pinch nodes that were not consumed.
3267 // They are usually created by a fat kill MachProj for a call.
3386 }
3387 #endif
3388
3389 //-----------------------init_scratch_buffer_blob------------------------------
3390 // Construct a temporary BufferBlob and cache it for this compile.
3391 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3392 // If there is already a scratch buffer blob allocated and the
3393 // constant section is big enough, use it. Otherwise free the
3394 // current and allocate a new one.
3395 BufferBlob* blob = scratch_buffer_blob();
3396 if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3397 // Use the current blob.
3398 } else {
3399 if (blob != nullptr) {
3400 BufferBlob::free(blob);
3401 }
3402
3403 ResourceMark rm;
3404 _scratch_const_size = const_size;
3405 int size = C2Compiler::initial_code_buffer_size(const_size);
3406 if (C->has_scalarized_args()) {
3407 // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
3408 // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
3409 ciMethod* method = C->method();
3410 int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
3411 int arg_num = 0;
3412 if (!method->is_static()) {
3413 if (method->is_scalarized_arg(arg_num)) {
3414 size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
3415 }
3416 arg_num++;
3417 }
3418 for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
3419 if (method->is_scalarized_arg(arg_num)) {
3420 size += str.type()->as_inline_klass()->oop_count() * barrier_size;
3421 }
3422 arg_num++;
3423 }
3424 }
3425 blob = BufferBlob::create("Compile::scratch_buffer", size);
3426 // Record the buffer blob for next time.
3427 set_scratch_buffer_blob(blob);
3428 // Have we run out of code space?
3429 if (scratch_buffer_blob() == nullptr) {
3430 // Let CompileBroker disable further compilations.
3431 C->record_failure("Not enough space for scratch buffer in CodeCache");
3432 return;
3433 }
3434 }
3435
3436 // Initialize the relocation buffers
3437 relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3438 set_scratch_locs_memory(locs_buf);
3439 }
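
// Editorial sketch: a hedged worked example of the scalarized-args sizing in
// init_scratch_buffer_blob() above. The barrier sizes are taken from the code;
// the oop counts are hypothetical. With ZGC, a scalarized receiver holding 3
// oops plus one scalarized argument holding 2 oops adds (3 + 2) * 200 = 1000
// bytes to the scratch buffer.
static int example_scalarized_scratch_extra(int receiver_oops, int arg_oops, bool use_zgc) {
  const int barrier_size = use_zgc ? 200 : 7; // release build; debug adds 37
  return (receiver_oops + arg_oops) * barrier_size;
}
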
3440
3441
3442 //-----------------------scratch_emit_size-------------------------------------
3443 // Helper function that computes size by emitting code
3444 uint PhaseOutput::scratch_emit_size(const Node* n) {
3475 buf.insts()->set_scratch_emit();
3476 buf.stubs()->set_scratch_emit();
3477
3478 // Do the emission.
3479
3480 Label fakeL; // Fake label for branch instructions.
3481 Label* saveL = nullptr;
3482 uint save_bnum = 0;
3483 bool is_branch = n->is_MachBranch();
3484 C2_MacroAssembler masm(&buf);
3485 masm.bind(fakeL);
3486 if (is_branch) {
3487 n->as_MachBranch()->save_label(&saveL, &save_bnum);
3488 n->as_MachBranch()->label_set(&fakeL, 0);
3489 }
3490 n->emit(&masm, C->regalloc());
3491
3492 // Emitting into the scratch buffer should not fail
3493 assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3494
3495 // Restore label.
3496 if (is_branch) {
3497 n->as_MachBranch()->label_set(saveL, save_bnum);
3498 }
3499
3500 // End scratch_emit_size section.
3501 set_in_scratch_emit_size(false);
3502
3503 return buf.insts_size();
3504 }
3505
3506 void PhaseOutput::install() {
3507 if (!C->should_install_code()) {
3508 return;
3509 } else if (C->stub_function() != nullptr) {
3510 install_stub(C->stub_name());
3511 } else {
3512 install_code(C->method(),
3513 C->entry_bci(),
3514 CompileBroker::compiler2(),
3515 C->has_unsafe_access(),
3516 SharedRuntime::is_wide_vector(C->max_vector_size()));
3517 }
3518 }
3519
3520 void PhaseOutput::install_code(ciMethod* target,
3521 int entry_bci,
3522 AbstractCompiler* compiler,
3523 bool has_unsafe_access,
3524 bool has_wide_vectors) {
3525 // Check if we want to skip execution of all compiled code.
3526 {
3527 #ifndef PRODUCT
3528 if (OptoNoExecute) {
3529 C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
3530 return;
3531 }
3532 #endif
3533 Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3534
3535 if (C->is_osr_compilation()) {
3536 _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3537 _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3538 } else {
3539 _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3540 if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
3541 _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
3542 }
3543 if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
3544 _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
3545 }
3546 if (_code_offsets.value(CodeOffsets::Entry) == -1) {
3547 _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
3548 }
3549 _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3550 }
3551
3552 C->env()->register_method(target,
3553 entry_bci,
3554 &_code_offsets,
3555 _orig_pc_slot_offset_in_bytes,
3556 code_buffer(),
3557 frame_size_in_words(),
3558 _oop_map_set,
3559 &_handler_table,
3560 inc_table(),
3561 compiler,
3562 has_unsafe_access,
3563 SharedRuntime::is_wide_vector(C->max_vector_size()),
3564 C->has_monitors(),
3565 C->has_scoped_access(),
3566 0);
3567
3568 if (C->log() != nullptr) { // Print code cache state into compiler log
3569 C->log()->code_cache_state();
3570 }
3571 }
3572 }
3573 void PhaseOutput::install_stub(const char* stub_name) {
3574 // Entry point will be accessed using stub_entry_point();
3575 if (code_buffer() == nullptr) {
3576 Matcher::soft_match_failure();
3577 } else {
3578 if (PrintAssembly && (WizardMode || Verbose))
3579 tty->print_cr("### Stub::%s", stub_name);
3580
3581 if (!C->failing()) {
3582 assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3583
3584 // Make the NMethod
3585 // For now we mark the frame as never safe for profile stackwalking
3586 RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
|