 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfo.hpp"
#include "code/debugInfoRec.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/allocation.hpp"
#include "opto/ad.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/node.hpp"
#include "opto/optoreg.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
// ...
    _code_buffer("Compile::Fill_buffer"),
    _first_block_size(0),
    _handler_table(),
    _inc_table(),
    _oop_map_set(NULL),
    _scratch_buffer_blob(NULL),
    _scratch_locs_memory(NULL),
    _scratch_const_size(-1),
    _in_scratch_emit_size(false),
    _frame_slots(0),
    _code_offsets(),
    _node_bundling_limit(0),
    _node_bundling_base(NULL),
    _orig_pc_slot(0),
    _orig_pc_slot_offset_in_bytes(0),
    _buf_sizes(),
    _block(NULL),
    _index(0) {
  C->set_output(this);
  if (C->stub_name() == NULL) {
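    // sizeof(address) / VMRegImpl::stack_slot_size converts the width of the
    // saved PC into stack slots (e.g. 8-byte addresses and 4-byte slots on a
    // 64-bit VM give 2 slots), so _orig_pc_slot names the last slots of the
    // fixed-slot area.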
    _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
  }
}

PhaseOutput::~PhaseOutput() {
  C->set_output(NULL);
  if (_scratch_buffer_blob != NULL) {
    BufferBlob::free(_scratch_buffer_blob);
  }
}

void PhaseOutput::perform_mach_node_analysis() {
  // Late barrier analysis must be done after schedule and bundle;
  // otherwise liveness-based spilling will fail.
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->late_barrier_analysis();

  pd_perform_mach_node_analysis();
}

// Convert Nodes to instruction bits and pass off to the VM
void PhaseOutput::Output() {
  // The RootNode's block should be empty at this point
  assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );

  // The number of new nodes (mostly MachNop) is proportional to
  // the number of java calls and inner loops which are aligned.
  if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
                            C->inner_loops()*(OptoLoopAlignment-1)),
                           "out of nodes before code generation" ) ) {
    return;
  }
  // Make sure I can find the Start Node
  Block *entry = C->cfg()->get_block(1);
  Block *broot = C->cfg()->get_root_block();

  const StartNode *start = entry->head()->as_Start();

  // Replace StartNode with prolog
  MachPrologNode *prolog = new MachPrologNode();
  entry->map_node(prolog, 0);
  C->cfg()->map_node_to_block(prolog, entry);
  C->cfg()->unmap_node_from_block(start); // start is no longer in any block

  // Virtual methods need an unverified entry point
  if( C->is_osr_compilation() ) {
    if( PoisonOSREntry ) {
      // TODO: Should use a ShouldNotReachHereNode...
      C->cfg()->insert( broot, 0, new MachBreakpointNode() );
    }
  } else {
    if( C->method() && !C->method()->flags().is_static() ) {
      // Insert unverified entry point
      C->cfg()->insert( broot, 0, new MachUEPNode() );
    }
  }

  // Break before main entry point
  if ((C->method() && C->directive()->BreakAtExecuteOption) ||
      (OptoBreakpoint && C->is_method_compilation()) ||
      (OptoBreakpointOSR && C->is_osr_compilation()) ||
      (OptoBreakpointC2R && !C->method()) ) {
    // checking for C->method() means that OptoBreakpoint does not apply to
    // runtime stubs or frame converters
    C->cfg()->insert( entry, 1, new MachBreakpointNode() );
  }

  // Insert epilogs before every return
  for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
    Block* block = C->cfg()->get_block(i);
    if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
      Node* m = block->end();
      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
        MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
        block->add_inst(epilog);
        C->cfg()->map_node_to_block(epilog, block);
      }
    }
  }

  // Keeper of sizing aspects
  _buf_sizes = BufferSizingData();

  // Initialize code buffer
  estimate_buffer_size(_buf_sizes._const);
  if (C->failing()) return;

  // Pre-compute the length of blocks and replace
  // long branches with short ones if the machine supports it.
  // Must be done before ScheduleAndBundle due to SPARC delay slots.
  uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
  blk_starts[0] = 0;
  shorten_branches(blk_starts);
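  // blk_starts[i] now holds a conservative (worst-case) code offset for block
  // i; fill_buffer() emits the actual instructions against these estimates.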

  ScheduleAndBundle();
  if (C->failing()) {
    return;
  }

  perform_mach_node_analysis();

  // Complete sizing of the code buffer
  CodeBuffer* cb = init_buffer();
  if (cb == NULL || C->failing()) {
    return;
  }

  BuildOopMaps();

  if (C->failing()) {
    return;
  }

  fill_buffer(cb, blk_starts);
// ...
    // Sum all instruction sizes to compute block size
    uint last_inst = block->number_of_nodes();
    uint blk_size = 0;
    for (uint j = 0; j < last_inst; j++) {
      _index = j;
      Node* nj = block->get_node(_index);
      // Handle machine instruction nodes
      if (nj->is_Mach()) {
        MachNode* mach = nj->as_Mach();
        blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
        reloc_size += mach->reloc();
        if (mach->is_MachCall()) {
          // add size information for trampoline stub
          // class CallStubImpl is platform-specific and defined in the *.ad files.
          stub_size  += CallStubImpl::size_call_trampoline();
          reloc_size += CallStubImpl::reloc_call_trampoline();

          MachCallNode *mcall = mach->as_MachCall();
          // This destination address is NOT PC-relative

          mcall->method_set((intptr_t)mcall->entry_point());

          if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
            stub_size  += CompiledStaticCall::to_interp_stub_size();
            reloc_size += CompiledStaticCall::reloc_to_interp_stub();
          }
        } else if (mach->is_MachSafePoint()) {
          // If call/safepoint are adjacent, account for a possible
          // nop to disambiguate the two safepoints.
          // ScheduleAndBundle() can rearrange nodes in a block,
          // so check for all offsets inside this block.
          if (last_call_adr >= blk_starts[i]) {
            blk_size += nop_size;
          }
        }
        if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
          // A nop is inserted between "avoid back to back" instructions.
          // ScheduleAndBundle() can rearrange nodes in a block,
          // so check for all offsets inside this block.
          if (last_avoid_back_to_back_adr >= blk_starts[i]) {
            blk_size += nop_size;
// ...
    // New functionality:
    //   Assert if the local is not top. In product mode let the new node
    //   override the old entry.
    assert(local == C->top(), "LocArray collision");
    if (local == C->top()) {
      return;
    }
    array->pop();
  }
  const Type *t = local->bottom_type();

  // Is it a safepoint scalar object node?
  if (local->is_SafePointScalarObject()) {
    SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();

    ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
    if (sv == NULL) {
      ciKlass* cik = t->is_oopptr()->klass();
      assert(cik->is_instance_klass() ||
             cik->is_array_klass(), "Not supported allocation.");
      ScopeValue* klass_sv = new ConstantOopWriteValue(cik->java_mirror()->constant_encoding());
      sv = spobj->is_auto_box() ? new AutoBoxObjectValue(spobj->_idx, klass_sv)
                                : new ObjectValue(spobj->_idx, klass_sv);
      set_sv_for_object_node(objs, sv);

      uint first_ind = spobj->first_index(sfpt->jvms());
      for (uint i = 0; i < spobj->n_fields(); i++) {
        Node* fld_node = sfpt->in(first_ind+i);
        (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
      }
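      // FillLocArray() recurses into each field value, so nested
      // scalar-replaced objects end up registered in the same objs pool.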
    }
    array->append(sv);
    return;
  }

  // Grab the register number for the local
  OptoReg::Name regnum = C->regalloc()->get_reg_first(local);
  if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
    // Record the double as two float registers.
    // The register mask for such a value always specifies two adjacent
    // float registers, with the lower register number even.
    // Normally, the allocation of high and low words to these registers
    // is irrelevant, because nearly all operations on register pairs
    // (e.g., StoreD) treat them as a single unit.
    // Here, we assume in addition that the words in these two registers are
    // stored "naturally" (by operations like StoreD and double stores
// ...
    break;
  }
}

// Determine if this node starts a bundle
bool PhaseOutput::starts_bundle(const Node *n) const {
  return (_node_bundling_limit > n->_idx &&
          _node_bundling_base[n->_idx].starts_bundle());
}

//--------------------------Process_OopMap_Node--------------------------------
void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
  // Handle special safepoint nodes for synchronization
  MachSafePointNode *sfn = mach->as_MachSafePoint();
  MachCallNode *mcall;

  int safepoint_pc_offset = current_offset;
  bool is_method_handle_invoke = false;
  bool is_opt_native = false;
  bool return_oop = false;
  bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
  bool arg_escape = false;

  // Add the safepoint in the DebugInfoRecorder
  if( !mach->is_MachCall() ) {
    mcall = NULL;
    C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
  } else {
    mcall = mach->as_MachCall();

    // Is the call a MethodHandle call?
    if (mcall->is_MachCallJava()) {
      if (mcall->as_MachCallJava()->_method_handle_invoke) {
        assert(C->has_method_handle_invokes(), "must have been set during call generation");
        is_method_handle_invoke = true;
      }
      arg_escape = mcall->as_MachCallJava()->_arg_escape;
    } else if (mcall->is_MachCallNative()) {
      is_opt_native = true;
    }

    // Check if a call returns an object.
    if (mcall->returns_pointer()) {
      return_oop = true;
    }
    safepoint_pc_offset += mcall->ret_addr_offset();
    C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
  }
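  // For a call, the recorded safepoint PC is the return address (the PC the
  // VM sees when it walks the stack), hence the ret_addr_offset() adjustment
  // applied just above.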

  // Loop over the JVMState list to add scope information
  // Do not skip safepoints with a NULL method, they need monitor info
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();

  // Allocate the object pool for scalar-replaced objects -- the map from
  // small-integer keys (which can be recorded in the local and ostack
  // arrays) to descriptions of the object state.
  GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();

  // Visit scopes from oldest to youngest.
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int idx;
    ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
    // Safepoints that do not have method() set only provide oop-map and monitor info
// ...
    DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);

    // Make method available for all Safepoints
    ciMethod* scope_method = method ? method : C->method();
    // Describe the scope here
    assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
    assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
    // Now we can describe the scope.
    methodHandle null_mh;
    bool rethrow_exception = false;
    C->debug_info()->describe_scope(
      safepoint_pc_offset,
      null_mh,
      scope_method,
      jvms->bci(),
      jvms->should_reexecute(),
      rethrow_exception,
      is_method_handle_invoke,
      is_opt_native,
      return_oop,
      has_ea_local_in_scope,
      arg_escape,
      locvals,
      expvals,
      monvals
    );
  } // End jvms loop

  // Mark the end of the scope set.
  C->debug_info()->end_safepoint(safepoint_pc_offset);
}



// A simplified version of Process_OopMap_Node, to handle non-safepoints.
class NonSafepointEmitter {
  Compile* C;
  JVMState* _pending_jvms;
  int _pending_offset;

// ...
          MachNode *nop = new MachNopNode(nops_cnt);
          block->insert_node(nop, j++);
          last_inst++;
          C->cfg()->map_node_to_block(nop, block);
          // Ensure enough space.
          cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
          if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
            C->record_failure("CodeCache is full");
            return;
          }
          nop->emit(*cb, C->regalloc());
          cb->flush_bundle(true);
          current_offset = cb->insts_size();
        }

        bool observe_safepoint = is_sfn;
        // Remember the start of the last call in a basic block
        if (is_mcall) {
          MachCallNode *mcall = mach->as_MachCall();

          // This destination address is NOT PC-relative
          mcall->method_set((intptr_t)mcall->entry_point());

          // Save the return address
          call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();

          observe_safepoint = mcall->guaranteed_safepoint();
        }

        // sfn is always valid when mcall is valid, because MachCallNode
        // inherits from MachSafePointNode
        if (observe_safepoint) {
          // Handle special safepoint nodes for synchronization
          if (!is_mcall) {
            MachSafePointNode *sfn = mach->as_MachSafePoint();
            // !!!!! Stubs only need an oopmap right now, so bail out
            if (sfn->jvms()->method() == NULL) {
              // Write the oopmap directly to the code blob??!!
              continue;
            }
          } // End synchronization

          non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
// ...
      if ((node_offsets != NULL) && (n->_idx < node_offset_limit)) {
        node_offsets[n->_idx] = cb->insts_size();
      }
#endif
      assert(!C->failing(), "Should not reach here if failing.");

      // "Normal" instruction case
      DEBUG_ONLY(uint instr_offset = cb->insts_size());
      n->emit(*cb, C->regalloc());
      current_offset = cb->insts_size();

      // Above we only verified that there is enough space in the instruction section.
      // However, the instruction may emit stubs that cause code buffer expansion.
      // Bail out here if expansion failed due to a lack of code cache space.
      if (C->failing()) {
        return;
      }

      assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
             "ret_addr_offset() not within emitted code");

#ifdef ASSERT
      uint n_size = n->size(C->regalloc());
      if (n_size < (current_offset-instr_offset)) {
        MachNode* mach = n->as_Mach();
        n->dump();
        mach->dump_format(C->regalloc(), tty);
        tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
        Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
        tty->print_cr(" ------------------- ");
        BufferBlob* blob = this->scratch_buffer_blob();
        address blob_begin = blob->content_begin();
        Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
        assert(false, "wrong size of mach node");
      }
#endif
      non_safepoints.observe_instruction(n, current_offset);

      // mcall is the last "call" that can be a safepoint;
      // record it so we can see if a poll will directly follow it,
      // in which case we'll need a pad to make the PcDesc sites unique.
// ...
        anti_do_use( b, n, _regalloc->get_reg_first(def) );
        anti_do_use( b, n, _regalloc->get_reg_second(def) );
      }
    }
    // Do not allow defs of new derived values to float above GC
    // points unless the base is definitely available at the GC point.

    Node *m = b->get_node(i);

    // Add precedence edge from following safepoint to use of derived pointer
    if( last_safept_node != end_node &&
        m != last_safept_node) {
      for (uint k = 1; k < m->req(); k++) {
        const Type *t = m->in(k)->bottom_type();
        if( t->isa_oop_ptr() &&
            t->is_ptr()->offset() != 0 ) {
          last_safept_node->add_prec( m );
          break;
        }
      }
    }

    if( n->jvms() ) { // Precedence edge from derived to safept
      // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
      if( b->get_node(last_safept) != last_safept_node ) {
        last_safept = b->find_node(last_safept_node);
      }
      for( uint j=last_safept; j > i; j-- ) {
        Node *mach = b->get_node(j);
        if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
          mach->add_prec( n );
      }
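      // The loop above gave every AddP (derived-pointer def) between this
      // safepoint and the following one a precedence edge on the safepoint,
      // so scheduling cannot hoist those defs above it.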
      last_safept = i;
      last_safept_node = m;
    }
  }

  if (fat_proj_seen) {
    // Garbage collect pinch nodes that were not consumed.
    // They are usually created by a fat kill MachProj for a call.
// ...
}
#endif

//-----------------------init_scratch_buffer_blob------------------------------
// Construct a temporary BufferBlob and cache it for this compile.
void PhaseOutput::init_scratch_buffer_blob(int const_size) {
  // If there is already a scratch buffer blob allocated and the
  // constant section is big enough, use it. Otherwise free the
  // current and allocate a new one.
  BufferBlob* blob = scratch_buffer_blob();
  if ((blob != NULL) && (const_size <= _scratch_const_size)) {
    // Use the current blob.
  } else {
    if (blob != NULL) {
      BufferBlob::free(blob);
    }

    ResourceMark rm;
    _scratch_const_size = const_size;
    int size = C2Compiler::initial_code_buffer_size(const_size);
    blob = BufferBlob::create("Compile::scratch_buffer", size);
    // Record the buffer blob for next time.
    set_scratch_buffer_blob(blob);
    // Have we run out of code space?
    if (scratch_buffer_blob() == NULL) {
      // Let CompileBroker disable further compilations.
      C->record_failure("Not enough space for scratch buffer in CodeCache");
      return;
    }
  }

  // Initialize the relocation buffers
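  // (The buffer is carved from the tail of the blob. MAX_locs_size is in
  // relocInfo units, matching the relocInfo* pointer arithmetic below.)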
  relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
  set_scratch_locs_memory(locs_buf);
}


//-----------------------scratch_emit_size-------------------------------------
// Helper function that computes size by emitting code
uint PhaseOutput::scratch_emit_size(const Node* n) {
// ...
  int lsize = MAX_locs_size / 3;
  buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
  buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
  buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
  // Mark as scratch buffer.
  buf.consts()->set_scratch_emit();
  buf.insts()->set_scratch_emit();
  buf.stubs()->set_scratch_emit();

  // Do the emission.

  Label fakeL; // Fake label for branch instructions.
  Label* saveL = NULL;
  uint save_bnum = 0;
  bool is_branch = n->is_MachBranch();
  if (is_branch) {
    MacroAssembler masm(&buf);
    masm.bind(fakeL);
    n->as_MachBranch()->save_label(&saveL, &save_bnum);
    n->as_MachBranch()->label_set(&fakeL, 0);
  }
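  // Binding the branch to fakeL lets the node emit without a resolved real
  // target; only the emitted size matters here, not the branch destination.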
  n->emit(buf, C->regalloc());

  // Emitting into the scratch buffer should not fail
  assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());

  if (is_branch) // Restore label.
    n->as_MachBranch()->label_set(saveL, save_bnum);

  // End scratch_emit_size section.
  set_in_scratch_emit_size(false);

  return buf.insts_size();
}

void PhaseOutput::install() {
  if (!C->should_install_code()) {
    return;
  } else if (C->stub_function() != NULL) {
    install_stub(C->stub_name());
  } else {
    install_code(C->method(),
                 C->entry_bci(),
                 CompileBroker::compiler2(),
                 C->has_unsafe_access(),
                 SharedRuntime::is_wide_vector(C->max_vector_size()),
                 C->rtm_state());
  }
// ...
                               int entry_bci,
                               AbstractCompiler* compiler,
                               bool has_unsafe_access,
                               bool has_wide_vectors,
                               RTMState rtm_state) {
  // Check if we want to skip execution of all compiled code.
  {
#ifndef PRODUCT
    if (OptoNoExecute) {
      C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
      return;
    }
#endif
    Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);

    if (C->is_osr_compilation()) {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
    } else {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
    }

    C->env()->register_method(target,
                              entry_bci,
                              &_code_offsets,
                              _orig_pc_slot_offset_in_bytes,
                              code_buffer(),
                              frame_size_in_words(),
                              oop_map_set(),
                              &_handler_table,
                              inc_table(),
                              compiler,
                              has_unsafe_access,
                              SharedRuntime::is_wide_vector(C->max_vector_size()),
                              C->rtm_state(),
                              C->native_invokers());

    if (C->log() != NULL) { // Print code cache state into compiler log
      C->log()->code_cache_state();
    }
  }
}

void PhaseOutput::install_stub(const char* stub_name) {
  // Entry point will be accessed using stub_entry_point();
  if (code_buffer() == NULL) {
    Matcher::soft_match_failure();
  } else {
    if (PrintAssembly && (WizardMode || Verbose))
      tty->print_cr("### Stub::%s", stub_name);

    if (!C->failing()) {
      assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");

      // Make the NMethod
      // For now we mark the frame as never safe for profile stackwalking
      RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
// ...

//=============================================================================
// A second copy of the same file follows: a variant of PhaseOutput with
// inline type (Valhalla) support added.
//=============================================================================

 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfo.hpp"
#include "code/debugInfoRec.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/allocation.hpp"
#include "opto/ad.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/node.hpp"
#include "opto/optoreg.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
// ...
    _code_buffer("Compile::Fill_buffer"),
    _first_block_size(0),
    _handler_table(),
    _inc_table(),
    _oop_map_set(NULL),
    _scratch_buffer_blob(NULL),
    _scratch_locs_memory(NULL),
    _scratch_const_size(-1),
    _in_scratch_emit_size(false),
    _frame_slots(0),
    _code_offsets(),
    _node_bundling_limit(0),
    _node_bundling_base(NULL),
    _orig_pc_slot(0),
    _orig_pc_slot_offset_in_bytes(0),
    _buf_sizes(),
    _block(NULL),
    _index(0) {
  C->set_output(this);
  if (C->stub_name() == NULL) {
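    // The slots reserved for stack repair and for inline types returned as
    // fields are skipped first, so that _orig_pc_slot still lands on the
    // slots that hold the original PC (two slots on a 64-bit VM, since
    // sizeof(address) / VMRegImpl::stack_slot_size == 8 / 4).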
    int fixed_slots = C->fixed_slots();
    if (C->needs_stack_repair()) {
      fixed_slots -= 2;
    }
    // TODO 8284443 Only reserve extra slot if needed
    if (InlineTypeReturnedAsFields) {
      fixed_slots -= 2;
    }
    _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
  }
}

PhaseOutput::~PhaseOutput() {
  C->set_output(NULL);
  if (_scratch_buffer_blob != NULL) {
    BufferBlob::free(_scratch_buffer_blob);
  }
}

void PhaseOutput::perform_mach_node_analysis() {
  // Late barrier analysis must be done after schedule and bundle;
  // otherwise liveness-based spilling will fail.
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->late_barrier_analysis();

  pd_perform_mach_node_analysis();
}

// Convert Nodes to instruction bits and pass off to the VM
void PhaseOutput::Output() {
  // The RootNode's block should be empty at this point
  assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );

  // The number of new nodes (mostly MachNop) is proportional to
  // the number of java calls and inner loops which are aligned.
  if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
                            C->inner_loops()*(OptoLoopAlignment-1)),
                           "out of nodes before code generation" ) ) {
    return;
  }
  // Make sure I can find the Start Node
  Block *entry = C->cfg()->get_block(1);
  Block *broot = C->cfg()->get_root_block();

  const StartNode *start = entry->head()->as_Start();

  // Replace StartNode with prolog
  Label verified_entry;
  MachPrologNode* prolog = new MachPrologNode(&verified_entry);
  entry->map_node(prolog, 0);
  C->cfg()->map_node_to_block(prolog, entry);
  C->cfg()->unmap_node_from_block(start); // start is no longer in any block

  // Virtual methods need an unverified entry point
  if (C->is_osr_compilation()) {
    if (PoisonOSREntry) {
      // TODO: Should use a ShouldNotReachHereNode...
      C->cfg()->insert( broot, 0, new MachBreakpointNode() );
    }
  } else {
    if (C->method()) {
      if (C->method()->has_scalarized_args()) {
        // Add entry point to unpack all inline type arguments
        C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
        if (!C->method()->is_static()) {
          // Add verified/unverified entry points to only unpack the inline type receiver at interface calls
          C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
          C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ true));
          C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
        }
      } else if (!C->method()->is_static()) {
        // Insert unverified entry point
        C->cfg()->insert(broot, 0, new MachUEPNode());
      }
    }
  }

  // Break before main entry point
  if ((C->method() && C->directive()->BreakAtExecuteOption) ||
      (OptoBreakpoint && C->is_method_compilation()) ||
      (OptoBreakpointOSR && C->is_osr_compilation()) ||
      (OptoBreakpointC2R && !C->method()) ) {
    // checking for C->method() means that OptoBreakpoint does not apply to
    // runtime stubs or frame converters
    C->cfg()->insert( entry, 1, new MachBreakpointNode() );
  }

  // Insert epilogs before every return
  for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
    Block* block = C->cfg()->get_block(i);
    if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
      Node* m = block->end();
      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
        MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
        block->add_inst(epilog);
        C->cfg()->map_node_to_block(epilog, block);
      }
    }
  }

  // Keeper of sizing aspects
  _buf_sizes = BufferSizingData();

  // Initialize code buffer
  estimate_buffer_size(_buf_sizes._const);
  if (C->failing()) return;

  // Pre-compute the length of blocks and replace
  // long branches with short ones if the machine supports it.
  // Must be done before ScheduleAndBundle due to SPARC delay slots.
  uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
  blk_starts[0] = 0;
  shorten_branches(blk_starts);
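  // blk_starts[i] now holds a conservative (worst-case) code offset for block
  // i; fill_buffer() emits the actual instructions against these estimates.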

  if (!C->is_osr_compilation() && C->has_scalarized_args()) {
    // Compute the offsets of the entry points required by the inline type calling convention
    if (!C->method()->is_static()) {
      // We have entries at the beginning of the method, implemented by the first 4 nodes.
      // Entry (unverified) @ offset 0
      // Verified_Inline_Entry_RO
      // Inline_Entry (unverified)
      // Verified_Inline_Entry
      uint offset = 0;
      _code_offsets.set_value(CodeOffsets::Entry, offset);

      offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
      _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);

      offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
      _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);

      offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
      _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
    } else {
      _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
      _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
    }
  }
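  // The -1 recorded for a static method is resolved in install_code(), once
  // _first_block_size (the verified entry offset) is known.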

  ScheduleAndBundle();
  if (C->failing()) {
    return;
  }

  perform_mach_node_analysis();

  // Complete sizing of the code buffer
  CodeBuffer* cb = init_buffer();
  if (cb == NULL || C->failing()) {
    return;
  }

  BuildOopMaps();

  if (C->failing()) {
    return;
  }

  fill_buffer(cb, blk_starts);
// ...
    // Sum all instruction sizes to compute block size
    uint last_inst = block->number_of_nodes();
    uint blk_size = 0;
    for (uint j = 0; j < last_inst; j++) {
      _index = j;
      Node* nj = block->get_node(_index);
      // Handle machine instruction nodes
      if (nj->is_Mach()) {
        MachNode* mach = nj->as_Mach();
        blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
        reloc_size += mach->reloc();
        if (mach->is_MachCall()) {
          // add size information for trampoline stub
          // class CallStubImpl is platform-specific and defined in the *.ad files.
          stub_size  += CallStubImpl::size_call_trampoline();
          reloc_size += CallStubImpl::reloc_call_trampoline();

          MachCallNode *mcall = mach->as_MachCall();
          // This destination address is NOT PC-relative

          if (mcall->entry_point() != NULL) {
            mcall->method_set((intptr_t)mcall->entry_point());
          }
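          // (entry_point() may be NULL here when the call's destination is
          // not known yet; method_set() is only applied to a real address)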

          if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
            stub_size  += CompiledStaticCall::to_interp_stub_size();
            reloc_size += CompiledStaticCall::reloc_to_interp_stub();
          }
        } else if (mach->is_MachSafePoint()) {
          // If call/safepoint are adjacent, account for a possible
          // nop to disambiguate the two safepoints.
          // ScheduleAndBundle() can rearrange nodes in a block,
          // so check for all offsets inside this block.
          if (last_call_adr >= blk_starts[i]) {
            blk_size += nop_size;
          }
        }
        if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
          // A nop is inserted between "avoid back to back" instructions.
          // ScheduleAndBundle() can rearrange nodes in a block,
          // so check for all offsets inside this block.
          if (last_avoid_back_to_back_adr >= blk_starts[i]) {
            blk_size += nop_size;
// ...
    // New functionality:
    //   Assert if the local is not top. In product mode let the new node
    //   override the old entry.
    assert(local == C->top(), "LocArray collision");
    if (local == C->top()) {
      return;
    }
    array->pop();
  }
  const Type *t = local->bottom_type();

  // Is it a safepoint scalar object node?
  if (local->is_SafePointScalarObject()) {
    SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();

    ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
    if (sv == NULL) {
      ciKlass* cik = t->is_oopptr()->klass();
      assert(cik->is_instance_klass() ||
             cik->is_array_klass(), "Not supported allocation.");
      uint first_ind = spobj->first_index(sfpt->jvms());
      // Nullable, scalarized inline types have an is_init input
      // that needs to be checked before using the field values.
      ScopeValue* is_init = NULL;
      if (cik->is_inlinetype()) {
        Node* init_node = sfpt->in(first_ind++);
        assert(init_node != NULL, "is_init node not found");
        if (!init_node->is_top()) {
          const TypeInt* init_type = init_node->bottom_type()->is_int();
          if (init_node->is_Con()) {
            is_init = new ConstantIntValue(init_type->get_con());
          } else {
            OptoReg::Name init_reg = C->regalloc()->get_reg_first(init_node);
            is_init = new_loc_value(C->regalloc(), init_reg, Location::normal);
          }
        }
      }
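      // is_init stays NULL when the object is not an inline type or when the
      // is_init input is top; the ObjectValue below then carries no
      // initialization marker.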
      ScopeValue* klass_sv = new ConstantOopWriteValue(cik->java_mirror()->constant_encoding());
      sv = spobj->is_auto_box() ? new AutoBoxObjectValue(spobj->_idx, klass_sv)
                                : new ObjectValue(spobj->_idx, klass_sv, is_init);
      set_sv_for_object_node(objs, sv);

      for (uint i = 0; i < spobj->n_fields(); i++) {
        Node* fld_node = sfpt->in(first_ind+i);
        (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
      }
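      // Note: for inline types first_ind was advanced past the is_init input
      // above, so this loop reads exactly the n_fields() field values;
      // FillLocArray() recurses for nested scalar-replaced objects.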
    }
    array->append(sv);
    return;
  }

  // Grab the register number for the local
  OptoReg::Name regnum = C->regalloc()->get_reg_first(local);
  if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
    // Record the double as two float registers.
    // The register mask for such a value always specifies two adjacent
    // float registers, with the lower register number even.
    // Normally, the allocation of high and low words to these registers
    // is irrelevant, because nearly all operations on register pairs
    // (e.g., StoreD) treat them as a single unit.
    // Here, we assume in addition that the words in these two registers are
    // stored "naturally" (by operations like StoreD and double stores
// ...
    break;
  }
}

// Determine if this node starts a bundle
bool PhaseOutput::starts_bundle(const Node *n) const {
  return (_node_bundling_limit > n->_idx &&
          _node_bundling_base[n->_idx].starts_bundle());
}

//--------------------------Process_OopMap_Node--------------------------------
void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
  // Handle special safepoint nodes for synchronization
  MachSafePointNode *sfn = mach->as_MachSafePoint();
  MachCallNode *mcall;

  int safepoint_pc_offset = current_offset;
  bool is_method_handle_invoke = false;
  bool is_opt_native = false;
  bool return_oop = false;
  bool return_scalarized = false;
  bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
  bool arg_escape = false;

  // Add the safepoint in the DebugInfoRecorder
  if( !mach->is_MachCall() ) {
    mcall = NULL;
    C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
  } else {
    mcall = mach->as_MachCall();

    // Is the call a MethodHandle call?
    if (mcall->is_MachCallJava()) {
      if (mcall->as_MachCallJava()->_method_handle_invoke) {
        assert(C->has_method_handle_invokes(), "must have been set during call generation");
        is_method_handle_invoke = true;
      }
      arg_escape = mcall->as_MachCallJava()->_arg_escape;
    } else if (mcall->is_MachCallNative()) {
      is_opt_native = true;
    }

    // Check if a call returns an object.
    if (mcall->returns_pointer() || mcall->returns_scalarized()) {
      return_oop = true;
    }
    if (mcall->returns_scalarized()) {
      return_scalarized = true;
    }
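    // Note that returns_scalarized() also sets return_oop above: the debug
    // info still treats a scalarized return as (potentially) producing an oop.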
    safepoint_pc_offset += mcall->ret_addr_offset();
    C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
  }
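  // For a call, the recorded safepoint PC is the return address (the PC the
  // VM sees when it walks the stack), hence the ret_addr_offset() adjustment
  // applied just above.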

  // Loop over the JVMState list to add scope information
  // Do not skip safepoints with a NULL method, they need monitor info
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();

  // Allocate the object pool for scalar-replaced objects -- the map from
  // small-integer keys (which can be recorded in the local and ostack
  // arrays) to descriptions of the object state.
  GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();

  // Visit scopes from oldest to youngest.
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int idx;
    ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
    // Safepoints that do not have method() set only provide oop-map and monitor info
// ...
    DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);

    // Make method available for all Safepoints
    ciMethod* scope_method = method ? method : C->method();
    // Describe the scope here
    assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
    assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
    // Now we can describe the scope.
    methodHandle null_mh;
    bool rethrow_exception = false;
    C->debug_info()->describe_scope(
      safepoint_pc_offset,
      null_mh,
      scope_method,
      jvms->bci(),
      jvms->should_reexecute(),
      rethrow_exception,
      is_method_handle_invoke,
      is_opt_native,
      return_oop,
      return_scalarized,
      has_ea_local_in_scope,
      arg_escape,
      locvals,
      expvals,
      monvals
    );
  } // End jvms loop

  // Mark the end of the scope set.
  C->debug_info()->end_safepoint(safepoint_pc_offset);
}



// A simplified version of Process_OopMap_Node, to handle non-safepoints.
class NonSafepointEmitter {
  Compile* C;
  JVMState* _pending_jvms;
  int _pending_offset;

// ...
          MachNode *nop = new MachNopNode(nops_cnt);
          block->insert_node(nop, j++);
          last_inst++;
          C->cfg()->map_node_to_block(nop, block);
          // Ensure enough space.
          cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
          if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
            C->record_failure("CodeCache is full");
            return;
          }
          nop->emit(*cb, C->regalloc());
          cb->flush_bundle(true);
          current_offset = cb->insts_size();
        }

        bool observe_safepoint = is_sfn;
        // Remember the start of the last call in a basic block
        if (is_mcall) {
          MachCallNode *mcall = mach->as_MachCall();

          if (mcall->entry_point() != NULL) {
            // This destination address is NOT PC-relative
            mcall->method_set((intptr_t)mcall->entry_point());
          }
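          // (as above in shorten_branches(): a NULL entry_point() means the
          // destination is not known yet, so it is only recorded when present)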

          // Save the return address
          call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();

          observe_safepoint = mcall->guaranteed_safepoint();
        }

        // sfn is always valid when mcall is valid, because MachCallNode
        // inherits from MachSafePointNode
        if (observe_safepoint) {
          // Handle special safepoint nodes for synchronization
          if (!is_mcall) {
            MachSafePointNode *sfn = mach->as_MachSafePoint();
            // !!!!! Stubs only need an oopmap right now, so bail out
            if (sfn->jvms()->method() == NULL) {
              // Write the oopmap directly to the code blob??!!
              continue;
            }
          } // End synchronization

          non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
// ...
      if ((node_offsets != NULL) && (n->_idx < node_offset_limit)) {
        node_offsets[n->_idx] = cb->insts_size();
      }
#endif
      assert(!C->failing(), "Should not reach here if failing.");

      // "Normal" instruction case
      DEBUG_ONLY(uint instr_offset = cb->insts_size());
      n->emit(*cb, C->regalloc());
      current_offset = cb->insts_size();

      // Above we only verified that there is enough space in the instruction section.
      // However, the instruction may emit stubs that cause code buffer expansion.
      // Bail out here if expansion failed due to a lack of code cache space.
      if (C->failing()) {
        return;
      }

      assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
             "ret_addr_offset() not within emitted code");
#ifdef ASSERT
      uint n_size = n->size(C->regalloc());
      if (n_size < (current_offset-instr_offset)) {
        MachNode* mach = n->as_Mach();
        n->dump();
        mach->dump_format(C->regalloc(), tty);
        tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
        Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
        tty->print_cr(" ------------------- ");
        BufferBlob* blob = this->scratch_buffer_blob();
        address blob_begin = blob->content_begin();
        Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
        assert(false, "wrong size of mach node");
      }
#endif
      non_safepoints.observe_instruction(n, current_offset);

      // mcall is the last "call" that can be a safepoint;
      // record it so we can see if a poll will directly follow it,
      // in which case we'll need a pad to make the PcDesc sites unique.
// ...
        anti_do_use( b, n, _regalloc->get_reg_first(def) );
        anti_do_use( b, n, _regalloc->get_reg_second(def) );
      }
    }
    // Do not allow defs of new derived values to float above GC
    // points unless the base is definitely available at the GC point.

    Node *m = b->get_node(i);

    // Add precedence edge from following safepoint to use of derived pointer
    if( last_safept_node != end_node &&
        m != last_safept_node) {
      for (uint k = 1; k < m->req(); k++) {
        const Type *t = m->in(k)->bottom_type();
        if( t->isa_oop_ptr() &&
            t->is_ptr()->offset() != 0 ) {
          last_safept_node->add_prec( m );
          break;
        }
      }

      // Do not allow a CheckCastPP node whose input is a raw pointer to
      // float past a safepoint. This can occur when a buffered inline
      // type is allocated in a loop and the CheckCastPP from that
      // allocation is reused outside the loop. If the use inside the
      // loop is scalarized the CheckCastPP will no longer be connected
      // to the loop safepoint. See JDK-8264340.
      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
        Node *def = m->in(1);
        if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
          last_safept_node->add_prec(m);
        }
      }
    }

    if( n->jvms() ) { // Precedence edge from derived to safept
      // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
      if( b->get_node(last_safept) != last_safept_node ) {
        last_safept = b->find_node(last_safept_node);
      }
      for( uint j=last_safept; j > i; j-- ) {
        Node *mach = b->get_node(j);
        if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
          mach->add_prec( n );
      }
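      // The loop above gave every AddP (derived-pointer def) between this
      // safepoint and the following one a precedence edge on the safepoint,
      // so scheduling cannot hoist those defs above it.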
      last_safept = i;
      last_safept_node = m;
    }
  }

  if (fat_proj_seen) {
    // Garbage collect pinch nodes that were not consumed.
    // They are usually created by a fat kill MachProj for a call.
// ...
}
#endif

//-----------------------init_scratch_buffer_blob------------------------------
// Construct a temporary BufferBlob and cache it for this compile.
void PhaseOutput::init_scratch_buffer_blob(int const_size) {
  // If there is already a scratch buffer blob allocated and the
  // constant section is big enough, use it. Otherwise free the
  // current and allocate a new one.
  BufferBlob* blob = scratch_buffer_blob();
  if ((blob != NULL) && (const_size <= _scratch_const_size)) {
    // Use the current blob.
  } else {
    if (blob != NULL) {
      BufferBlob::free(blob);
    }

    ResourceMark rm;
    _scratch_const_size = const_size;
    int size = C2Compiler::initial_code_buffer_size(const_size);
    if (C->has_scalarized_args()) {
      // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
      // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
      ciMethod* method = C->method();
      int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
      int arg_num = 0;
      if (!method->is_static()) {
        if (method->is_scalarized_arg(arg_num)) {
          size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
        }
        arg_num++;
      }
      for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
        if (method->is_scalarized_arg(arg_num)) {
          size += str.type()->as_inline_klass()->oop_count() * barrier_size;
        }
        arg_num++;
      }
    }
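    // barrier_size is a rough per-oop-field estimate: ZGC load barriers are
    // far larger than the default, and debug builds add verification code
    // (the DEBUG_ONLY(+ 37) term).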
    blob = BufferBlob::create("Compile::scratch_buffer", size);
    // Record the buffer blob for next time.
    set_scratch_buffer_blob(blob);
    // Have we run out of code space?
    if (scratch_buffer_blob() == NULL) {
      // Let CompileBroker disable further compilations.
      C->record_failure("Not enough space for scratch buffer in CodeCache");
      return;
    }
  }

  // Initialize the relocation buffers
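  // (The buffer is carved from the tail of the blob. MAX_locs_size is in
  // relocInfo units, matching the relocInfo* pointer arithmetic below.)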
  relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
  set_scratch_locs_memory(locs_buf);
}


//-----------------------scratch_emit_size-------------------------------------
// Helper function that computes size by emitting code
uint PhaseOutput::scratch_emit_size(const Node* n) {
// ...
  int lsize = MAX_locs_size / 3;
  buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
  buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
  buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
  // Mark as scratch buffer.
  buf.consts()->set_scratch_emit();
  buf.insts()->set_scratch_emit();
  buf.stubs()->set_scratch_emit();

  // Do the emission.

  Label fakeL; // Fake label for branch instructions.
  Label* saveL = NULL;
  uint save_bnum = 0;
  bool is_branch = n->is_MachBranch();
  if (is_branch) {
    MacroAssembler masm(&buf);
    masm.bind(fakeL);
    n->as_MachBranch()->save_label(&saveL, &save_bnum);
    n->as_MachBranch()->label_set(&fakeL, 0);
  } else if (n->is_MachProlog()) {
    saveL = ((MachPrologNode*)n)->_verified_entry;
    ((MachPrologNode*)n)->_verified_entry = &fakeL;
  } else if (n->is_MachVEP()) {
    saveL = ((MachVEPNode*)n)->_verified_entry;
    ((MachVEPNode*)n)->_verified_entry = &fakeL;
  }
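  // MachProlog and MachVEP nodes carry a _verified_entry label which, like a
  // branch target, must be bound during emission; it is swapped for fakeL
  // here and restored below.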
  n->emit(buf, C->regalloc());

  // Emitting into the scratch buffer should not fail
  assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());

  // Restore label.
  if (is_branch) {
    n->as_MachBranch()->label_set(saveL, save_bnum);
  } else if (n->is_MachProlog()) {
    ((MachPrologNode*)n)->_verified_entry = saveL;
  } else if (n->is_MachVEP()) {
    ((MachVEPNode*)n)->_verified_entry = saveL;
  }

  // End scratch_emit_size section.
  set_in_scratch_emit_size(false);

  return buf.insts_size();
}

void PhaseOutput::install() {
  if (!C->should_install_code()) {
    return;
  } else if (C->stub_function() != NULL) {
    install_stub(C->stub_name());
  } else {
    install_code(C->method(),
                 C->entry_bci(),
                 CompileBroker::compiler2(),
                 C->has_unsafe_access(),
                 SharedRuntime::is_wide_vector(C->max_vector_size()),
                 C->rtm_state());
  }
// ...
                               int entry_bci,
                               AbstractCompiler* compiler,
                               bool has_unsafe_access,
                               bool has_wide_vectors,
                               RTMState rtm_state) {
  // Check if we want to skip execution of all compiled code.
  {
#ifndef PRODUCT
    if (OptoNoExecute) {
      C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
      return;
    }
#endif
    Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);

    if (C->is_osr_compilation()) {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
    } else {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
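      // Resolve the inline-type entry offsets that Output() left as -1 (the
      // static-method case); they coincide with the verified entry.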
      if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
        _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
      }
      if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
        _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
      }
      if (_code_offsets.value(CodeOffsets::Entry) == -1) {
        _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
      }
      _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
    }

    C->env()->register_method(target,
                              entry_bci,
                              &_code_offsets,
                              _orig_pc_slot_offset_in_bytes,
                              code_buffer(),
                              frame_size_in_words(),
                              _oop_map_set,
                              &_handler_table,
                              &_inc_table,
                              compiler,
                              has_unsafe_access,
                              SharedRuntime::is_wide_vector(C->max_vector_size()),
                              C->rtm_state(),
                              C->native_invokers());

    if (C->log() != NULL) { // Print code cache state into compiler log
      C->log()->code_cache_state();
    }
  }
}

void PhaseOutput::install_stub(const char* stub_name) {
  // Entry point will be accessed using stub_entry_point();
  if (code_buffer() == NULL) {
    Matcher::soft_match_failure();
  } else {
    if (PrintAssembly && (WizardMode || Verbose))
      tty->print_cr("### Stub::%s", stub_name);

    if (!C->failing()) {
      assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");

      // Make the NMethod
      // For now we mark the frame as never safe for profile stackwalking
      RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
// ...