  // Initialize the space for the BufferBlob used to find and verify
  // instruction size in MachNode::emit_size()
  init_scratch_buffer_blob(const_req);
}

CodeBuffer* PhaseOutput::init_buffer() {
  int stub_req  = _buf_sizes._stub;
  int code_req  = _buf_sizes._code;
  int const_req = _buf_sizes._const;

  int pad_req = NativeCall::byte_size();

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  stub_req += bs->estimate_stub_size();

  // nmethod and CodeBuffer count stubs & constants as part of method's code.
  // class HandlerImpl is platform-specific and defined in the *.ad files.
  int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
  int deopt_handler_req     = HandlerImpl::size_deopt_handler()     + MAX_stubs_size; // add marginal slop for handler
  stub_req += MAX_stubs_size;  // ensure per-stub margin
  code_req += max_inst_size(); // ensure per-instruction margin

  if (StressCodeBuffers)
    code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion

  int total_req =
      const_req +
      code_req +
      pad_req +
      stub_req +
      exception_handler_req +
      deopt_handler_req; // deopt handler
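
  // An illustrative picture of what total_req reserves (a sketch, not an
  // exact layout; every term is an upper bound and the buffer may still be
  // expanded during emission):
  //
  //   [ consts | code + per-inst slop | pad | stubs + per-stub slop |
  //     exception handler | deopt handler ]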

  CodeBuffer* cb = code_buffer();
  cb->set_const_section_alignment(constant_table().alignment());
  cb->initialize(total_req, _buf_sizes._reloc);

  // Have we run out of code space?
  if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
    C->record_failure("CodeCache is full");
    return nullptr;

// ... (elided; the code below is from PhaseOutput::fill_buffer()) ...

        int padding = mach->compute_padding(current_offset);
        // Make sure safepoint node for polling is distinct from a call's
        // return by adding a nop if needed.
        if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
          padding = nop_size;
        }
        if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
            current_offset == last_avoid_back_to_back_offset) {
          // Some instructions must not be placed back to back; separate them
          // with a nop.
          padding = nop_size;
        }
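
        // For example (a sketch): on x86 a MachNopNode expands to one 1-byte
        // nop per count, so nop_size is 1 and any padding amount is
        // expressible; on a fixed-width ISA with 4-byte nops,
        // compute_padding() must return a multiple of 4 or the assert below
        // fires. nop_size is computed earlier in this function from a
        // MachNopNode's size().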

        if (padding > 0) {
          assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
          int nops_cnt = padding / nop_size;
          MachNode *nop = new MachNopNode(nops_cnt);
          block->insert_node(nop, j++);
          last_inst++;
          C->cfg()->map_node_to_block(nop, block);
          // Ensure enough space.
          masm->code()->insts()->maybe_expand_to_ensure_remaining(max_inst_size());
          if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
            C->record_failure("CodeCache is full");
            return;
          }
          nop->emit(masm, C->regalloc());
          masm->code()->flush_bundle(true);
          current_offset = masm->offset();
        }
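        // (Intent, as read from the code above: flush_bundle(true) ends the
        // current scheduling bundle so the padding nops are not grouped with
        // the instruction that follows.)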

        bool observe_safepoint = is_sfn;
        // Remember the start of the last call in a basic block
        if (is_mcall) {
          MachCallNode *mcall = mach->as_MachCall();

          // This destination address is NOT PC-relative
          mcall->method_set((intptr_t)mcall->entry_point());

          // Save the return address
          call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
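          // (call_returns is later handed to FillExceptionTables(), which
          // uses these return PCs when building the exception tables.)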

        // ... (elided) ...

                Label *blkLabel = &blk_labels[block_num];
                mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
              }
            }
          }
        } else if (!n->is_Proj()) {
          // Remember the beginning of the previous instruction, in case
          // it's followed by a flag-kill and a null-check. Happens on
          // Intel all the time, with add-to-memory kind of opcodes.
          previous_offset = current_offset;
        }

        // Not an else-if!
        // If this is a trap-based cmp, add its offset to the list.
        if (mach->is_TrapBasedCheckNode()) {
          inct_starts[inct_cnt++] = current_offset;
        }
      }

      // Verify that there is sufficient space remaining
      masm->code()->insts()->maybe_expand_to_ensure_remaining(max_inst_size());
      if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
        C->record_failure("CodeCache is full");
        return;
      }

      // Save the offset for the listing
#if defined(SUPPORT_OPTO_ASSEMBLY)
      if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
        node_offsets[n->_idx] = masm->offset();
      }
#endif
      assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");

      // "Normal" instruction case
      DEBUG_ONLY(uint instr_offset = masm->offset());
      n->emit(masm, C->regalloc());
      current_offset = masm->offset();
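      // (In debug builds, instr_offset feeds a check, elided here, that the
      // bytes actually emitted do not exceed the size the node reported; see
      // scratch_emit_size() below.)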

      // Above we only verified that there is enough space in the instruction section.
      // However, the instruction may emit stubs that cause code buffer expansion.

// ... (elided) ...

    ttyLocker ttyl; // keep the following output all in one block
    if (!VMThread::should_terminate()) { // test this under the tty lock
      // print_metadata and dump_asm may safepoint, which makes us lose the
      // ttyLocker. We call them first and write to a stringStream, then we
      // retake the lock to make sure the end tag is coherent and that
      // xmlStream->pop_tag is done thread-safely.
      ResourceMark rm;
      stringStream method_metadata_str;
      if (C->method() != nullptr) {
        C->method()->print_metadata(&method_metadata_str);
      }
      stringStream dump_asm_str;
      dump_asm_on(&dump_asm_str, node_offsets, node_offset_limit);

      NoSafepointVerifier nsv;
      ttyLocker ttyl2;
      // This output goes directly to the tty, not the compiler log.
      // To enable tools to match it up with the compilation activity,
      // be sure to tag this tty output with the compile ID.
      if (xtty != nullptr) {
        xtty->head("opto_assembly compile_id='%d'%s", C->compile_id(),
                   C->is_osr_compilation() ? " compile_kind='osr'" :
                   (C->for_preload() ? " compile_kind='AP'" : ""));
      }
      if (C->method() != nullptr) {
        tty->print_cr("----------------------- MetaData before Compile_id = %d ------------------------", C->compile_id());
        tty->print_raw(method_metadata_str.freeze());
      } else if (C->stub_name() != nullptr) {
        tty->print_cr("----------------------------- RuntimeStub %s -------------------------------", C->stub_name());
      }
      tty->cr();
      tty->print_cr("------------------------ OptoAssembly for Compile_id = %d -----------------------", C->compile_id());
      tty->print_raw(dump_asm_str.freeze());
      tty->print_cr("--------------------------------------------------------------------------------");
      if (xtty != nullptr) {
        xtty->tail("opto_assembly");
      }
    }
  }
#endif
}

void PhaseOutput::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
  // ... (elided) ...


//-----------------------scratch_emit_size-------------------------------------
// Helper function that computes size by emitting code
uint PhaseOutput::scratch_emit_size(const Node* n) {
  // Start scratch_emit_size section.
  set_in_scratch_emit_size(true);

  // Emit into a trash buffer and count bytes emitted.
  // This is a pretty expensive way to compute a size,
  // but it works well enough if seldom used.
  // All common fixed-size instructions are given a size
  // method by the AD file.
  // Note that the scratch buffer blob and locs memory are
  // allocated at the beginning of the compile task, and
  // may be shared by several calls to scratch_emit_size.
  // The allocation of the scratch buffer blob is particularly
  // expensive, since it has to grab the code cache lock.
  BufferBlob* blob = this->scratch_buffer_blob();
  assert(blob != nullptr, "Initialize BufferBlob at start");
  assert(blob->size() > max_inst_size(), "sanity");
  relocInfo* locs_buf = scratch_locs_memory();
  address blob_begin = blob->content_begin();
  address blob_end = (address)locs_buf;
  assert(blob->contains(blob_end), "sanity");
  CodeBuffer buf(blob_begin, blob_end - blob_begin);
  buf.initialize_consts_size(_scratch_const_size);
  buf.initialize_stubs_size(MAX_stubs_size);
  assert(locs_buf != nullptr, "sanity");
  int lsize = MAX_locs_size / 3;
  buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
  buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
  buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
  // Mark as scratch buffer.
  buf.consts()->set_scratch_emit();
  buf.insts()->set_scratch_emit();
  buf.stubs()->set_scratch_emit();
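  // (As read from this code: the per-section scratch flag mirrors the
  // set_in_scratch_emit_size(true) call above, so emission paths can detect
  // a measurement-only pass.)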

  // Do the emission.

  Label fakeL; // Fake label for branch instructions.
  Label* saveL = nullptr;
  uint save_bnum = 0;
  bool is_branch = n->is_MachBranch();
  C2_MacroAssembler masm(&buf);
  masm.bind(fakeL);
  if (is_branch) {
    n->as_MachBranch()->save_label(&saveL, &save_bnum);
    n->as_MachBranch()->label_set(&fakeL, 0);
  }
  n->emit(&masm, C->regalloc());

  // Emitting into the scratch buffer should not fail
  assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());

  if (is_branch) // Restore label.
    n->as_MachBranch()->label_set(saveL, save_bnum);

  // End scratch_emit_size section.
  set_in_scratch_emit_size(false);

  return buf.insts_size();
}
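
// A sketch of the typical caller, for orientation (see MachNode::emit_size();
// not verbatim): variable-size instructions fall back to measuring
// themselves through this helper.
//
//   uint MachNode::emit_size(PhaseRegAlloc* ra_) const {
//     // Emit into a trash buffer and count bytes emitted.
//     return ra_->C->output()->scratch_emit_size(this);
//   }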

void PhaseOutput::install() {
  if (C->should_install_code() && C->stub_function() != nullptr) {
    install_stub(C->stub_name());
  } else {
    install_code(C->method(),
                 C->entry_bci(),
                 CompilerThread::current()->compiler(),
                 C->has_unsafe_access(),
                 SharedRuntime::is_wide_vector(C->max_vector_size()));
  }
}
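
// install() routes stub compilations (stub_function() != nullptr) to
// install_stub(), which wraps the code in a RuntimeStub, while method
// compilations go through install_code(), which hands the CodeBuffer to
// ciEnv::register_method() to create and install the nmethod.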

void PhaseOutput::install_code(ciMethod*         target,
                               int               entry_bci,
                               AbstractCompiler* compiler,
                               bool              has_unsafe_access,
                               bool              has_wide_vectors) {
  // Check if we want to skip execution of all compiled code.
  {
#ifndef PRODUCT
    if (OptoNoExecute) {
      C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
      return;
    }
#endif
    Compile::TracePhase tp(_t_registerMethod);

    if (C->is_osr_compilation()) {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
    } else {
      if (!target->is_static()) {
        // The UEP (unverified entry point) of an nmethod ensures that the VEP
        // (verified entry point) is padded. However, the padding of the UEP is
        // placed before the inline cache check, so we don't have to execute
        // any nop instructions when dispatching through the UEP, yet we can
        // ensure that the VEP is aligned appropriately.
        _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
      }
      _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
    }
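
    // Net effect (summarizing the code above):
    //   - OSR compilation:    Verified_Entry = 0, OSR_Entry = _first_block_size
    //   - normal compilation: Entry (UEP) = _first_block_size - ic_check_size,
    //                         Verified_Entry (VEP) = _first_block_size,
    //                         OSR_Entry = 0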

    C->env()->register_method(target,
                              entry_bci,
                              &_code_offsets,
                              _orig_pc_slot_offset_in_bytes,
                              code_buffer(),
                              frame_size_in_words(),
                              oop_map_set(),
                              &_handler_table,
                              inc_table(),
                              compiler,
                              C->has_clinit_barriers(),
                              C->for_preload(),
                              has_unsafe_access,
                              SharedRuntime::is_wide_vector(C->max_vector_size()),
                              C->has_monitors(),
                              C->has_scoped_access(),
                              0,
                              C->should_install_code());

    if (C->log() != nullptr) { // Print code cache state into compiler log
      C->log()->code_cache_state();
    }
    assert(!C->has_clinit_barriers() || C->for_preload(), "class init barriers should be only in preload code");
  }
}
void PhaseOutput::install_stub(const char* stub_name) {
  // Entry point will be accessed using stub_entry_point();
  if (code_buffer() == nullptr) {
    Matcher::soft_match_failure();
  } else {
    if (PrintAssembly && (WizardMode || Verbose))
      tty->print_cr("### Stub::%s", stub_name);

    if (!C->failing()) {
      assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");

      // Make the RuntimeStub
      // For now we mark the frame as never safe for profile stackwalking
      RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
                                                      code_buffer(),
                                                      CodeOffsets::frame_never_safe,
                                                      // _code_offsets.value(CodeOffsets::Frame_Complete),
                                                      frame_size_in_words(),
// ... (elided) ...

      // Dump the exception table as well
      if (n->is_Catch() && (Verbose || WizardMode)) {
        // Print the exception table for this offset
        _handler_table.print_subtable_for(pc);
      }
      st->bol(); // Make sure we start on a new line
    }
    st->cr(); // one empty line between blocks
  } // End of per-block dump

  if (cut_short) st->print_cr("*** disassembly is cut short ***");
}
#endif

#ifndef PRODUCT
void PhaseOutput::print_statistics() {
  Scheduling::print_statistics();
}
#endif

int PhaseOutput::max_inst_size() {
  if (AOTCodeCache::maybe_dumping_code()) {
    // See the comment in output.hpp.
    return 16384;
  } else {
    return mainline_MAX_inst_size;
  }
}

int PhaseOutput::max_inst_gcstub_size() {
  assert(mainline_MAX_inst_size <= max_inst_size(), "Sanity");
  return mainline_MAX_inst_size;
}
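
// Note how the two bounds are used above: init_buffer() adds max_inst_size()
// as the per-instruction margin and fill_buffer() passes it to
// maybe_expand_to_ensure_remaining(), so the relaxed AOT bound widens both
// the initial reservation and the per-node expansion check, while GC stubs
// are still sized against the tighter mainline bound.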