1340 // Initialize the space for the BufferBlob used to find and verify
1341 // instruction size in MachNode::emit_size()
1342 init_scratch_buffer_blob(const_req);
1343 }
1344
1345 CodeBuffer* PhaseOutput::init_buffer() {
1346 int stub_req = _buf_sizes._stub;
1347 int code_req = _buf_sizes._code;
1348 int const_req = _buf_sizes._const;
1349
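// Assumption (not stated here): this call-sized pad keeps a call emitted at
// the very end of the code from having its return address fall outside the
// instruction section.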
1350 int pad_req = NativeCall::byte_size();
1351
1352 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1353 stub_req += bs->estimate_stub_size();
1354
1355 // nmethod and CodeBuffer count stubs & constants as part of method's code.
1356 // class HandlerImpl is platform-specific and defined in the *.ad files.
1357 int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
1358 int deopt_handler_req = HandlerImpl::size_deopt_handler() + MAX_stubs_size; // add marginal slop for handler
1359 stub_req += MAX_stubs_size; // ensure per-stub margin
1360 code_req += MAX_inst_size; // ensure per-instruction margin
1361
1362 if (StressCodeBuffers)
1363 code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
1364
1365 int total_req =
1366 const_req +
1367 code_req +
1368 pad_req +
1369 stub_req +
1370 exception_handler_req +
1371 deopt_handler_req; // deopt handler
1372
1373 if (C->has_method_handle_invokes())
1374 total_req += deopt_handler_req; // deopt MH handler
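// total_req is a worst-case estimate: the exception and deopt handlers are
// emitted into the stub section later, and the CodeBuffer can still expand
// during emission if these estimates fall short.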
1375
1376 CodeBuffer* cb = code_buffer();
1377 cb->set_const_section_alignment(constant_table().alignment());
1378 cb->initialize(total_req, _buf_sizes._reloc);
1379
1380 // Have we run out of code space?
1520 int padding = mach->compute_padding(current_offset);
1521 // Make sure the safepoint node for polling is distinct from a call's
1522 // return address by adding a nop if needed.
1523 if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
1524 padding = nop_size;
1525 }
1526 if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
1527 current_offset == last_avoid_back_to_back_offset) {
1528 // Avoid placing some instructions back to back.
1529 padding = nop_size;
1530 }
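// nop_size is the size of a single MachNopNode (e.g. 1 byte on x86, 4 bytes
// on AArch64, per the platform .ad files), so any padding computed here is
// filled with whole nops below.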
1531
1532 if (padding > 0) {
1533 assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
1534 int nops_cnt = padding / nop_size;
1535 MachNode *nop = new MachNopNode(nops_cnt);
1536 block->insert_node(nop, j++);
1537 last_inst++;
1538 C->cfg()->map_node_to_block(nop, block);
1539 // Ensure enough space.
1540 masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1541 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1542 C->record_failure("CodeCache is full");
1543 return;
1544 }
1545 nop->emit(masm, C->regalloc());
1546 masm->code()->flush_bundle(true);
1547 current_offset = masm->offset();
1548 }
1549
1550 bool observe_safepoint = is_sfn;
1551 // Remember the start of the last call in a basic block
1552 if (is_mcall) {
1553 MachCallNode *mcall = mach->as_MachCall();
1554
1555 // This destination address is NOT PC-relative
1556 mcall->method_set((intptr_t)mcall->entry_point());
1557
1558 // Save the return address
1559 call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
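// call_returns[] records, per basic block, the offset of the last call's
// return address; FillExceptionTables() later uses it to anchor exception
// handler entries at the proper PC.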
1560
1646 Label *blkLabel = &blk_labels[block_num];
1647 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1648 }
1649 }
1650 }
1651 } else if (!n->is_Proj()) {
1652 // Remember the beginning of the previous instruction, in case
1653 // it's followed by a flag-kill and a null-check. This happens all
1654 // the time on Intel with add-to-memory style opcodes.
1655 previous_offset = current_offset;
1656 }
1657
1658 // Not an else-if!
1659 // If this is a trap based cmp then add its offset to the list.
1660 if (mach->is_TrapBasedCheckNode()) {
1661 inct_starts[inct_cnt++] = current_offset;
1662 }
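// inct_starts[] collects the start offsets of trap-based checks for the
// implicit exception table, analogous to call_returns[] above.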
1663 }
1664
1665 // Verify that there is sufficient space remaining
1666 masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1667 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1668 C->record_failure("CodeCache is full");
1669 return;
1670 }
1671
1672 // Save the offset for the listing
1673 #if defined(SUPPORT_OPTO_ASSEMBLY)
1674 if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1675 node_offsets[n->_idx] = masm->offset();
1676 }
1677 #endif
1678 assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1679
1680 // "Normal" instruction case
1681 DEBUG_ONLY(uint instr_offset = masm->offset());
1682 n->emit(masm, C->regalloc());
1683 current_offset = masm->offset();
1684
1685 // Above we only verified that there is enough space in the instruction section.
1686 // However, the instruction may emit stubs that cause code buffer expansion.
1834 ttyLocker ttyl; // keep the following output all in one block
1835 if (!VMThread::should_terminate()) { // test this under the tty lock
1836 // print_metadata and dump_asm may safepoint, which makes us lose the tty lock.
1837 // We call them first and write to a stringStream, then we retake the lock to
1838 // make sure the end tag is coherent and that xmlStream->pop_tag is done in a thread-safe way.
1839 ResourceMark rm;
1840 stringStream method_metadata_str;
1841 if (C->method() != nullptr) {
1842 C->method()->print_metadata(&method_metadata_str);
1843 }
1844 stringStream dump_asm_str;
1845 dump_asm_on(&dump_asm_str, node_offsets, node_offset_limit);
1846
1847 NoSafepointVerifier nsv;
1848 ttyLocker ttyl2;
1849 // This output goes directly to the tty, not the compiler log.
1850 // To enable tools to match it up with the compilation activity,
1851 // be sure to tag this tty output with the compile ID.
1852 if (xtty != nullptr) {
1853 xtty->head("opto_assembly compile_id='%d'%s", C->compile_id(),
1854 C->is_osr_compilation() ? " compile_kind='osr'" : "");
1855 }
1856 if (C->method() != nullptr) {
1857 tty->print_cr("----------------------- MetaData before Compile_id = %d ------------------------", C->compile_id());
1858 tty->print_raw(method_metadata_str.freeze());
1859 } else if (C->stub_name() != nullptr) {
1860 tty->print_cr("----------------------------- RuntimeStub %s -------------------------------", C->stub_name());
1861 }
1862 tty->cr();
1863 tty->print_cr("------------------------ OptoAssembly for Compile_id = %d -----------------------", C->compile_id());
1864 tty->print_raw(dump_asm_str.freeze());
1865 tty->print_cr("--------------------------------------------------------------------------------");
1866 if (xtty != nullptr) {
1867 xtty->tail("opto_assembly");
1868 }
1869 }
1870 }
1871 #endif
1872 }
1873
1874 void PhaseOutput::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
3121
3122
3123 //-----------------------scratch_emit_size-------------------------------------
3124 // Helper function that computes size by emitting code
3125 uint PhaseOutput::scratch_emit_size(const Node* n) {
3126 // Start scratch_emit_size section.
3127 set_in_scratch_emit_size(true);
3128
3129 // Emit into a trash buffer and count bytes emitted.
3130 // This is a pretty expensive way to compute a size,
3131 // but it works well enough if seldom used.
3132 // All common fixed-size instructions are given a size
3133 // method by the AD file.
3134 // Note that the scratch buffer blob and locs memory are
3135 // allocated at the beginning of the compile task, and
3136 // may be shared by several calls to scratch_emit_size.
3137 // The allocation of the scratch buffer blob is particularly
3138 // expensive, since it has to grab the code cache lock.
3139 BufferBlob* blob = this->scratch_buffer_blob();
3140 assert(blob != nullptr, "Initialize BufferBlob at start");
3141 assert(blob->size() > MAX_inst_size, "sanity");
3142 relocInfo* locs_buf = scratch_locs_memory();
3143 address blob_begin = blob->content_begin();
3144 address blob_end = (address)locs_buf;
3145 assert(blob->contains(blob_end), "sanity");
3146 CodeBuffer buf(blob_begin, blob_end - blob_begin);
3147 buf.initialize_consts_size(_scratch_const_size);
3148 buf.initialize_stubs_size(MAX_stubs_size);
3149 assert(locs_buf != nullptr, "sanity");
3150 int lsize = MAX_locs_size / 3;
3151 buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
3152 buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
3153 buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
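// Split the preallocated relocation memory evenly across the three
// sections; shared locs keep scratch emission working out of a fixed buffer.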
3154 // Mark as scratch buffer.
3155 buf.consts()->set_scratch_emit();
3156 buf.insts()->set_scratch_emit();
3157 buf.stubs()->set_scratch_emit();
3158
3159 // Do the emission.
3160
3161 Label fakeL; // Fake label for branch instructions.
3166 masm.bind(fakeL);
3167 if (is_branch) {
3168 n->as_MachBranch()->save_label(&saveL, &save_bnum);
3169 n->as_MachBranch()->label_set(&fakeL, 0);
3170 }
3171 n->emit(&masm, C->regalloc());
3172
3173 // Emitting into the scratch buffer should not fail
3174 assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3175
3176 if (is_branch) // Restore label.
3177 n->as_MachBranch()->label_set(saveL, save_bnum);
3178
3179 // End scratch_emit_size section.
3180 set_in_scratch_emit_size(false);
3181
3182 return buf.insts_size();
3183 }
3184
3185 void PhaseOutput::install() {
3186 if (!C->should_install_code()) {
3187 return;
3188 } else if (C->stub_function() != nullptr) {
3189 install_stub(C->stub_name());
3190 } else {
3191 install_code(C->method(),
3192 C->entry_bci(),
3193 CompileBroker::compiler2(),
3194 C->has_unsafe_access(),
3195 SharedRuntime::is_wide_vector(C->max_vector_size()));
3196 }
3197 }
3198
3199 void PhaseOutput::install_code(ciMethod* target,
3200 int entry_bci,
3201 AbstractCompiler* compiler,
3202 bool has_unsafe_access,
3203 bool has_wide_vectors) {
3204 // Check if we want to skip execution of all compiled code.
3205 {
3206 #ifndef PRODUCT
3207 if (OptoNoExecute) {
3208 C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
3209 return;
3210 }
3211 #endif
3212 Compile::TracePhase tp(_t_registerMethod);
3213
3218 if (!target->is_static()) {
3219 // The unverified entry point (UEP) guarantees that the verified entry point (VEP) is padded to
3220 // its required alignment. The padding sits before the inline cache check, so dispatching through
3221 // the UEP executes no extra nops, yet the VEP still ends up properly aligned.
3222 _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3223 }
3224 _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3225 _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3226 }
3227
3228 C->env()->register_method(target,
3229 entry_bci,
3230 &_code_offsets,
3231 _orig_pc_slot_offset_in_bytes,
3232 code_buffer(),
3233 frame_size_in_words(),
3234 oop_map_set(),
3235 &_handler_table,
3236 inc_table(),
3237 compiler,
3238 has_unsafe_access,
3239 SharedRuntime::is_wide_vector(C->max_vector_size()),
3240 C->has_monitors(),
3241 C->has_scoped_access(),
3242 0);
3243
3244 if (C->log() != nullptr) { // Print code cache state into compiler log
3245 C->log()->code_cache_state();
3246 }
3247 }
3248 }
3249 void PhaseOutput::install_stub(const char* stub_name) {
3250 // Entry point will be accessed using stub_entry_point();
3251 if (code_buffer() == nullptr) {
3252 Matcher::soft_match_failure();
3253 } else {
3254 if (PrintAssembly && (WizardMode || Verbose))
3255 tty->print_cr("### Stub::%s", stub_name);
3256
3257 if (!C->failing()) {
3258 assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3259
3260 // Make the NMethod
3261 // For now we mark the frame as never safe for profile stackwalking
3262 RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
3263 code_buffer(),
3264 CodeOffsets::frame_never_safe,
3265 // _code_offsets.value(CodeOffsets::Frame_Complete),
3266 frame_size_in_words(),
3403
3404 // Dump the exception table as well
3405 if (n->is_Catch() && (Verbose || WizardMode)) {
3406 // Print the exception table for this offset
3407 _handler_table.print_subtable_for(pc);
3408 }
3409 st->bol(); // Make sure we start on a new line
3410 }
3411 st->cr(); // one empty line between blocks
3412 } // End of per-block dump
3413
3414 if (cut_short) st->print_cr("*** disassembly is cut short ***");
3415 }
3416 #endif
3417
3418 #ifndef PRODUCT
3419 void PhaseOutput::print_statistics() {
3420 Scheduling::print_statistics();
3421 }
3422 #endif
|
1340 // Initialize the space for the BufferBlob used to find and verify
1341 // instruction size in MachNode::emit_size()
1342 init_scratch_buffer_blob(const_req);
1343 }
1344
1345 CodeBuffer* PhaseOutput::init_buffer() {
1346 int stub_req = _buf_sizes._stub;
1347 int code_req = _buf_sizes._code;
1348 int const_req = _buf_sizes._const;
1349
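// Assumption (not stated here): this call-sized pad keeps a call emitted at
// the very end of the code from having its return address fall outside the
// instruction section.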
1350 int pad_req = NativeCall::byte_size();
1351
1352 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1353 stub_req += bs->estimate_stub_size();
1354
1355 // nmethod and CodeBuffer count stubs & constants as part of method's code.
1356 // class HandlerImpl is platform-specific and defined in the *.ad files.
1357 int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
1358 int deopt_handler_req = HandlerImpl::size_deopt_handler() + MAX_stubs_size; // add marginal slop for handler
1359 stub_req += MAX_stubs_size; // ensure per-stub margin
1360 code_req += max_inst_size(); // ensure per-instruction margin
1361
1362 if (StressCodeBuffers)
1363 code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
1364
1365 int total_req =
1366 const_req +
1367 code_req +
1368 pad_req +
1369 stub_req +
1370 exception_handler_req +
1371 deopt_handler_req; // deopt handler
1372
1373 if (C->has_method_handle_invokes())
1374 total_req += deopt_handler_req; // deopt MH handler
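// total_req is a worst-case estimate: the exception and deopt handlers are
// emitted into the stub section later, and the CodeBuffer can still expand
// during emission if these estimates fall short.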
1375
1376 CodeBuffer* cb = code_buffer();
1377 cb->set_const_section_alignment(constant_table().alignment());
1378 cb->initialize(total_req, _buf_sizes._reloc);
1379
1380 // Have we run out of code space?
1520 int padding = mach->compute_padding(current_offset);
1521 // Make sure the safepoint node for polling is distinct from a call's
1522 // return address by adding a nop if needed.
1523 if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
1524 padding = nop_size;
1525 }
1526 if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
1527 current_offset == last_avoid_back_to_back_offset) {
1528 // Avoid placing some instructions back to back.
1529 padding = nop_size;
1530 }
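// nop_size is the size of a single MachNopNode (e.g. 1 byte on x86, 4 bytes
// on AArch64, per the platform .ad files), so any padding computed here is
// filled with whole nops below.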
1531
1532 if (padding > 0) {
1533 assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
1534 int nops_cnt = padding / nop_size;
1535 MachNode *nop = new MachNopNode(nops_cnt);
1536 block->insert_node(nop, j++);
1537 last_inst++;
1538 C->cfg()->map_node_to_block(nop, block);
1539 // Ensure enough space.
1540 masm->code()->insts()->maybe_expand_to_ensure_remaining(max_inst_size());
1541 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1542 C->record_failure("CodeCache is full");
1543 return;
1544 }
1545 nop->emit(masm, C->regalloc());
1546 masm->code()->flush_bundle(true);
1547 current_offset = masm->offset();
1548 }
1549
1550 bool observe_safepoint = is_sfn;
1551 // Remember the start of the last call in a basic block
1552 if (is_mcall) {
1553 MachCallNode *mcall = mach->as_MachCall();
1554
1555 // This destination address is NOT PC-relative
1556 mcall->method_set((intptr_t)mcall->entry_point());
1557
1558 // Save the return address
1559 call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
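// call_returns[] records, per basic block, the offset of the last call's
// return address; FillExceptionTables() later uses it to anchor exception
// handler entries at the proper PC.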
1560
1646 Label *blkLabel = &blk_labels[block_num];
1647 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1648 }
1649 }
1650 }
1651 } else if (!n->is_Proj()) {
1652 // Remember the beginning of the previous instruction, in case
1653 // it's followed by a flag-kill and a null-check. This happens all
1654 // the time on Intel with add-to-memory style opcodes.
1655 previous_offset = current_offset;
1656 }
1657
1658 // Not an else-if!
1659 // If this is a trap based cmp then add its offset to the list.
1660 if (mach->is_TrapBasedCheckNode()) {
1661 inct_starts[inct_cnt++] = current_offset;
1662 }
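// inct_starts[] collects the start offsets of trap-based checks for the
// implicit exception table, analogous to call_returns[] above.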
1663 }
1664
1665 // Verify that there is sufficient space remaining
1666 masm->code()->insts()->maybe_expand_to_ensure_remaining(max_inst_size());
1667 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1668 C->record_failure("CodeCache is full");
1669 return;
1670 }
1671
1672 // Save the offset for the listing
1673 #if defined(SUPPORT_OPTO_ASSEMBLY)
1674 if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1675 node_offsets[n->_idx] = masm->offset();
1676 }
1677 #endif
1678 assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1679
1680 // "Normal" instruction case
1681 DEBUG_ONLY(uint instr_offset = masm->offset());
1682 n->emit(masm, C->regalloc());
1683 current_offset = masm->offset();
1684
1685 // Above we only verified that there is enough space in the instruction section.
1686 // However, the instruction may emit stubs that cause code buffer expansion.
1834 ttyLocker ttyl; // keep the following output all in one block
1835 if (!VMThread::should_terminate()) { // test this under the tty lock
1836 // print_metadata and dump_asm may safepoint, which makes us lose the tty lock.
1837 // We call them first and write to a stringStream, then we retake the lock to
1838 // make sure the end tag is coherent and that xmlStream->pop_tag is done in a thread-safe way.
1839 ResourceMark rm;
1840 stringStream method_metadata_str;
1841 if (C->method() != nullptr) {
1842 C->method()->print_metadata(&method_metadata_str);
1843 }
1844 stringStream dump_asm_str;
1845 dump_asm_on(&dump_asm_str, node_offsets, node_offset_limit);
1846
1847 NoSafepointVerifier nsv;
1848 ttyLocker ttyl2;
1849 // This output goes directly to the tty, not the compiler log.
1850 // To enable tools to match it up with the compilation activity,
1851 // be sure to tag this tty output with the compile ID.
1852 if (xtty != nullptr) {
1853 xtty->head("opto_assembly compile_id='%d'%s", C->compile_id(),
1854 C->is_osr_compilation() ? " compile_kind='osr'" :
1855 (C->for_preload() ? " compile_kind='AP'" : ""));
1856 }
1857 if (C->method() != nullptr) {
1858 tty->print_cr("----------------------- MetaData before Compile_id = %d ------------------------", C->compile_id());
1859 tty->print_raw(method_metadata_str.freeze());
1860 } else if (C->stub_name() != nullptr) {
1861 tty->print_cr("----------------------------- RuntimeStub %s -------------------------------", C->stub_name());
1862 }
1863 tty->cr();
1864 tty->print_cr("------------------------ OptoAssembly for Compile_id = %d -----------------------", C->compile_id());
1865 tty->print_raw(dump_asm_str.freeze());
1866 tty->print_cr("--------------------------------------------------------------------------------");
1867 if (xtty != nullptr) {
1868 xtty->tail("opto_assembly");
1869 }
1870 }
1871 }
1872 #endif
1873 }
1874
1875 void PhaseOutput::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
3122
3123
3124 //-----------------------scratch_emit_size-------------------------------------
3125 // Helper function that computes size by emitting code
3126 uint PhaseOutput::scratch_emit_size(const Node* n) {
3127 // Start scratch_emit_size section.
3128 set_in_scratch_emit_size(true);
3129
3130 // Emit into a trash buffer and count bytes emitted.
3131 // This is a pretty expensive way to compute a size,
3132 // but it works well enough if seldom used.
3133 // All common fixed-size instructions are given a size
3134 // method by the AD file.
3135 // Note that the scratch buffer blob and locs memory are
3136 // allocated at the beginning of the compile task, and
3137 // may be shared by several calls to scratch_emit_size.
3138 // The allocation of the scratch buffer blob is particularly
3139 // expensive, since it has to grab the code cache lock.
3140 BufferBlob* blob = this->scratch_buffer_blob();
3141 assert(blob != nullptr, "Initialize BufferBlob at start");
3142 assert(blob->size() > max_inst_size(), "sanity");
3143 relocInfo* locs_buf = scratch_locs_memory();
3144 address blob_begin = blob->content_begin();
3145 address blob_end = (address)locs_buf;
3146 assert(blob->contains(blob_end), "sanity");
3147 CodeBuffer buf(blob_begin, blob_end - blob_begin);
3148 buf.initialize_consts_size(_scratch_const_size);
3149 buf.initialize_stubs_size(MAX_stubs_size);
3150 assert(locs_buf != nullptr, "sanity");
3151 int lsize = MAX_locs_size / 3;
3152 buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
3153 buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
3154 buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
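// Split the preallocated relocation memory evenly across the three
// sections; shared locs keep scratch emission working out of a fixed buffer.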
3155 // Mark as scratch buffer.
3156 buf.consts()->set_scratch_emit();
3157 buf.insts()->set_scratch_emit();
3158 buf.stubs()->set_scratch_emit();
3159
3160 // Do the emission.
3161
3162 Label fakeL; // Fake label for branch instructions.
3167 masm.bind(fakeL);
3168 if (is_branch) {
3169 n->as_MachBranch()->save_label(&saveL, &save_bnum);
3170 n->as_MachBranch()->label_set(&fakeL, 0);
3171 }
3172 n->emit(&masm, C->regalloc());
3173
3174 // Emitting into the scratch buffer should not fail
3175 assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3176
3177 if (is_branch) // Restore label.
3178 n->as_MachBranch()->label_set(saveL, save_bnum);
3179
3180 // End scratch_emit_size section.
3181 set_in_scratch_emit_size(false);
3182
3183 return buf.insts_size();
3184 }
3185
3186 void PhaseOutput::install() {
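// Note that install_code() is reached even when C->should_install_code() is
// false; register_method() receives that flag (its last argument below) and
// is expected to skip the actual installation in that case.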
3187 if (C->should_install_code() && C->stub_function() != nullptr) {
3188 install_stub(C->stub_name());
3189 } else {
3190 install_code(C->method(),
3191 C->entry_bci(),
3192 CompilerThread::current()->compiler(),
3193 C->has_unsafe_access(),
3194 SharedRuntime::is_wide_vector(C->max_vector_size()));
3195 }
3196 }
3197
3198 void PhaseOutput::install_code(ciMethod* target,
3199 int entry_bci,
3200 AbstractCompiler* compiler,
3201 bool has_unsafe_access,
3202 bool has_wide_vectors) {
3203 // Check if we want to skip execution of all compiled code.
3204 {
3205 #ifndef PRODUCT
3206 if (OptoNoExecute) {
3207 C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
3208 return;
3209 }
3210 #endif
3211 Compile::TracePhase tp(_t_registerMethod);
3212
3217 if (!target->is_static()) {
3218 // The unverified entry point (UEP) guarantees that the verified entry point (VEP) is padded to
3219 // its required alignment. The padding sits before the inline cache check, so dispatching through
3220 // the UEP executes no extra nops, yet the VEP still ends up properly aligned.
3221 _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3222 }
3223 _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3224 _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3225 }
3226
3227 C->env()->register_method(target,
3228 entry_bci,
3229 &_code_offsets,
3230 _orig_pc_slot_offset_in_bytes,
3231 code_buffer(),
3232 frame_size_in_words(),
3233 oop_map_set(),
3234 &_handler_table,
3235 inc_table(),
3236 compiler,
3237 C->has_clinit_barriers(),
3238 C->for_preload(),
3239 has_unsafe_access,
3240 SharedRuntime::is_wide_vector(C->max_vector_size()),
3241 C->has_monitors(),
3242 C->has_scoped_access(),
3243 0,
3244 C->should_install_code());
3245
3246 if (C->log() != nullptr) { // Print code cache state into compiler log
3247 C->log()->code_cache_state();
3248 }
3249 assert(!C->has_clinit_barriers() || C->for_preload(), "class init barriers should be only in preload code");
3250 }
3251 }
3252 void PhaseOutput::install_stub(const char* stub_name) {
3253 // Entry point will be accessed using stub_entry_point();
3254 if (code_buffer() == nullptr) {
3255 Matcher::soft_match_failure();
3256 } else {
3257 if (PrintAssembly && (WizardMode || Verbose))
3258 tty->print_cr("### Stub::%s", stub_name);
3259
3260 if (!C->failing()) {
3261 assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3262
3263 // Make the NMethod
3264 // For now we mark the frame as never safe for profile stackwalking
3265 RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
3266 code_buffer(),
3267 CodeOffsets::frame_never_safe,
3268 // _code_offsets.value(CodeOffsets::Frame_Complete),
3269 frame_size_in_words(),
3406
3407 // Dump the exception table as well
3408 if (n->is_Catch() && (Verbose || WizardMode)) {
3409 // Print the exception table for this offset
3410 _handler_table.print_subtable_for(pc);
3411 }
3412 st->bol(); // Make sure we start on a new line
3413 }
3414 st->cr(); // one empty line between blocks
3415 } // End of per-block dump
3416
3417 if (cut_short) st->print_cr("*** disassembly is cut short ***");
3418 }
3419 #endif
3420
3421 #ifndef PRODUCT
3422 void PhaseOutput::print_statistics() {
3423 Scheduling::print_statistics();
3424 }
3425 #endif
3426
3427 int PhaseOutput::max_inst_size() {
3428 if (AOTCodeCache::maybe_dumping_code()) {
3429 // See the comment in output.hpp.
3430 return 16384;
3431 } else {
3432 return mainline_MAX_inst_size;
3433 }
3434 }
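// max_inst_gcstub_size() below keeps the smaller mainline bound: GC barrier
// stubs are sized against it even when AOT code dumping raises
// max_inst_size(), as the assert documents.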
3435
3436 int PhaseOutput::max_inst_gcstub_size() {
3437 assert(mainline_MAX_inst_size <= max_inst_size(), "Sanity");
3438 return mainline_MAX_inst_size;
3439 }
|