1352 // Initialize the space for the BufferBlob used to find and verify
1353 // instruction size in MachNode::emit_size()
1354 init_scratch_buffer_blob(const_req);
1355 }
1356
1357 CodeBuffer* PhaseOutput::init_buffer() {
1358 int stub_req = _buf_sizes._stub;
1359 int code_req = _buf_sizes._code;
1360 int const_req = _buf_sizes._const;
1361
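         // Extra slack of one native call's size, added to the total buffer request below.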
1362 int pad_req = NativeCall::byte_size();
1363
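         // Ask the GC's barrier set how much stub space its barrier stubs may need.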
1364 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1365 stub_req += bs->estimate_stub_size();
1366
1367 // nmethod and CodeBuffer count stubs & constants as part of method's code.
1368 // class HandlerImpl is platform-specific and defined in the *.ad files.
1369 int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
1370 int deopt_handler_req = HandlerImpl::size_deopt_handler() + MAX_stubs_size; // add marginal slop for handler
1371 stub_req += MAX_stubs_size; // ensure per-stub margin
1372 code_req += MAX_inst_size; // ensure per-instruction margin
1373
1374 if (StressCodeBuffers)
1375 code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
1376
1377 int total_req =
1378 const_req +
1379 code_req +
1380 pad_req +
1381 stub_req +
1382 exception_handler_req +
1383 deopt_handler_req; // deopt handler
1384
1385 if (C->has_method_handle_invokes())
1386 total_req += deopt_handler_req; // deopt MH handler
1387
1388 CodeBuffer* cb = code_buffer();
1389 cb->set_const_section_alignment(constant_table().alignment());
1390 cb->initialize(total_req, _buf_sizes._reloc);
1391
1392 // Have we run out of code space?
1549 int padding = mach->compute_padding(current_offset);
1550 // Make sure safepoint node for polling is distinct from a call's
1551 // return by adding a nop if needed.
1552 if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
1553 padding = nop_size;
1554 }
1555 if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
1556 current_offset == last_avoid_back_to_back_offset) {
1557         // Avoid emitting certain instructions back to back.
1558 padding = nop_size;
1559 }
1560
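             // Materialize any required padding as an explicit MachNopNode so the block's
             // node list and code offsets stay consistent.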
1561 if (padding > 0) {
1562 assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
1563 int nops_cnt = padding / nop_size;
1564 MachNode *nop = new MachNopNode(nops_cnt);
1565 block->insert_node(nop, j++);
1566 last_inst++;
1567 C->cfg()->map_node_to_block(nop, block);
1568 // Ensure enough space.
1569 masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1570 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1571 C->record_failure("CodeCache is full");
1572 return;
1573 }
1574 nop->emit(masm, C->regalloc());
1575 masm->code()->flush_bundle(true);
1576 current_offset = masm->offset();
1577 }
1578
1579 bool observe_safepoint = is_sfn;
1580 // Remember the start of the last call in a basic block
1581 if (is_mcall) {
1582 MachCallNode *mcall = mach->as_MachCall();
1583
1584 // This destination address is NOT PC-relative
1585 mcall->method_set((intptr_t)mcall->entry_point());
1586
1587 // Save the return address
1588 call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1589
1678 Label *blkLabel = &blk_labels[block_num];
1679 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1680 }
1681 }
1682 }
1683 } else if (!n->is_Proj()) {
1684 // Remember the beginning of the previous instruction, in case
1685 // it's followed by a flag-kill and a null-check. Happens on
1686 // Intel all the time, with add-to-memory kind of opcodes.
1687 previous_offset = current_offset;
1688 }
1689
1690 // Not an else-if!
1691       // If this is a trap-based cmp, then add its offset to the list.
1692 if (mach->is_TrapBasedCheckNode()) {
1693 inct_starts[inct_cnt++] = current_offset;
1694 }
1695 }
1696
1697 // Verify that there is sufficient space remaining
1698 masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1699 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1700 C->record_failure("CodeCache is full");
1701 return;
1702 }
1703
1704 // Save the offset for the listing
1705 #if defined(SUPPORT_OPTO_ASSEMBLY)
1706 if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1707 node_offsets[n->_idx] = masm->offset();
1708 }
1709 #endif
1710 assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1711
1712 // "Normal" instruction case
1713 DEBUG_ONLY(uint instr_offset = masm->offset());
1714 n->emit(masm, C->regalloc());
1715 current_offset = masm->offset();
1716
1717 // Above we only verified that there is enough space in the instruction section.
1718 // However, the instruction may emit stubs that cause code buffer expansion.
1904 ttyLocker ttyl; // keep the following output all in one block
1905 if (!VMThread::should_terminate()) { // test this under the tty lock
1906     // print_metadata and dump_asm may safepoint, which makes us lose the ttylock.
1907     // We call them first and write to a stringStream, then we retake the lock to
1908     // make sure the end tag is coherent and that xmlStream->pop_tag is done in a thread-safe way.
1909 ResourceMark rm;
1910 stringStream method_metadata_str;
1911 if (C->method() != nullptr) {
1912 C->method()->print_metadata(&method_metadata_str);
1913 }
1914 stringStream dump_asm_str;
1915 dump_asm_on(&dump_asm_str, node_offsets, node_offset_limit);
1916
1917 NoSafepointVerifier nsv;
1918 ttyLocker ttyl2;
1919 // This output goes directly to the tty, not the compiler log.
1920 // To enable tools to match it up with the compilation activity,
1921 // be sure to tag this tty output with the compile ID.
1922 if (xtty != nullptr) {
1923 xtty->head("opto_assembly compile_id='%d'%s", C->compile_id(),
1924 C->is_osr_compilation() ? " compile_kind='osr'" : "");
1925 }
1926 if (C->method() != nullptr) {
1927 tty->print_cr("----------------------- MetaData before Compile_id = %d ------------------------", C->compile_id());
1928 tty->print_raw(method_metadata_str.freeze());
1929 } else if (C->stub_name() != nullptr) {
1930 tty->print_cr("----------------------------- RuntimeStub %s -------------------------------", C->stub_name());
1931 }
1932 tty->cr();
1933 tty->print_cr("------------------------ OptoAssembly for Compile_id = %d -----------------------", C->compile_id());
1934 tty->print_raw(dump_asm_str.freeze());
1935 tty->print_cr("--------------------------------------------------------------------------------");
1936 if (xtty != nullptr) {
1937 xtty->tail("opto_assembly");
1938 }
1939 }
1940 }
1941 #endif
1942 }
1943
1944 void PhaseOutput::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
3337
3338
3339 //-----------------------scratch_emit_size-------------------------------------
3340 // Helper function that computes size by emitting code
3341 uint PhaseOutput::scratch_emit_size(const Node* n) {
3342 // Start scratch_emit_size section.
3343 set_in_scratch_emit_size(true);
3344
3345 // Emit into a trash buffer and count bytes emitted.
3346 // This is a pretty expensive way to compute a size,
3347 // but it works well enough if seldom used.
3348 // All common fixed-size instructions are given a size
3349 // method by the AD file.
3350 // Note that the scratch buffer blob and locs memory are
3351 // allocated at the beginning of the compile task, and
3352 // may be shared by several calls to scratch_emit_size.
3353 // The allocation of the scratch buffer blob is particularly
3354 // expensive, since it has to grab the code cache lock.
3355 BufferBlob* blob = this->scratch_buffer_blob();
3356 assert(blob != nullptr, "Initialize BufferBlob at start");
3357 assert(blob->size() > MAX_inst_size, "sanity");
3358 relocInfo* locs_buf = scratch_locs_memory();
3359 address blob_begin = blob->content_begin();
3360 address blob_end = (address)locs_buf;
3361 assert(blob->contains(blob_end), "sanity");
3362 CodeBuffer buf(blob_begin, blob_end - blob_begin);
3363 buf.initialize_consts_size(_scratch_const_size);
3364 buf.initialize_stubs_size(MAX_stubs_size);
3365 assert(locs_buf != nullptr, "sanity");
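       // Split the shared relocation scratch area evenly among the consts, insts, and stubs sections.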
3366 int lsize = MAX_locs_size / 3;
3367 buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
3368 buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
3369 buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
3370 // Mark as scratch buffer.
3371 buf.consts()->set_scratch_emit();
3372 buf.insts()->set_scratch_emit();
3373 buf.stubs()->set_scratch_emit();
3374
3375 // Do the emission.
3376
3377 Label fakeL; // Fake label for branch instructions.
3382 masm.bind(fakeL);
3383 if (is_branch) {
3384 n->as_MachBranch()->save_label(&saveL, &save_bnum);
3385 n->as_MachBranch()->label_set(&fakeL, 0);
3386 }
3387 n->emit(&masm, C->regalloc());
3388
3389 // Emitting into the scratch buffer should not fail
3390 assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3391
3392 if (is_branch) // Restore label.
3393 n->as_MachBranch()->label_set(saveL, save_bnum);
3394
3395 // End scratch_emit_size section.
3396 set_in_scratch_emit_size(false);
3397
3398 return buf.insts_size();
3399 }
3400
3401 void PhaseOutput::install() {
3402 if (!C->should_install_code()) {
3403 return;
3404 } else if (C->stub_function() != nullptr) {
3405 install_stub(C->stub_name());
3406 } else {
3407 install_code(C->method(),
3408 C->entry_bci(),
3409 CompileBroker::compiler2(),
3410 C->has_unsafe_access(),
3411 SharedRuntime::is_wide_vector(C->max_vector_size()));
3412 }
3413 }
3414
3415 void PhaseOutput::install_code(ciMethod* target,
3416 int entry_bci,
3417 AbstractCompiler* compiler,
3418 bool has_unsafe_access,
3419 bool has_wide_vectors) {
3420 // Check if we want to skip execution of all compiled code.
3421 {
3422 #ifndef PRODUCT
3423 if (OptoNoExecute) {
3424 C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
3425 return;
3426 }
3427 #endif
3428 Compile::TracePhase tp(_t_registerMethod);
3429
3434 if (!target->is_static()) {
3435 // The UEP of an nmethod ensures that the VEP is padded. However, the padding of the UEP is placed
3436 // before the inline cache check, so we don't have to execute any nop instructions when dispatching
3437 // through the UEP, yet we can ensure that the VEP is aligned appropriately.
3438 _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3439 }
3440 _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3441 _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3442 }
3443
3444 C->env()->register_method(target,
3445 entry_bci,
3446 &_code_offsets,
3447 _orig_pc_slot_offset_in_bytes,
3448 code_buffer(),
3449 frame_size_in_words(),
3450 oop_map_set(),
3451 &_handler_table,
3452 inc_table(),
3453 compiler,
3454 has_unsafe_access,
3455 SharedRuntime::is_wide_vector(C->max_vector_size()),
3456 C->has_monitors(),
3457 C->has_scoped_access(),
3458 0);
3459
3460 if (C->log() != nullptr) { // Print code cache state into compiler log
3461 C->log()->code_cache_state();
3462 }
3463 }
3464 }
3465 void PhaseOutput::install_stub(const char* stub_name) {
3466 // Entry point will be accessed using stub_entry_point();
3467 if (code_buffer() == nullptr) {
3468 Matcher::soft_match_failure();
3469 } else {
3470 if (PrintAssembly && (WizardMode || Verbose))
3471 tty->print_cr("### Stub::%s", stub_name);
3472
3473 if (!C->failing()) {
3474 assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3475
3476 // Make the NMethod
3477 // For now we mark the frame as never safe for profile stackwalking
3478 RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
3479 code_buffer(),
3480 CodeOffsets::frame_never_safe,
3481 // _code_offsets.value(CodeOffsets::Frame_Complete),
3482 frame_size_in_words(),
3648 // Dump the exception table as well
3649   if (n->is_Catch() && (Verbose || WizardMode)) {
3650 // Print the exception table for this offset
3651 _handler_table.print_subtable_for(pc);
3652 }
3653 st->bol(); // Make sure we start on a new line
3654 }
3655 st->cr(); // one empty line between blocks
3656 assert(cut_short || delay == nullptr, "no unconditional delay branch");
3657 } // End of per-block dump
3658
3659 if (cut_short) st->print_cr("*** disassembly is cut short ***");
3660 }
3661 #endif
3662
3663 #ifndef PRODUCT
3664 void PhaseOutput::print_statistics() {
3665 Scheduling::print_statistics();
3666 }
3667 #endif
|
1352 // Initialize the space for the BufferBlob used to find and verify
1353 // instruction size in MachNode::emit_size()
1354 init_scratch_buffer_blob(const_req);
1355 }
1356
1357 CodeBuffer* PhaseOutput::init_buffer() {
1358 int stub_req = _buf_sizes._stub;
1359 int code_req = _buf_sizes._code;
1360 int const_req = _buf_sizes._const;
1361
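         // Extra slack of one native call's size, added to the total buffer request below.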
1362 int pad_req = NativeCall::byte_size();
1363
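         // Ask the GC's barrier set how much stub space its barrier stubs may need.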
1364 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1365 stub_req += bs->estimate_stub_size();
1366
1367 // nmethod and CodeBuffer count stubs & constants as part of method's code.
1368 // class HandlerImpl is platform-specific and defined in the *.ad files.
1369 int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
1370 int deopt_handler_req = HandlerImpl::size_deopt_handler() + MAX_stubs_size; // add marginal slop for handler
1371 stub_req += MAX_stubs_size; // ensure per-stub margin
1372 code_req += max_inst_size(); // ensure per-instruction margin
1373
1374 if (StressCodeBuffers)
1375 code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
1376
1377 int total_req =
1378 const_req +
1379 code_req +
1380 pad_req +
1381 stub_req +
1382 exception_handler_req +
1383 deopt_handler_req; // deopt handler
1384
1385 if (C->has_method_handle_invokes())
1386 total_req += deopt_handler_req; // deopt MH handler
1387
1388 CodeBuffer* cb = code_buffer();
1389 cb->set_const_section_alignment(constant_table().alignment());
1390 cb->initialize(total_req, _buf_sizes._reloc);
1391
1392 // Have we run out of code space?
1549 int padding = mach->compute_padding(current_offset);
1550 // Make sure safepoint node for polling is distinct from a call's
1551 // return by adding a nop if needed.
1552 if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
1553 padding = nop_size;
1554 }
1555 if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
1556 current_offset == last_avoid_back_to_back_offset) {
1557         // Avoid emitting certain instructions back to back.
1558 padding = nop_size;
1559 }
1560
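             // Materialize any required padding as an explicit MachNopNode so the block's
             // node list and code offsets stay consistent.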
1561 if (padding > 0) {
1562 assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
1563 int nops_cnt = padding / nop_size;
1564 MachNode *nop = new MachNopNode(nops_cnt);
1565 block->insert_node(nop, j++);
1566 last_inst++;
1567 C->cfg()->map_node_to_block(nop, block);
1568 // Ensure enough space.
1569 masm->code()->insts()->maybe_expand_to_ensure_remaining(max_inst_size());
1570 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1571 C->record_failure("CodeCache is full");
1572 return;
1573 }
1574 nop->emit(masm, C->regalloc());
1575 masm->code()->flush_bundle(true);
1576 current_offset = masm->offset();
1577 }
1578
1579 bool observe_safepoint = is_sfn;
1580 // Remember the start of the last call in a basic block
1581 if (is_mcall) {
1582 MachCallNode *mcall = mach->as_MachCall();
1583
1584 // This destination address is NOT PC-relative
1585 mcall->method_set((intptr_t)mcall->entry_point());
1586
1587 // Save the return address
1588 call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1589
1678 Label *blkLabel = &blk_labels[block_num];
1679 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1680 }
1681 }
1682 }
1683 } else if (!n->is_Proj()) {
1684 // Remember the beginning of the previous instruction, in case
1685 // it's followed by a flag-kill and a null-check. Happens on
1686 // Intel all the time, with add-to-memory kind of opcodes.
1687 previous_offset = current_offset;
1688 }
1689
1690 // Not an else-if!
1691       // If this is a trap-based cmp, then add its offset to the list.
1692 if (mach->is_TrapBasedCheckNode()) {
1693 inct_starts[inct_cnt++] = current_offset;
1694 }
1695 }
1696
1697 // Verify that there is sufficient space remaining
1698 masm->code()->insts()->maybe_expand_to_ensure_remaining(max_inst_size());
1699 if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1700 C->record_failure("CodeCache is full");
1701 return;
1702 }
1703
1704 // Save the offset for the listing
1705 #if defined(SUPPORT_OPTO_ASSEMBLY)
1706 if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1707 node_offsets[n->_idx] = masm->offset();
1708 }
1709 #endif
1710 assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1711
1712 // "Normal" instruction case
1713 DEBUG_ONLY(uint instr_offset = masm->offset());
1714 n->emit(masm, C->regalloc());
1715 current_offset = masm->offset();
1716
1717 // Above we only verified that there is enough space in the instruction section.
1718 // However, the instruction may emit stubs that cause code buffer expansion.
1904 ttyLocker ttyl; // keep the following output all in one block
1905 if (!VMThread::should_terminate()) { // test this under the tty lock
1906     // print_metadata and dump_asm may safepoint, which makes us lose the ttylock.
1907     // We call them first and write to a stringStream, then we retake the lock to
1908     // make sure the end tag is coherent and that xmlStream->pop_tag is done in a thread-safe way.
1909 ResourceMark rm;
1910 stringStream method_metadata_str;
1911 if (C->method() != nullptr) {
1912 C->method()->print_metadata(&method_metadata_str);
1913 }
1914 stringStream dump_asm_str;
1915 dump_asm_on(&dump_asm_str, node_offsets, node_offset_limit);
1916
1917 NoSafepointVerifier nsv;
1918 ttyLocker ttyl2;
1919 // This output goes directly to the tty, not the compiler log.
1920 // To enable tools to match it up with the compilation activity,
1921 // be sure to tag this tty output with the compile ID.
1922 if (xtty != nullptr) {
1923 xtty->head("opto_assembly compile_id='%d'%s", C->compile_id(),
1924 C->is_osr_compilation() ? " compile_kind='osr'" :
1925 (C->for_preload() ? " compile_kind='AP'" : ""));
1926 }
1927 if (C->method() != nullptr) {
1928 tty->print_cr("----------------------- MetaData before Compile_id = %d ------------------------", C->compile_id());
1929 tty->print_raw(method_metadata_str.freeze());
1930 } else if (C->stub_name() != nullptr) {
1931 tty->print_cr("----------------------------- RuntimeStub %s -------------------------------", C->stub_name());
1932 }
1933 tty->cr();
1934 tty->print_cr("------------------------ OptoAssembly for Compile_id = %d -----------------------", C->compile_id());
1935 tty->print_raw(dump_asm_str.freeze());
1936 tty->print_cr("--------------------------------------------------------------------------------");
1937 if (xtty != nullptr) {
1938 xtty->tail("opto_assembly");
1939 }
1940 }
1941 }
1942 #endif
1943 }
1944
1945 void PhaseOutput::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
3338
3339
3340 //-----------------------scratch_emit_size-------------------------------------
3341 // Helper function that computes size by emitting code
3342 uint PhaseOutput::scratch_emit_size(const Node* n) {
3343 // Start scratch_emit_size section.
3344 set_in_scratch_emit_size(true);
3345
3346 // Emit into a trash buffer and count bytes emitted.
3347 // This is a pretty expensive way to compute a size,
3348 // but it works well enough if seldom used.
3349 // All common fixed-size instructions are given a size
3350 // method by the AD file.
3351 // Note that the scratch buffer blob and locs memory are
3352 // allocated at the beginning of the compile task, and
3353 // may be shared by several calls to scratch_emit_size.
3354 // The allocation of the scratch buffer blob is particularly
3355 // expensive, since it has to grab the code cache lock.
3356 BufferBlob* blob = this->scratch_buffer_blob();
3357 assert(blob != nullptr, "Initialize BufferBlob at start");
3358 assert(blob->size() > max_inst_size(), "sanity");
3359 relocInfo* locs_buf = scratch_locs_memory();
3360 address blob_begin = blob->content_begin();
3361 address blob_end = (address)locs_buf;
3362 assert(blob->contains(blob_end), "sanity");
3363 CodeBuffer buf(blob_begin, blob_end - blob_begin);
3364 buf.initialize_consts_size(_scratch_const_size);
3365 buf.initialize_stubs_size(MAX_stubs_size);
3366 assert(locs_buf != nullptr, "sanity");
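       // Split the shared relocation scratch area evenly among the consts, insts, and stubs sections.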
3367 int lsize = MAX_locs_size / 3;
3368 buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
3369 buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
3370 buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
3371 // Mark as scratch buffer.
3372 buf.consts()->set_scratch_emit();
3373 buf.insts()->set_scratch_emit();
3374 buf.stubs()->set_scratch_emit();
3375
3376 // Do the emission.
3377
3378 Label fakeL; // Fake label for branch instructions.
3383 masm.bind(fakeL);
3384 if (is_branch) {
3385 n->as_MachBranch()->save_label(&saveL, &save_bnum);
3386 n->as_MachBranch()->label_set(&fakeL, 0);
3387 }
3388 n->emit(&masm, C->regalloc());
3389
3390 // Emitting into the scratch buffer should not fail
3391 assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3392
3393 if (is_branch) // Restore label.
3394 n->as_MachBranch()->label_set(saveL, save_bnum);
3395
3396 // End scratch_emit_size section.
3397 set_in_scratch_emit_size(false);
3398
3399 return buf.insts_size();
3400 }
3401
3402 void PhaseOutput::install() {
3403 if (C->should_install_code() && C->stub_function() != nullptr) {
3404 install_stub(C->stub_name());
3405 } else {
3406 install_code(C->method(),
3407 C->entry_bci(),
3408 CompilerThread::current()->compiler(),
3409 C->has_unsafe_access(),
3410 SharedRuntime::is_wide_vector(C->max_vector_size()));
3411 }
3412 }
3413
3414 void PhaseOutput::install_code(ciMethod* target,
3415 int entry_bci,
3416 AbstractCompiler* compiler,
3417 bool has_unsafe_access,
3418 bool has_wide_vectors) {
3419 // Check if we want to skip execution of all compiled code.
3420 {
3421 #ifndef PRODUCT
3422 if (OptoNoExecute) {
3423 C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
3424 return;
3425 }
3426 #endif
3427 Compile::TracePhase tp(_t_registerMethod);
3428
3433 if (!target->is_static()) {
3434 // The UEP of an nmethod ensures that the VEP is padded. However, the padding of the UEP is placed
3435 // before the inline cache check, so we don't have to execute any nop instructions when dispatching
3436 // through the UEP, yet we can ensure that the VEP is aligned appropriately.
3437 _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3438 }
3439 _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3440 _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3441 }
3442
3443 C->env()->register_method(target,
3444 entry_bci,
3445 &_code_offsets,
3446 _orig_pc_slot_offset_in_bytes,
3447 code_buffer(),
3448 frame_size_in_words(),
3449 oop_map_set(),
3450 &_handler_table,
3451 inc_table(),
3452 compiler,
3453 C->has_clinit_barriers(),
3454 C->for_preload(),
3455 has_unsafe_access,
3456 SharedRuntime::is_wide_vector(C->max_vector_size()),
3457 C->has_monitors(),
3458 C->has_scoped_access(),
3459 0,
3460 C->should_install_code());
3461
3462 if (C->log() != nullptr) { // Print code cache state into compiler log
3463 C->log()->code_cache_state();
3464 }
3465 assert(!C->has_clinit_barriers() || C->for_preload(), "class init barriers should be only in preload code");
3466 }
3467 }
3468 void PhaseOutput::install_stub(const char* stub_name) {
3469 // Entry point will be accessed using stub_entry_point();
3470 if (code_buffer() == nullptr) {
3471 Matcher::soft_match_failure();
3472 } else {
3473 if (PrintAssembly && (WizardMode || Verbose))
3474 tty->print_cr("### Stub::%s", stub_name);
3475
3476 if (!C->failing()) {
3477 assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3478
3479 // Make the NMethod
3480 // For now we mark the frame as never safe for profile stackwalking
3481 RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
3482 code_buffer(),
3483 CodeOffsets::frame_never_safe,
3484 // _code_offsets.value(CodeOffsets::Frame_Complete),
3485 frame_size_in_words(),
3651 // Dump the exception table as well
3652   if (n->is_Catch() && (Verbose || WizardMode)) {
3653 // Print the exception table for this offset
3654 _handler_table.print_subtable_for(pc);
3655 }
3656 st->bol(); // Make sure we start on a new line
3657 }
3658 st->cr(); // one empty line between blocks
3659 assert(cut_short || delay == nullptr, "no unconditional delay branch");
3660 } // End of per-block dump
3661
3662 if (cut_short) st->print_cr("*** disassembly is cut short ***");
3663 }
3664 #endif
3665
3666 #ifndef PRODUCT
3667 void PhaseOutput::print_statistics() {
3668 Scheduling::print_statistics();
3669 }
3670 #endif
3671
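     // Per-instruction size bound used when sizing and expanding code buffers; a much
     // larger bound applies when code may be dumped into the AOT code cache (see output.hpp).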
3672 int PhaseOutput::max_inst_size() {
3673 if (AOTCodeCache::maybe_dumping_code()) {
3674 // See the comment in output.hpp.
3675 return 16384;
3676 } else {
3677 return mainline_MAX_inst_size;
3678 }
3679 }
3680
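     // Per-instruction bound for GC barrier stub emission; this stays at the mainline
     // value even when max_inst_size() is enlarged.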
3681 int PhaseOutput::max_inst_gcstub_size() {
3682 assert(mainline_MAX_inst_size <= max_inst_size(), "Sanity");
3683 return mainline_MAX_inst_size;
3684 }
|