 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfo.hpp"
#include "code/debugInfoRec.hpp"
#include "code/SCCache.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.hpp"
#include "opto/ad.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/c2_MacroAssembler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/node.hpp"
#include "opto/optoreg.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/type.hpp"
// ...

  // Initialize the space for the BufferBlob used to find and verify
  // instruction size in MachNode::emit_size()
  init_scratch_buffer_blob(const_req);
}

CodeBuffer* PhaseOutput::init_buffer() {
  int stub_req  = _buf_sizes._stub;
  int code_req  = _buf_sizes._code;
  int const_req = _buf_sizes._const;

  int pad_req   = NativeCall::byte_size();

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  stub_req += bs->estimate_stub_size();

  // nmethod and CodeBuffer count stubs & constants as part of method's code.
  // class HandlerImpl is platform-specific and defined in the *.ad files.
  int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
  int deopt_handler_req     = HandlerImpl::size_deopt_handler()     + MAX_stubs_size; // add marginal slop for handler
  stub_req += MAX_stubs_size;   // ensure per-stub margin
  code_req += max_inst_size();  // ensure per-instruction margin

  if (StressCodeBuffers)
    code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion

  int total_req =
    const_req +
    code_req +
    pad_req +
    stub_req +
    exception_handler_req +
    deopt_handler_req; // deopt handler

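  // MethodHandle call sites deoptimize through a separate MH deopt handler
  // entry point, so a method containing MH invokes reserves room for a
  // second deopt handler below.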
  if (C->has_method_handle_invokes())
    total_req += deopt_handler_req;  // deopt MH handler

  CodeBuffer* cb = code_buffer();
  cb->set_const_section_alignment(constant_table().alignment());
  cb->initialize(total_req, _buf_sizes._reloc);

  // Have we run out of code space?
// ...

        int padding = mach->compute_padding(current_offset);
        // Make sure safepoint node for polling is distinct from a call's
        // return by adding a nop if needed.
        if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
          padding = nop_size;
        }
        if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
            current_offset == last_avoid_back_to_back_offset) {
          // Avoid emitting certain instructions back to back.
          padding = nop_size;
        }

        if (padding > 0) {
          assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
          int nops_cnt = padding / nop_size;
          MachNode *nop = new MachNopNode(nops_cnt);
          block->insert_node(nop, j++);
          last_inst++;
          C->cfg()->map_node_to_block(nop, block);
          // Ensure enough space.
          masm->code()->insts()->maybe_expand_to_ensure_remaining(max_inst_size());
          if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
            C->record_failure("CodeCache is full");
            return;
          }
          nop->emit(masm, C->regalloc());
          masm->code()->flush_bundle(true);
          current_offset = masm->offset();
        }

        bool observe_safepoint = is_sfn;
        // Remember the start of the last call in a basic block
        if (is_mcall) {
          MachCallNode *mcall = mach->as_MachCall();

          // This destination address is NOT PC-relative
          mcall->method_set((intptr_t)mcall->entry_point());

          // Save the return address
          call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
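          // call_returns[] is consulted later, when this block's Catch node
          // (if any) builds its entry in the exception handler table.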

// ...

                Label *blkLabel = &blk_labels[block_num];
                mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
              }
            }
          }
        } else if (!n->is_Proj()) {
          // Remember the beginning of the previous instruction, in case
          // it's followed by a flag-kill and a null-check. Happens on
          // Intel all the time, with add-to-memory kind of opcodes.
          previous_offset = current_offset;
        }

        // Not an else-if!
        // If this is a trap-based cmp, record its offset in the list.
        if (mach->is_TrapBasedCheckNode()) {
          inct_starts[inct_cnt++] = current_offset;
        }
      }

      // Verify that there is sufficient space remaining
      masm->code()->insts()->maybe_expand_to_ensure_remaining(max_inst_size());
      if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
        C->record_failure("CodeCache is full");
        return;
      }

      // Save the offset for the listing
#if defined(SUPPORT_OPTO_ASSEMBLY)
      if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
        node_offsets[n->_idx] = masm->offset();
      }
#endif
      assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");

      // "Normal" instruction case
      DEBUG_ONLY(uint instr_offset = masm->offset());
      n->emit(masm, C->regalloc());
      current_offset = masm->offset();

      // Above we only verified that there is enough space in the instruction section.
      // However, the instruction may emit stubs that cause code buffer expansion.

// ...

//-----------------------scratch_emit_size-------------------------------------
// Helper function that computes size by emitting code
uint PhaseOutput::scratch_emit_size(const Node* n) {
  // Start scratch_emit_size section.
  set_in_scratch_emit_size(true);

  // Emit into a trash buffer and count bytes emitted.
  // This is a pretty expensive way to compute a size,
  // but it works well enough if seldom used.
  // All common fixed-size instructions are given a size
  // method by the AD file.
  // Note that the scratch buffer blob and locs memory are
  // allocated at the beginning of the compile task, and
  // may be shared by several calls to scratch_emit_size.
  // The allocation of the scratch buffer blob is particularly
  // expensive, since it has to grab the code cache lock.
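  // A minimal sketch of the usual entry point (hypothetical reconstruction;
  // the real caller lives in machnode.cpp):
  //
  //   uint MachNode::emit_size(PhaseRegAlloc* ra_) const {
  //     return ra_->C->output()->scratch_emit_size(this);
  //   }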
  BufferBlob* blob = this->scratch_buffer_blob();
  assert(blob != nullptr, "Initialize BufferBlob at start");
  assert(blob->size() > max_inst_size(), "sanity");
  relocInfo* locs_buf = scratch_locs_memory();
  address blob_begin = blob->content_begin();
  address blob_end   = (address)locs_buf;
  assert(blob->contains(blob_end), "sanity");
  CodeBuffer buf(blob_begin, blob_end - blob_begin);
  buf.initialize_consts_size(_scratch_const_size);
  buf.initialize_stubs_size(MAX_stubs_size);
  assert(locs_buf != nullptr, "sanity");
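  // Split the shared relocation buffer evenly between the consts, insts
  // and stubs sections; each gets its own locs segment below.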
  int lsize = MAX_locs_size / 3;
  buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
  buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
  buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
  // Mark as scratch buffer.
  buf.consts()->set_scratch_emit();
  buf.insts()->set_scratch_emit();
  buf.stubs()->set_scratch_emit();

  // Do the emission.

  Label fakeL; // Fake label for branch instructions.
  // ...
  masm.bind(fakeL);
  if (is_branch) {
    n->as_MachBranch()->save_label(&saveL, &save_bnum);
    n->as_MachBranch()->label_set(&fakeL, 0);
  }
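  // Branch nodes need a bound label in order to emit; the real label was
  // saved above and is restored after the scratch emission below.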
  n->emit(&masm, C->regalloc());

  // Emitting into the scratch buffer should not fail
  assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());

  if (is_branch) // Restore label.
    n->as_MachBranch()->label_set(saveL, save_bnum);

  // End scratch_emit_size section.
  set_in_scratch_emit_size(false);

  return buf.insts_size();
}

void PhaseOutput::install() {
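  // Note: when should_install_code() is false we still fall through to
  // install_code(); the flag is forwarded to register_method() below so the
  // runtime can skip the actual installation.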
  if (C->should_install_code() && C->stub_function() != nullptr) {
    install_stub(C->stub_name());
  } else {
    install_code(C->method(),
                 C->entry_bci(),
                 CompilerThread::current()->compiler(),
                 C->has_unsafe_access(),
                 SharedRuntime::is_wide_vector(C->max_vector_size()));
  }
}

void PhaseOutput::install_code(ciMethod* target,
                               int entry_bci,
                               AbstractCompiler* compiler,
                               bool has_unsafe_access,
                               bool has_wide_vectors) {
  // Check if we want to skip execution of all compiled code.
  {
#ifndef PRODUCT
    if (OptoNoExecute) {
      C->record_method_not_compilable("+OptoNoExecute"); // Flag as failed
      return;
    }
#endif
    Compile::TracePhase tp(_t_registerMethod);

    // ...
      if (!target->is_static()) {
        // The UEP of an nmethod ensures that the VEP is padded. However, the padding of the UEP is placed
        // before the inline cache check, so we don't have to execute any nop instructions when dispatching
        // through the UEP, yet we can ensure that the VEP is aligned appropriately.
        _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
      }
      _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
    }

    C->env()->register_method(target,
                              entry_bci,
                              &_code_offsets,
                              _orig_pc_slot_offset_in_bytes,
                              code_buffer(),
                              frame_size_in_words(),
                              oop_map_set(),
                              &_handler_table,
                              inc_table(),
                              compiler,
                              C->has_clinit_barriers(),
                              C->for_preload(),
                              has_unsafe_access,
                              SharedRuntime::is_wide_vector(C->max_vector_size()),
                              C->has_monitors(),
                              C->has_scoped_access(),
                              0,
                              C->should_install_code());

    if (C->log() != nullptr) { // Print code cache state into compiler log
      C->log()->code_cache_state();
    }
    if (C->has_clinit_barriers()) {
      assert(C->for_preload(), "sanity");
      // Build second version of code without class initialization barriers
      if (C->env()->task()->compile_reason() == CompileTask::Reason_PrecompileForPreload) {
        // don't automatically precompile a barrier-free version unless explicitly asked
      } else {
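        // Recording this artificial failure asks the broker to retry the
        // compilation, this time without clinit barriers.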
        C->record_failure(C2Compiler::retry_no_clinit_barriers());
      }
    }
  }
}
void PhaseOutput::install_stub(const char* stub_name) {
  // Entry point will be accessed using stub_entry_point();
  if (code_buffer() == nullptr) {
    Matcher::soft_match_failure();
  } else {
    if (PrintAssembly && (WizardMode || Verbose))
      tty->print_cr("### Stub::%s", stub_name);

    if (!C->failing()) {
      assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");

      // Make the NMethod
      // For now we mark the frame as never safe for profile stackwalking
      RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
                                                      code_buffer(),
                                                      CodeOffsets::frame_never_safe,
                                                      // _code_offsets.value(CodeOffsets::Frame_Complete),
                                                      frame_size_in_words(),
// ...

      // Dump the exception table as well
      if (n->is_Catch() && (Verbose || WizardMode)) {
        // Print the exception table for this offset
        _handler_table.print_subtable_for(pc);
      }
      st->bol(); // Make sure we start on a new line
    }
    st->cr(); // one empty line between blocks
    assert(cut_short || delay == nullptr, "no unconditional delay branch");
  } // End of per-block dump

  if (cut_short) st->print_cr("*** disassembly is cut short ***");
}
#endif

#ifndef PRODUCT
void PhaseOutput::print_statistics() {
  Scheduling::print_statistics();
}
#endif

int PhaseOutput::max_inst_size() {
  if (SCCache::is_on_for_write()) {
    // See the comment in output.hpp.
    return 16384;
  } else {
    return mainline_MAX_inst_size;
  }
}

int PhaseOutput::max_inst_gcstub_size() {
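  // Note: GC barrier stubs keep the mainline limit even when max_inst_size()
  // is raised for the SCCache; the assert sanity-checks that relationship.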
  assert(mainline_MAX_inst_size <= max_inst_size(), "Sanity");
  return mainline_MAX_inst_size;
}