
src/hotspot/share/opto/output.cpp

Old version:

1332 
1333   // Initialize the space for the BufferBlob used to find and verify
1334   // instruction size in MachNode::emit_size()
1335   init_scratch_buffer_blob(const_req);
1336 }
1337 
1338 CodeBuffer* PhaseOutput::init_buffer() {
1339   int stub_req  = _buf_sizes._stub;
1340   int code_req  = _buf_sizes._code;
1341   int const_req = _buf_sizes._const;
1342 
1343   int pad_req   = NativeCall::byte_size();
1344 
1345   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1346   stub_req += bs->estimate_stub_size();
1347 
1348   // nmethod and CodeBuffer count stubs & constants as part of method's code.
1349   // class HandlerImpl is platform-specific and defined in the *.ad files.
1350   int deopt_handler_req     = HandlerImpl::size_deopt_handler()     + MAX_stubs_size; // add marginal slop for handler
1351   stub_req += MAX_stubs_size;   // ensure per-stub margin
1352   code_req += MAX_inst_size;    // ensure per-instruction margin
1353 
1354   if (StressCodeBuffers)
1355     code_req = const_req = stub_req = deopt_handler_req = 0x10;  // force expansion
1356 
1357   int total_req =
1358           const_req +
1359           code_req +
1360           pad_req +
1361           stub_req +
1362           deopt_handler_req;               // deopt handler
1363 
1364   CodeBuffer* cb = code_buffer();
1365   cb->set_const_section_alignment(constant_table().alignment());
1366   cb->initialize(total_req, _buf_sizes._reloc);
1367 
1368   // Have we run out of code space?
1369   if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1370     C->record_failure("CodeCache is full");
1371     return nullptr;
1372   }
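For context, a hedged sketch of how a caller consumes init_buffer()'s result; the call-site shape below is assumed, not copied from this file:

    // Assumed call-site shape: bail out when init_buffer() signals failure.
    CodeBuffer* cb = init_buffer();
    if (cb == nullptr) {
      return;  // "CodeCache is full" was already recorded on the Compile object
    }
    C2_MacroAssembler masm(cb);       // assembler over the freshly sized buffer
    fill_buffer(&masm, blk_starts);   // blk_starts (block offsets) assumed from context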

1508         int padding = mach->compute_padding(current_offset);
1509         // Make sure safepoint node for polling is distinct from a call's
1510         // return by adding a nop if needed.
1511         if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
1512           padding = nop_size;
1513         }
1514         if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
1515             current_offset == last_avoid_back_to_back_offset) {
1517           // Avoid emitting some instructions back to back.
1517           padding = nop_size;
1518         }
1519 
1520         if (padding > 0) {
1521           assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
1522           int nops_cnt = padding / nop_size;
1523           MachNode *nop = new MachNopNode(nops_cnt);
1524           block->insert_node(nop, j++);
1525           last_inst++;
1526           C->cfg()->map_node_to_block(nop, block);
1527           // Ensure enough space.
1528           masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1529           if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1530             C->record_failure("CodeCache is full");
1531             return;
1532           }
1533           nop->emit(masm, C->regalloc());
1534           masm->code()->flush_bundle(true);
1535           current_offset = masm->offset();
1536         }
1537 
1538         bool observe_safepoint = is_sfn;
1539         // Remember the start of the last call in a basic block
1540         if (is_mcall) {
1541           MachCallNode *mcall = mach->as_MachCall();
1542 
1543           // This destination address is NOT PC-relative
1544           mcall->method_set((intptr_t)mcall->entry_point());
1545 
1546           // Save the return address
1547           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1548 

1634                 Label *blkLabel = &blk_labels[block_num];
1635                 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1636               }
1637             }
1638           }
1639         } else if (!n->is_Proj()) {
1640           // Remember the beginning of the previous instruction, in case
1641           // it's followed by a flag-kill and a null-check.  Happens on
1642           // Intel all the time, with add-to-memory kind of opcodes.
1643           previous_offset = current_offset;
1644         }
1645 
1646         // Not an else-if!
1647         // If this is a trap based cmp then add its offset to the list.
1648         if (mach->is_TrapBasedCheckNode()) {
1649           inct_starts[inct_cnt++] = current_offset;
1650         }
1651       }
1652 
1653       // Verify that there is sufficient space remaining
1654       masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1655       if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1656         C->record_failure("CodeCache is full");
1657         return;
1658       }
1659 
1660       // Save the offset for the listing
1661 #if defined(SUPPORT_OPTO_ASSEMBLY)
1662       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1663         node_offsets[n->_idx] = masm->offset();
1664       }
1665 #endif
1666       assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1667 
1668       // "Normal" instruction case
1669       DEBUG_ONLY(uint instr_offset = masm->offset());
1670       n->emit(masm, C->regalloc());
1671       current_offset = masm->offset();
1672 
1673       // Above we only verified that there is enough space in the instruction section.
1674       // However, the instruction may emit stubs that cause code buffer expansion.
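The truncated comment above names the hazard: stub emission during n->emit() can trigger CodeBuffer::expand(), which replaces the underlying blob. A hedged sketch of the re-check the elided continuation needs, mirroring the checks already shown in this function:

    // Sketch (assumed continuation): re-validate the blob after emitting,
    // since an expansion may have swapped it out or failed entirely.
    if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
      C->record_failure("CodeCache is full");
      return;
    }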

1793     // Emit the deopt handler code.
1794     _code_offsets.set_value(CodeOffsets::Deopt, HandlerImpl::emit_deopt_handler(masm));
1795   }
1796 
1797   // One last check for failed CodeBuffer::expand:
1798   if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1799     C->record_failure("CodeCache is full");
1800     return;
1801   }
1802 
1803 #if defined(SUPPORT_ABSTRACT_ASSEMBLY) || defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_OPTO_ASSEMBLY)
1804   if (C->print_assembly()) {
1805     tty->cr();
1806     tty->print_cr("============================= C2-compiled nmethod ==============================");
1807   }
1808 #endif
1809 
1810 #if defined(SUPPORT_OPTO_ASSEMBLY)
1811   // Dump the assembly code, including basic-block numbers
1812   if (C->print_assembly()) {
1813     ttyLocker ttyl;  // keep the following output all in one block
1814     if (!VMThread::should_terminate()) {  // test this under the tty lock
1815       // print_metadata and dump_asm may safepoint, which makes us lose the ttylock.
1816       // We call them first and write to a stringStream, then we retake the lock to
1817       // make sure the end tag is coherent and that xmlStream->pop_tag is done in a thread-safe way.
1818       ResourceMark rm;
1819       stringStream method_metadata_str;
1820       if (C->method() != nullptr) {
1821         C->method()->print_metadata(&method_metadata_str);
1822       }
1823       stringStream dump_asm_str;
1824       dump_asm_on(&dump_asm_str, node_offsets, node_offset_limit);
1825 

1826       NoSafepointVerifier nsv;
1827       ttyLocker ttyl2;
1828       // This output goes directly to the tty, not the compiler log.
1829       // To enable tools to match it up with the compilation activity,
1830       // be sure to tag this tty output with the compile ID.
1831       if (xtty != nullptr) {
1832         xtty->head("opto_assembly compile_id='%d'%s", C->compile_id(),
1833                    C->is_osr_compilation() ? " compile_kind='osr'" : "");

1834       }

1835       if (C->method() != nullptr) {
1836         tty->print_cr("----------------------- MetaData before Compile_id = %d ------------------------", C->compile_id());
1837         tty->print_raw(method_metadata_str.freeze());
1838       } else if (C->stub_name() != nullptr) {
1839         tty->print_cr("----------------------------- RuntimeStub %s -------------------------------", C->stub_name());
1840       }
1841       tty->cr();
1842       tty->print_cr("------------------------ OptoAssembly for Compile_id = %d -----------------------", C->compile_id());
1843       tty->print_raw(dump_asm_str.freeze());
1844       tty->print_cr("--------------------------------------------------------------------------------");
1845       if (xtty != nullptr) {
1846         xtty->tail("opto_assembly");
1847       }
1848     }
1849   }
1850 #endif
1851 }
1852 
1853 void PhaseOutput::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
1854   _inc_table.set_size(cnt);
1855 
1856   uint inct_cnt = 0;
1857   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
1858     Block* block = C->cfg()->get_block(i);
1859     Node *n = nullptr;
1860     int j;
1861 
1862     // Find the branch; ignore trailing NOPs.

3100 
3101 
3102 //-----------------------scratch_emit_size-------------------------------------
3103 // Helper function that computes size by emitting code
3104 uint PhaseOutput::scratch_emit_size(const Node* n) {
3105   // Start scratch_emit_size section.
3106   set_in_scratch_emit_size(true);
3107 
3108   // Emit into a trash buffer and count bytes emitted.
3109   // This is a pretty expensive way to compute a size,
3110   // but it works well enough if seldom used.
3111   // All common fixed-size instructions are given a size
3112   // method by the AD file.
3113   // Note that the scratch buffer blob and locs memory are
3114   // allocated at the beginning of the compile task, and
3115   // may be shared by several calls to scratch_emit_size.
3116   // The allocation of the scratch buffer blob is particularly
3117   // expensive, since it has to grab the code cache lock.
3118   BufferBlob* blob = this->scratch_buffer_blob();
3119   assert(blob != nullptr, "Initialize BufferBlob at start");
3120   assert(blob->size() > MAX_inst_size, "sanity");
3121   relocInfo* locs_buf = scratch_locs_memory();
3122   address blob_begin = blob->content_begin();
3123   address blob_end   = (address)locs_buf;
3124   assert(blob->contains(blob_end), "sanity");
3125   CodeBuffer buf(blob_begin, blob_end - blob_begin);
3126   buf.initialize_consts_size(_scratch_const_size);
3127   buf.initialize_stubs_size(MAX_stubs_size);
3128   assert(locs_buf != nullptr, "sanity");
3129   int lsize = MAX_locs_size / 3;
3130   buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
3131   buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
3132   buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
3133   // Mark as scratch buffer.
3134   buf.consts()->set_scratch_emit();
3135   buf.insts()->set_scratch_emit();
3136   buf.stubs()->set_scratch_emit();
3137 
3138   // Do the emission.
3139 
3140   Label fakeL; // Fake label for branch instructions.

3145   masm.bind(fakeL);
3146   if (is_branch) {
3147     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3148     n->as_MachBranch()->label_set(&fakeL, 0);
3149   }
3150   n->emit(&masm, C->regalloc());
3151 
3152   // Emitting into the scratch buffer should not fail
3153   assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3154 
3155   if (is_branch) // Restore label.
3156     n->as_MachBranch()->label_set(saveL, save_bnum);
3157 
3158   // End scratch_emit_size section.
3159   set_in_scratch_emit_size(false);
3160 
3161   return buf.insts_size();
3162 }
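As the comment near init_scratch_buffer_blob() notes, this helper backs MachNode::emit_size(). A minimal sketch of that delegation, assuming emit_size() simply defers to the scratch emitter (signature assumed, not copied from the machnode sources):

    // Assumed delegation: a size query falls back to one scratch emission.
    uint MachNode::emit_size(PhaseRegAlloc* ra_) const {
      return ra_->C->output()->scratch_emit_size(this);
    }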
3163 
3164 void PhaseOutput::install() {
3165   if (!C->should_install_code()) {
3166     return;
3167   } else if (C->stub_function() != nullptr) {
3168     install_stub(C->stub_name());
3169   } else {
3170     install_code(C->method(),
3171                  C->entry_bci(),
3172                  CompileBroker::compiler2(),
3173                  C->has_unsafe_access(),
3174                  SharedRuntime::is_wide_vector(C->max_vector_size()));
3175   }
3176 }
3177 
3178 void PhaseOutput::install_code(ciMethod*         target,
3179                                int               entry_bci,
3180                                AbstractCompiler* compiler,
3181                                bool              has_unsafe_access,
3182                                bool              has_wide_vectors) {
3183   // Check if we want to skip execution of all compiled code.
3184   {
3185 #ifndef PRODUCT
3186     if (OptoNoExecute) {
3187       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3188       return;
3189     }
3190 #endif
3191     Compile::TracePhase tp(_t_registerMethod);
3192 

3197       if (!target->is_static()) {
3198         // The UEP of an nmethod ensures that the VEP is padded. However, the padding of the UEP is placed
3199         // before the inline cache check, so we don't have to execute any nop instructions when dispatching
3200         // through the UEP, yet we can ensure that the VEP is aligned appropriately.
3201         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3202       }
3203       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3204       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3205     }
3206 
3207     C->env()->register_method(target,
3208                                      entry_bci,
3209                                      &_code_offsets,
3210                                      _orig_pc_slot_offset_in_bytes,
3211                                      code_buffer(),
3212                                      frame_size_in_words(),
3213                                      oop_map_set(),
3214                                      &_handler_table,
3215                                      inc_table(),
3216                                      compiler,


3217                                      has_unsafe_access,
3218                                      SharedRuntime::is_wide_vector(C->max_vector_size()),
3219                                      C->has_monitors(),
3220                                      C->has_scoped_access(),
3221                                      0);

3222 
3223     if (C->log() != nullptr) { // Print code cache state into compiler log
3224       C->log()->code_cache_state();
3225     }

3226   }
3227 }
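The two set_value calls above pin down the entry layout. An illustrative diagram of the offsets they establish, derived only from those calls:

    // ... [padding] [inline cache check] [verified method body ...]
    //               ^ Entry (UEP)          ^ Verified_Entry (VEP)
    //                 = _first_block_size    = _first_block_size
    //                   - MacroAssembler::ic_check_size()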
3228 void PhaseOutput::install_stub(const char* stub_name) {
3229   // Entry point will be accessed using stub_entry_point();
3230   if (code_buffer() == nullptr) {
3231     Matcher::soft_match_failure();
3232   } else {
3233     if (PrintAssembly && (WizardMode || Verbose))
3234       tty->print_cr("### Stub::%s", stub_name);
3235 
3236     if (!C->failing()) {
3237       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3238 
3239       // Make the NMethod
3240       // For now we mark the frame as never safe for profile stackwalking
3241       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
3242                                                       code_buffer(),
3243                                                       CodeOffsets::frame_never_safe,
3244                                                       // _code_offsets.value(CodeOffsets::Frame_Complete),
3245                                                       frame_size_in_words(),

3382 
3383       // Dump the exception table as well
3384       if( n->is_Catch() && (Verbose || WizardMode) ) {
3385         // Print the exception table for this offset
3386         _handler_table.print_subtable_for(pc);
3387       }
3388       st->bol(); // Make sure we start on a new line
3389     }
3390     st->cr(); // one empty line between blocks
3391   } // End of per-block dump
3392 
3393   if (cut_short)  st->print_cr("*** disassembly is cut short ***");
3394 }
3395 #endif
3396 
3397 #ifndef PRODUCT
3398 void PhaseOutput::print_statistics() {
3399   Scheduling::print_statistics();
3400 }
3401 #endif


New version:

1332 
1333   // Initialize the space for the BufferBlob used to find and verify
1334   // instruction size in MachNode::emit_size()
1335   init_scratch_buffer_blob(const_req);
1336 }
1337 
1338 CodeBuffer* PhaseOutput::init_buffer() {
1339   int stub_req  = _buf_sizes._stub;
1340   int code_req  = _buf_sizes._code;
1341   int const_req = _buf_sizes._const;
1342 
1343   int pad_req   = NativeCall::byte_size();
1344 
1345   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1346   stub_req += bs->estimate_stub_size();
1347 
1348   // nmethod and CodeBuffer count stubs & constants as part of method's code.
1349   // class HandlerImpl is platform-specific and defined in the *.ad files.
1350   int deopt_handler_req     = HandlerImpl::size_deopt_handler()     + MAX_stubs_size; // add marginal slop for handler
1351   stub_req += MAX_stubs_size;   // ensure per-stub margin
1352   code_req += max_inst_size();  // ensure per-instruction margin
1353 
1354   if (StressCodeBuffers)
1355     code_req = const_req = stub_req = deopt_handler_req = 0x10;  // force expansion
1356 
1357   int total_req =
1358           const_req +
1359           code_req +
1360           pad_req +
1361           stub_req +
1362           deopt_handler_req;               // deopt handler
1363 
1364   CodeBuffer* cb = code_buffer();
1365   cb->set_const_section_alignment(constant_table().alignment());
1366   cb->initialize(total_req, _buf_sizes._reloc);
1367 
1368   // Have we run out of code space?
1369   if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1370     C->record_failure("CodeCache is full");
1371     return nullptr;
1372   }

1508         int padding = mach->compute_padding(current_offset);
1509         // Make sure safepoint node for polling is distinct from a call's
1510         // return by adding a nop if needed.
1511         if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
1512           padding = nop_size;
1513         }
1514         if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
1515             current_offset == last_avoid_back_to_back_offset) {
1516           // Avoid emitting some instructions back to back.
1517           padding = nop_size;
1518         }
1519 
1520         if (padding > 0) {
1521           assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
1522           int nops_cnt = padding / nop_size;
1523           MachNode *nop = new MachNopNode(nops_cnt);
1524           block->insert_node(nop, j++);
1525           last_inst++;
1526           C->cfg()->map_node_to_block(nop, block);
1527           // Ensure enough space.
1528           masm->code()->insts()->maybe_expand_to_ensure_remaining(max_inst_size());
1529           if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1530             C->record_failure("CodeCache is full");
1531             return;
1532           }
1533           nop->emit(masm, C->regalloc());
1534           masm->code()->flush_bundle(true);
1535           current_offset = masm->offset();
1536         }
1537 
1538         bool observe_safepoint = is_sfn;
1539         // Remember the start of the last call in a basic block
1540         if (is_mcall) {
1541           MachCallNode *mcall = mach->as_MachCall();
1542 
1543           // This destination address is NOT PC-relative
1544           mcall->method_set((intptr_t)mcall->entry_point());
1545 
1546           // Save the return address
1547           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1548 

1634                 Label *blkLabel = &blk_labels[block_num];
1635                 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1636               }
1637             }
1638           }
1639         } else if (!n->is_Proj()) {
1640           // Remember the beginning of the previous instruction, in case
1641           // it's followed by a flag-kill and a null-check.  Happens on
1642           // Intel all the time, with add-to-memory kind of opcodes.
1643           previous_offset = current_offset;
1644         }
1645 
1646         // Not an else-if!
1647         // If this is a trap based cmp then add its offset to the list.
1648         if (mach->is_TrapBasedCheckNode()) {
1649           inct_starts[inct_cnt++] = current_offset;
1650         }
1651       }
1652 
1653       // Verify that there is sufficient space remaining
1654       masm->code()->insts()->maybe_expand_to_ensure_remaining(max_inst_size());
1655       if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1656         C->record_failure("CodeCache is full");
1657         return;
1658       }
1659 
1660       // Save the offset for the listing
1661 #if defined(SUPPORT_OPTO_ASSEMBLY)
1662       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1663         node_offsets[n->_idx] = masm->offset();
1664       }
1665 #endif
1666       assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1667 
1668       // "Normal" instruction case
1669       DEBUG_ONLY(uint instr_offset = masm->offset());
1670       n->emit(masm, C->regalloc());
1671       current_offset = masm->offset();
1672 
1673       // Above we only verified that there is enough space in the instruction section.
1674       // However, the instruction may emit stubs that cause code buffer expansion.

1793     // Emit the deopt handler code.
1794     _code_offsets.set_value(CodeOffsets::Deopt, HandlerImpl::emit_deopt_handler(masm));
1795   }
1796 
1797   // One last check for failed CodeBuffer::expand:
1798   if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1799     C->record_failure("CodeCache is full");
1800     return;
1801   }
1802 
1803 #if defined(SUPPORT_ABSTRACT_ASSEMBLY) || defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_OPTO_ASSEMBLY)
1804   if (C->print_assembly()) {
1805     tty->cr();
1806     tty->print_cr("============================= C2-compiled nmethod ==============================");
1807   }
1808 #endif
1809 
1810 #if defined(SUPPORT_OPTO_ASSEMBLY)
1811   // Dump the assembly code, including basic-block numbers
1812   if (C->print_assembly()) {
1813     if (!VMThread::should_terminate()) {




1814       ResourceMark rm;
1815       stringStream method_metadata_str;
1816       if (C->method() != nullptr) {
1817         C->method()->print_metadata(&method_metadata_str);
1818       }
1819       stringStream dump_asm_str;
1820       dump_asm_on(&dump_asm_str, node_offsets, node_offset_limit);
1821 
1822       // Make sure the end tag is coherent, and that xmlStream->pop_tag is done in a thread-safe way.
1823       NoSafepointVerifier nsv;
1824       ttyLocker ttyl;
1825       // This output goes directly to the tty, not the compiler log.
1826       // To enable tools to match it up with the compilation activity,
1827       // be sure to tag this tty output with the compile ID.
1828       if (xtty != nullptr) {
1829         xtty->head("opto_assembly compile_id='%d'%s", C->compile_id(),
1830                    C->is_osr_compilation() ? " compile_kind='osr'" :
1831                    (C->for_preload() ? " compile_kind='AP'" : ""));
1832       }
1833       const char* is_aot = C->env()->is_precompile() ? (C->for_preload() ? "(AP) " : "(A) -") : "-----";
1834       if (C->method() != nullptr) {
1835         tty->print_cr("----------------------- MetaData before Compile_id = %d %s-------------------", C->compile_id(), is_aot);
1836         tty->print_raw(method_metadata_str.freeze());
1837       } else if (C->stub_name() != nullptr) {
1838         tty->print_cr("----------------------------- RuntimeStub %s -------------------------------", C->stub_name());
1839       }
1840       tty->cr();
1841       tty->print_cr("------------------------ OptoAssembly for Compile_id = %d %s------------------", C->compile_id(), is_aot);
1842       tty->print_raw(dump_asm_str.freeze());
1843       tty->print_cr("--------------------------------------------------------------------------------");
1844       if (xtty != nullptr) {
1845         xtty->tail("opto_assembly");
1846       }
1847     }
1848   }
1849 #endif
1850 }
1851 
1852 void PhaseOutput::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
1853   _inc_table.set_size(cnt);
1854 
1855   uint inct_cnt = 0;
1856   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
1857     Block* block = C->cfg()->get_block(i);
1858     Node *n = nullptr;
1859     int j;
1860 
1861     // Find the branch; ignore trailing NOPs.

3099 
3100 
3101 //-----------------------scratch_emit_size-------------------------------------
3102 // Helper function that computes size by emitting code
3103 uint PhaseOutput::scratch_emit_size(const Node* n) {
3104   // Start scratch_emit_size section.
3105   set_in_scratch_emit_size(true);
3106 
3107   // Emit into a trash buffer and count bytes emitted.
3108   // This is a pretty expensive way to compute a size,
3109   // but it works well enough if seldom used.
3110   // All common fixed-size instructions are given a size
3111   // method by the AD file.
3112   // Note that the scratch buffer blob and locs memory are
3113   // allocated at the beginning of the compile task, and
3114   // may be shared by several calls to scratch_emit_size.
3115   // The allocation of the scratch buffer blob is particularly
3116   // expensive, since it has to grab the code cache lock.
3117   BufferBlob* blob = this->scratch_buffer_blob();
3118   assert(blob != nullptr, "Initialize BufferBlob at start");
3119   assert(blob->size() > max_inst_size(), "sanity");
3120   relocInfo* locs_buf = scratch_locs_memory();
3121   address blob_begin = blob->content_begin();
3122   address blob_end   = (address)locs_buf;
3123   assert(blob->contains(blob_end), "sanity");
3124   CodeBuffer buf(blob_begin, blob_end - blob_begin);
3125   buf.initialize_consts_size(_scratch_const_size);
3126   buf.initialize_stubs_size(MAX_stubs_size);
3127   assert(locs_buf != nullptr, "sanity");
3128   int lsize = MAX_locs_size / 3;
3129   buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
3130   buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
3131   buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
3132   // Mark as scratch buffer.
3133   buf.consts()->set_scratch_emit();
3134   buf.insts()->set_scratch_emit();
3135   buf.stubs()->set_scratch_emit();
3136 
3137   // Do the emission.
3138 
3139   Label fakeL; // Fake label for branch instructions.

3144   masm.bind(fakeL);
3145   if (is_branch) {
3146     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3147     n->as_MachBranch()->label_set(&fakeL, 0);
3148   }
3149   n->emit(&masm, C->regalloc());
3150 
3151   // Emitting into the scratch buffer should not fail
3152   assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3153 
3154   if (is_branch) // Restore label.
3155     n->as_MachBranch()->label_set(saveL, save_bnum);
3156 
3157   // End scratch_emit_size section.
3158   set_in_scratch_emit_size(false);
3159 
3160   return buf.insts_size();
3161 }
3162 
3163 void PhaseOutput::install() {
3164   if (C->should_install_code() && C->stub_function() != nullptr) {


3165     install_stub(C->stub_name());
3166   } else {
3167     install_code(C->method(),
3168                  C->entry_bci(),
3169                  CompilerThread::current()->compiler(),
3170                  C->has_unsafe_access(),
3171                  SharedRuntime::is_wide_vector(C->max_vector_size()));
3172   }
3173 }
3174 
3175 void PhaseOutput::install_code(ciMethod*         target,
3176                                int               entry_bci,
3177                                AbstractCompiler* compiler,
3178                                bool              has_unsafe_access,
3179                                bool              has_wide_vectors) {
3180   // Check if we want to skip execution of all compiled code.
3181   {
3182 #ifndef PRODUCT
3183     if (OptoNoExecute) {
3184       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3185       return;
3186     }
3187 #endif
3188     Compile::TracePhase tp(_t_registerMethod);
3189 

3194       if (!target->is_static()) {
3195         // The UEP of an nmethod ensures that the VEP is padded. However, the padding of the UEP is placed
3196         // before the inline cache check, so we don't have to execute any nop instructions when dispatching
3197         // through the UEP, yet we can ensure that the VEP is aligned appropriately.
3198         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3199       }
3200       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3201       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3202     }
3203 
3204     C->env()->register_method(target,
3205                                      entry_bci,
3206                                      &_code_offsets,
3207                                      _orig_pc_slot_offset_in_bytes,
3208                                      code_buffer(),
3209                                      frame_size_in_words(),
3210                                      oop_map_set(),
3211                                      &_handler_table,
3212                                      inc_table(),
3213                                      compiler,
3214                                      C->has_clinit_barriers(),
3215                                      C->for_preload(),
3216                                      has_unsafe_access,
3217                                      SharedRuntime::is_wide_vector(C->max_vector_size()),
3218                                      C->has_monitors(),
3219                                      C->has_scoped_access(),
3220                                      0,
3221                                      C->should_install_code());
3222 
3223     if (C->log() != nullptr) { // Print code cache state into compiler log
3224       C->log()->code_cache_state();
3225     }
3226     assert(!C->has_clinit_barriers() || C->for_preload(), "class init barriers should be only in preload code");
3227   }
3228 }
3229 void PhaseOutput::install_stub(const char* stub_name) {
3230   // Entry point will be accessed using stub_entry_point();
3231   if (code_buffer() == nullptr) {
3232     Matcher::soft_match_failure();
3233   } else {
3234     if (PrintAssembly && (WizardMode || Verbose))
3235       tty->print_cr("### Stub::%s", stub_name);
3236 
3237     if (!C->failing()) {
3238       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3239 
3240       // Make the NMethod
3241       // For now we mark the frame as never safe for profile stackwalking
3242       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
3243                                                       code_buffer(),
3244                                                       CodeOffsets::frame_never_safe,
3245                                                       // _code_offsets.value(CodeOffsets::Frame_Complete),
3246                                                       frame_size_in_words(),

3383 
3384       // Dump the exception table as well
3385       if( n->is_Catch() && (Verbose || WizardMode) ) {
3386         // Print the exception table for this offset
3387         _handler_table.print_subtable_for(pc);
3388       }
3389       st->bol(); // Make sure we start on a new line
3390     }
3391     st->cr(); // one empty line between blocks
3392   } // End of per-block dump
3393 
3394   if (cut_short)  st->print_cr("*** disassembly is cut short ***");
3395 }
3396 #endif
3397 
3398 #ifndef PRODUCT
3399 void PhaseOutput::print_statistics() {
3400   Scheduling::print_statistics();
3401 }
3402 #endif
3403 
3404 int PhaseOutput::max_inst_size() {
3405   if (AOTCodeCache::maybe_dumping_code()) {
3406     // See the comment in output.hpp.
3407     return 16384;
3408   } else {
3409     return mainline_MAX_inst_size;
3410   }
3411 }
3412 
3413 int PhaseOutput::max_inst_gcstub_size() {
3414   assert(mainline_MAX_inst_size <= max_inst_size(), "Sanity");
3415   return mainline_MAX_inst_size;
3416 }
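A short note on the new helpers, inferred only from the code and assert above: when the AOT code cache may be dumping, the per-instruction expansion margin grows to 16384 bytes, while GC barrier stub sizing stays at the mainline bound. Sketched:

    // Behavior implied by max_inst_size() / max_inst_gcstub_size():
    //   AOTCodeCache::maybe_dumping_code()  -> 16384-byte instruction margin
    //   otherwise                           -> mainline_MAX_inst_size
    //   GC barrier stubs (always)           -> mainline_MAX_inst_size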