
src/hotspot/share/opto/output.cpp

Old version:

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfo.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/compilerDirectives.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/c2/barrierSetC2.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/allocation.hpp"
  39 #include "opto/ad.hpp"
  40 #include "opto/block.hpp"
  41 #include "opto/c2compiler.hpp"
  42 #include "opto/c2_MacroAssembler.hpp"
  43 #include "opto/callnode.hpp"
  44 #include "opto/cfgnode.hpp"
  45 #include "opto/locknode.hpp"
  46 #include "opto/machnode.hpp"
  47 #include "opto/node.hpp"
  48 #include "opto/optoreg.hpp"
  49 #include "opto/output.hpp"
  50 #include "opto/regalloc.hpp"
  51 #include "opto/runtime.hpp"
  52 #include "opto/subnode.hpp"
  53 #include "opto/type.hpp"
  54 #include "runtime/handles.inline.hpp"
  55 #include "runtime/sharedRuntime.hpp"

 224     _first_block_size(0),
 225     _handler_table(),
 226     _inc_table(),
 227     _stub_list(),
 228     _oop_map_set(nullptr),
 229     _scratch_buffer_blob(nullptr),
 230     _scratch_locs_memory(nullptr),
 231     _scratch_const_size(-1),
 232     _in_scratch_emit_size(false),
 233     _frame_slots(0),
 234     _code_offsets(),
 235     _node_bundling_limit(0),
 236     _node_bundling_base(nullptr),
 237     _orig_pc_slot(0),
 238     _orig_pc_slot_offset_in_bytes(0),
 239     _buf_sizes(),
 240     _block(nullptr),
 241     _index(0) {
 242   C->set_output(this);
 243   if (C->stub_name() == nullptr) {
 244     _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
 245   }
 246 }
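
A note on the _orig_pc_slot arithmetic in the constructor above: the slot index steps back from fixed_slots() by however many stack slots one saved PC occupies. A minimal standalone sketch of that computation, assuming a 64-bit VM, the usual 4-byte VMRegImpl::stack_slot_size, and a hypothetical fixed_slots value:

    #include <cstdio>

    int main() {
      const int stack_slot_size   = 4;  // assumed VMRegImpl::stack_slot_size
      const int slots_per_address = (int)(sizeof(void*) / stack_slot_size); // 2 on 64-bit
      const int fixed_slots       = 6;  // hypothetical C->fixed_slots() result
      // The saved original PC occupies the last address-sized piece of the
      // fixed-slot area, so its slot index steps back from fixed_slots.
      const int orig_pc_slot = fixed_slots - slots_per_address;
      printf("orig_pc_slot = %d\n", orig_pc_slot);  // prints 4 on a 64-bit host
      return 0;
    }
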
 247 
 248 PhaseOutput::~PhaseOutput() {
 249   C->set_output(nullptr);
 250   if (_scratch_buffer_blob != nullptr) {
 251     BufferBlob::free(_scratch_buffer_blob);
 252   }
 253 }
 254 
 255 void PhaseOutput::perform_mach_node_analysis() {
 256   // Late barrier analysis must be done after schedule and bundle;
 257   // otherwise, liveness-based spilling will fail.
 258   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 259   bs->late_barrier_analysis();
 260 
 261   pd_perform_mach_node_analysis();
 262 
 263   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 4);
 264 }
 265 
 266 // Convert Nodes to instruction bits and pass off to the VM
 267 void PhaseOutput::Output() {
 268   // The RootNode's block must contain no instructions
 269   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 270 
 271   // The number of new nodes (mostly MachNop) is proportional to
 272   // the number of Java calls and inner loops which are aligned.
 273   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 274                             C->inner_loops()*(OptoLoopAlignment-1)),
 275                            "out of nodes before code generation" ) ) {
 276     return;
 277   }
 278   // Make sure I can find the Start Node
 279   Block *entry = C->cfg()->get_block(1);
 280   Block *broot = C->cfg()->get_root_block();
 281 
 282   const StartNode *start = entry->head()->as_Start();
 283 
 284   // Replace StartNode with prolog
 285   MachPrologNode *prolog = new MachPrologNode();
 286   entry->map_node(prolog, 0);
 287   C->cfg()->map_node_to_block(prolog, entry);
 288   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 289 
 290   // Virtual methods need an unverified entry point
 291 
 292   if( C->is_osr_compilation() ) {
 293     if( PoisonOSREntry ) {
 294       // TODO: Should use a ShouldNotReachHereNode...
 295       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 296     }
 297   } else {
 298     if( C->method() && !C->method()->flags().is_static() ) {
 299       // Insert unvalidated entry point
 300       C->cfg()->insert( broot, 0, new MachUEPNode() );
 301     }
 302 
 303   }
 304 
 305   // Break before main entry point
 306   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 307       (OptoBreakpoint && C->is_method_compilation())       ||
 308       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 309       (OptoBreakpointC2R && !C->method())                   ) {
 310     // checking for C->method() means that OptoBreakpoint does not apply to
 311     // runtime stubs or frame converters
 312     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 313   }
 314 
 315   // Insert epilogs before every return
 316   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 317     Block* block = C->cfg()->get_block(i);
 318     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 319       Node* m = block->end();
 320       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 321         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 322         block->add_inst(epilog);
 323         C->cfg()->map_node_to_block(epilog, block);
 324       }
 325     }
 326   }
 327 
 328   // Keeper of sizing aspects
 329   _buf_sizes = BufferSizingData();
 330 
 331   // Initialize code buffer
 332   estimate_buffer_size(_buf_sizes._const);
 333   if (C->failing()) return;
 334 
 335   // Pre-compute the length of blocks and replace
 336   // long branches with short if machine supports it.
 337   // Must be done before ScheduleAndBundle due to SPARC delay slots
 338   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 339   blk_starts[0] = 0;
 340   shorten_branches(blk_starts);
 341 
 342   ScheduleAndBundle();
 343   if (C->failing()) {
 344     return;
 345   }
 346 
 347   perform_mach_node_analysis();
 348 
 349   // Complete sizing of the code buffer
 350   CodeBuffer* cb = init_buffer();
 351   if (cb == nullptr || C->failing()) {
 352     return;
 353   }
 354 
 355   BuildOopMaps();
 356 
 357   if (C->failing())  {
 358     return;
 359   }
 360 
 361   fill_buffer(cb, blk_starts);

 482     // Sum all instruction sizes to compute block size
 483     uint last_inst = block->number_of_nodes();
 484     uint blk_size = 0;
 485     for (uint j = 0; j < last_inst; j++) {
 486       _index = j;
 487       Node* nj = block->get_node(_index);
 488       // Handle machine instruction nodes
 489       if (nj->is_Mach()) {
 490         MachNode* mach = nj->as_Mach();
 491         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 492         reloc_size += mach->reloc();
 493         if (mach->is_MachCall()) {
 494           // add size information for trampoline stub
 495           // class CallStubImpl is platform-specific and defined in the *.ad files.
 496           stub_size  += CallStubImpl::size_call_trampoline();
 497           reloc_size += CallStubImpl::reloc_call_trampoline();
 498 
 499           MachCallNode *mcall = mach->as_MachCall();
 500           // This destination address is NOT PC-relative
 501 
 502           mcall->method_set((intptr_t)mcall->entry_point());
 503 
 504           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 505             stub_size  += CompiledStaticCall::to_interp_stub_size();
 506             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 507           }
 508         } else if (mach->is_MachSafePoint()) {
 509           // If call/safepoint are adjacent, account for possible
 510           // nop to disambiguate the two safepoints.
 511           // ScheduleAndBundle() can rearrange nodes in a block,
 512           // so check all offsets inside this block.
 513           if (last_call_adr >= blk_starts[i]) {
 514             blk_size += nop_size;
 515           }
 516         }
 517         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 518           // Nop is inserted between "avoid back to back" instructions.
 519           // ScheduleAndBundle() can rearrange nodes in a block,
 520           // so check all offsets inside this block.
 521           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 522             blk_size += nop_size;
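
The (mach->alignment_required() - 1) * relocInfo::addr_unit() term above is a worst-case bound: an instruction that must start on an N-unit boundary needs at most N-1 units of nop padding in front of it, depending on where it happens to land. A standalone sketch of the padding formula (identifiers invented for illustration, not HotSpot code):

    #include <cstdio>

    // Padding needed to move 'offset' up to the next 'alignment' boundary.
    static int pad_to(int offset, int alignment) {
      return (alignment - (offset % alignment)) % alignment;
    }

    int main() {
      const int alignment = 16;   // e.g. a 16-byte alignment requirement
      int worst = 0;
      for (int off = 0; off < 64; off++) {
        int pad = pad_to(off, alignment);
        if (pad > worst) worst = pad;
      }
      // The block-size estimate above reserves exactly this worst case
      // for every instruction with an alignment requirement.
      printf("worst-case padding: %d bytes\n", worst);  // 15 = alignment - 1
      return 0;
    }
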

 737     // New functionality:
 738     //   Assert if the local is not top. In product mode let the new node
 739     //   override the old entry.
 740     assert(local == C->top(), "LocArray collision");
 741     if (local == C->top()) {
 742       return;
 743     }
 744     array->pop();
 745   }
 746   const Type *t = local->bottom_type();
 747 
 748   // Is it a safepoint scalar object node?
 749   if (local->is_SafePointScalarObject()) {
 750     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 751 
 752     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 753     if (sv == nullptr) {
 754       ciKlass* cik = t->is_oopptr()->exact_klass();
 755       assert(cik->is_instance_klass() ||
 756              cik->is_array_klass(), "Not supported allocation.");
 757       sv = new ObjectValue(spobj->_idx,
 758                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
 759       set_sv_for_object_node(objs, sv);
 760 
 761       uint first_ind = spobj->first_index(sfpt->jvms());
 762       for (uint i = 0; i < spobj->n_fields(); i++) {
 763         Node* fld_node = sfpt->in(first_ind+i);
 764         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 765       }
 766     }
 767     array->append(sv);
 768     return;
 769   }
 770 
 771   // Grab the register number for the local
 772   OptoReg::Name regnum = C->regalloc()->get_reg_first(local);
 773   if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
 774     // Record the double as two float registers.
 775     // The register mask for such a value always specifies two adjacent
 776     // float registers, with the lower register number even.
 777     // Normally, the allocation of high and low words to these registers
 778     // is irrelevant, because nearly all operations on register pairs
 779     // (e.g., StoreD) treat them as a single unit.
 780     // Here, we assume in addition that the words in these two registers
 781     // are stored "naturally" (by operations like StoreD and double stores
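
As an illustration of the word split the comment above is describing, the sketch below views a 64-bit double as the two 32-bit words a float-register pair would hold (standalone code; which half is "high" depends on host endianness):

    #include <cstdio>
    #include <cstdint>
    #include <cstring>

    int main() {
      double d = 1.5;
      uint32_t words[2];
      memcpy(words, &d, sizeof(d));   // reinterpret the double as two words
      // On a little-endian host words[1] is the high word; for 1.5 it holds
      // the sign/exponent bits 0x3ff80000 and the low word is all zeros.
      printf("word0 = 0x%08x, word1 = 0x%08x\n",
             (unsigned)words[0], (unsigned)words[1]);
      return 0;
    }
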

 923       ShouldNotReachHere();
 924       break;
 925   }
 926 }
 927 
 928 // Determine if this node starts a bundle
 929 bool PhaseOutput::starts_bundle(const Node *n) const {
 930   return (_node_bundling_limit > n->_idx &&
 931           _node_bundling_base[n->_idx].starts_bundle());
 932 }
 933 
 934 //--------------------------Process_OopMap_Node--------------------------------
 935 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
 936   // Handle special safepoint nodes for synchronization
 937   MachSafePointNode *sfn   = mach->as_MachSafePoint();
 938   MachCallNode      *mcall;
 939 
 940   int safepoint_pc_offset = current_offset;
 941   bool is_method_handle_invoke = false;
 942   bool return_oop = false;
 943   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
 944   bool arg_escape = false;
 945 
 946   // Add the safepoint in the DebugInfoRecorder
 947   if( !mach->is_MachCall() ) {
 948     mcall = nullptr;
 949     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
 950   } else {
 951     mcall = mach->as_MachCall();
 952 
 953     // Is the call a MethodHandle call?
 954     if (mcall->is_MachCallJava()) {
 955       if (mcall->as_MachCallJava()->_method_handle_invoke) {
 956         assert(C->has_method_handle_invokes(), "must have been set during call generation");
 957         is_method_handle_invoke = true;
 958       }
 959       arg_escape = mcall->as_MachCallJava()->_arg_escape;
 960     }
 961 
 962     // Check if a call returns an object.
 963     if (mcall->returns_pointer()) {
 964       return_oop = true;
 965     }

 966     safepoint_pc_offset += mcall->ret_addr_offset();
 967     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
 968   }
 969 
 970   // Loop over the JVMState list to add scope information
 971   // Do not skip safepoints with a null method; they need monitor info
 972   JVMState* youngest_jvms = sfn->jvms();
 973   int max_depth = youngest_jvms->depth();
 974 
 975   // Allocate the object pool for scalar-replaced objects -- the map from
 976   // small-integer keys (which can be recorded in the local and ostack
 977   // arrays) to descriptions of the object state.
 978   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
 979 
 980   // Visit scopes from oldest to youngest.
 981   for (int depth = 1; depth <= max_depth; depth++) {
 982     JVMState* jvms = youngest_jvms->of_depth(depth);
 983     int idx;
 984     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
 985     // Safepoints that do not have method() set only provide oop-map and monitor info
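
The loop this hunk opens visits the JVMState chain caller-first: depth 1 is the oldest (outermost) scope and max_depth the youngest. A toy model of that traversal over an inlining chain (the Frame type and data are invented for illustration):

    #include <cstdio>

    struct Frame {
      const char* method;
      const Frame* caller;  // nullptr for the outermost frame
    };

    int main() {
      // Youngest frame last: a() inlined b(), which inlined c().
      Frame a{"a", nullptr}, b{"b", &a}, c{"c", &b};
      const Frame* youngest = &c;

      // Model depth()/of_depth(): collect frames along the caller chain.
      const Frame* frames[16];
      int depth = 0;
      for (const Frame* f = youngest; f != nullptr; f = f->caller) {
        frames[depth++] = f;
      }
      // Visit scopes from oldest to youngest, as the loop above does.
      for (int d = depth - 1; d >= 0; d--) {
        printf("scope: %s\n", frames[d]->method);  // prints a, b, c
      }
      return 0;
    }
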

1069     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1070     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1071 
1072     // Make method available for all Safepoints
1073     ciMethod* scope_method = method ? method : C->method();
1074     // Describe the scope here
1075     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1076     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1077     // Now we can describe the scope.
1078     methodHandle null_mh;
1079     bool rethrow_exception = false;
1080     C->debug_info()->describe_scope(
1081       safepoint_pc_offset,
1082       null_mh,
1083       scope_method,
1084       jvms->bci(),
1085       jvms->should_reexecute(),
1086       rethrow_exception,
1087       is_method_handle_invoke,
1088       return_oop,
1089       has_ea_local_in_scope,
1090       arg_escape,
1091       locvals,
1092       expvals,
1093       monvals
1094     );
1095   } // End jvms loop
1096 
1097   // Mark the end of the scope set.
1098   C->debug_info()->end_safepoint(safepoint_pc_offset);
1099 }
1100 
1101 
1102 
1103 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1104 class NonSafepointEmitter {
1105     Compile*  C;
1106     JVMState* _pending_jvms;
1107     int       _pending_offset;
1108 

1444           MachNode *nop = new MachNopNode(nops_cnt);
1445           block->insert_node(nop, j++);
1446           last_inst++;
1447           C->cfg()->map_node_to_block(nop, block);
1448           // Ensure enough space.
1449           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1450           if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1451             C->record_failure("CodeCache is full");
1452             return;
1453           }
1454           nop->emit(*cb, C->regalloc());
1455           cb->flush_bundle(true);
1456           current_offset = cb->insts_size();
1457         }
1458 
1459         bool observe_safepoint = is_sfn;
1460         // Remember the start of the last call in a basic block
1461         if (is_mcall) {
1462           MachCallNode *mcall = mach->as_MachCall();
1463 
1464           // This destination address is NOT PC-relative
1465           mcall->method_set((intptr_t)mcall->entry_point());
1466 
1467           // Save the return address
1468           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1469 
1470           observe_safepoint = mcall->guaranteed_safepoint();
1471         }
1472 
1473         // Because of inheritance, sfn is valid whenever mcall is valid
1474         if (observe_safepoint) {
1475           // Handle special safepoint nodes for synchronization
1476           if (!is_mcall) {
1477             MachSafePointNode *sfn = mach->as_MachSafePoint();
1478             // !!!!! Stubs only need an oopmap right now, so bail out
1479             if (sfn->jvms()->method() == nullptr) {
1480               // Write the oopmap directly to the code blob??!!
1481               continue;
1482             }
1483           } // End synchronization
1484 
1485           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1609       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1610         node_offsets[n->_idx] = cb->insts_size();
1611       }
1612 #endif
1613       assert(!C->failing(), "Should not reach here if failing.");
1614 
1615       // "Normal" instruction case
1616       DEBUG_ONLY(uint instr_offset = cb->insts_size());
1617       n->emit(*cb, C->regalloc());
1618       current_offset = cb->insts_size();
1619 
1620       // Above we only verified that there is enough space in the instruction section.
1621       // However, the instruction may emit stubs that cause code buffer expansion.
1622       // Bail out here if expansion failed due to a lack of code cache space.
1623       if (C->failing()) {
1624         return;
1625       }
1626 
1627       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1628              "ret_addr_offset() not within emitted code");
1629 
1630 #ifdef ASSERT
1631       uint n_size = n->size(C->regalloc());
1632       if (n_size < (current_offset-instr_offset)) {
1633         MachNode* mach = n->as_Mach();
1634         n->dump();
1635         mach->dump_format(C->regalloc(), tty);
1636         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1637         Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
1638         tty->print_cr(" ------------------- ");
1639         BufferBlob* blob = this->scratch_buffer_blob();
1640         address blob_begin = blob->content_begin();
1641         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1642         assert(false, "wrong size of mach node");
1643       }
1644 #endif
1645       non_safepoints.observe_instruction(n, current_offset);
1646 
1647       // mcall is the last "call" that can be a safepoint.
1648       // Record it so we can see if a poll will directly follow it,
1649       // in which case we'll need a pad to make the PcDesc sites unique.

3002         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3003         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3004       }
3005     }
3006     // Do not allow defs of new derived values to float above GC
3007     // points unless the base is definitely available at the GC point.
3008 
3009     Node *m = b->get_node(i);
3010 
3011     // Add precedence edge from following safepoint to use of derived pointer
3012     if( last_safept_node != end_node &&
3013         m != last_safept_node) {
3014       for (uint k = 1; k < m->req(); k++) {
3015         const Type *t = m->in(k)->bottom_type();
3016         if( t->isa_oop_ptr() &&
3017             t->is_ptr()->offset() != 0 ) {
3018           last_safept_node->add_prec( m );
3019           break;
3020         }
3021       }
3022     }
3023 
3024     if( n->jvms() ) {           // Precedence edge from derived to safept
3025       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3026       if( b->get_node(last_safept) != last_safept_node ) {
3027         last_safept = b->find_node(last_safept_node);
3028       }
3029       for( uint j=last_safept; j > i; j-- ) {
3030         Node *mach = b->get_node(j);
3031         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3032           mach->add_prec( n );
3033       }
3034       last_safept = i;
3035       last_safept_node = m;
3036     }
3037   }
3038 
3039   if (fat_proj_seen) {
3040     // Garbage collect pinch nodes that were not consumed.
3041     // They are usually created by a fat kill MachProj for a call.
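
The precedence edges built above protect derived pointers: an interior pointer computed as base plus offset is only reconstructible at a GC point if the base is still available there, because a moving collector relocates the base and every derived value must be rebased. A standalone sketch of the invariant (plain C++ simulating one relocation; not HotSpot code):

    #include <cstdio>
    #include <cstring>

    int main() {
      char from_space[64] = "hello";
      char to_space[64];

      char* base    = from_space;
      char* derived = base + 3;      // interior pointer: base + offset

      // At a safepoint a moving GC may relocate the object. Any derived
      // pointer must be rebased; keeping only the old derived value would
      // leave a dangling interior pointer into from-space.
      memcpy(to_space, from_space, sizeof(from_space));
      long offset = derived - base;  // the oop map records (base, offset) pairs
      base    = to_space;
      derived = base + offset;       // recompute from the relocated base

      printf("derived now points at '%c'\n", *derived);  // 'l'
      return 0;
    }
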

3160 }
3161 #endif
3162 
3163 //-----------------------init_scratch_buffer_blob------------------------------
3164 // Construct a temporary BufferBlob and cache it for this compile.
3165 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3166   // If there is already a scratch buffer blob allocated and the
3167   // constant section is big enough, use it.  Otherwise free the
3168   // current and allocate a new one.
3169   BufferBlob* blob = scratch_buffer_blob();
3170   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3171     // Use the current blob.
3172   } else {
3173     if (blob != nullptr) {
3174       BufferBlob::free(blob);
3175     }
3176 
3177     ResourceMark rm;
3178     _scratch_const_size = const_size;
3179     int size = C2Compiler::initial_code_buffer_size(const_size);
3180     blob = BufferBlob::create("Compile::scratch_buffer", size);
3181     // Record the buffer blob for next time.
3182     set_scratch_buffer_blob(blob);
3183     // Have we run out of code space?
3184     if (scratch_buffer_blob() == nullptr) {
 3185       // Let CompileBroker disable further compilations.
3186       C->record_failure("Not enough space for scratch buffer in CodeCache");
3187       return;
3188     }
3189   }
3190 
3191   // Initialize the relocation buffers
3192   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3193   set_scratch_locs_memory(locs_buf);
3194 }
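
init_scratch_buffer_blob() above is a cache-or-reallocate policy: keep the cached blob while the requested constant section still fits, otherwise free it and allocate a larger one, and record a compile failure when allocation is impossible. The same shape in miniature (standalone sketch; Blob is a stand-in, not HotSpot's BufferBlob):

    #include <cstdlib>
    #include <cstdio>

    struct Blob { int capacity; };

    static Blob* g_cached      = nullptr;
    static int   g_cached_size = -1;

    static Blob* get_scratch(int needed) {
      if (g_cached != nullptr && needed <= g_cached_size) {
        return g_cached;                  // reuse: still big enough
      }
      free(g_cached);                     // too small (or absent): replace it
      g_cached = static_cast<Blob*>(malloc(sizeof(Blob)));
      if (g_cached == nullptr) return nullptr;  // caller records the failure
      g_cached->capacity = needed;
      g_cached_size = needed;
      return g_cached;
    }

    int main() {
      Blob* a = get_scratch(100);
      Blob* b = get_scratch(50);          // fits: the same blob is returned
      printf("reused: %s\n", (a == b) ? "yes" : "no");
      free(g_cached);
      return 0;
    }
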
3195 
3196 
3197 //-----------------------scratch_emit_size-------------------------------------
3198 // Helper function that computes size by emitting code
3199 uint PhaseOutput::scratch_emit_size(const Node* n) {

3230   buf.insts()->set_scratch_emit();
3231   buf.stubs()->set_scratch_emit();
3232 
3233   // Do the emission.
3234 
3235   Label fakeL; // Fake label for branch instructions.
3236   Label*   saveL = nullptr;
3237   uint save_bnum = 0;
3238   bool is_branch = n->is_MachBranch();
3239   if (is_branch) {
3240     MacroAssembler masm(&buf);
3241     masm.bind(fakeL);
3242     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3243     n->as_MachBranch()->label_set(&fakeL, 0);
3244   }
3245   n->emit(buf, C->regalloc());
3246 
3247   // Emitting into the scratch buffer should not fail
3248   assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3249 
3250   if (is_branch) // Restore label.
3251     n->as_MachBranch()->label_set(saveL, save_bnum);
3252 
3253   // End scratch_emit_size section.
3254   set_in_scratch_emit_size(false);
3255 
3256   return buf.insts_size();
3257 }
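
The fake-label dance in scratch_emit_size() above exists because a MachBranch cannot be emitted against an unbound label, so the node is temporarily pointed at a label bound inside the scratch buffer and restored afterwards. The save/patch/restore shape, reduced to a standalone sketch with invented types:

    #include <cstdio>

    struct Label { int pos; };

    struct Branch {
      Label* target;
      void save_label(Label** saved) const { *saved = target; }
      void label_set(Label* l)             { target = l; }
      int  emit() const { return 4; }  // pretend every branch encodes to 4 bytes
    };

    int main() {
      Label real{-1};                  // not bound yet; cannot be emitted against
      Branch br{&real};

      Label fake{0};                   // bound label inside the scratch buffer
      Label* saved = nullptr;
      br.save_label(&saved);           // remember the real target
      br.label_set(&fake);             // emit against the fake, bound label
      int size = br.emit();
      br.label_set(saved);             // restore so the real emission is intact

      printf("branch size = %d, target restored: %s\n",
             size, (br.target == &real) ? "yes" : "no");
      return 0;
    }
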
3258 
3259 void PhaseOutput::install() {
3260   if (!C->should_install_code()) {
3261     return;
3262   } else if (C->stub_function() != nullptr) {
3263     install_stub(C->stub_name());
3264   } else {
3265     install_code(C->method(),
3266                  C->entry_bci(),
3267                  CompileBroker::compiler2(),
3268                  C->has_unsafe_access(),
3269                  SharedRuntime::is_wide_vector(C->max_vector_size()),
3270                  C->rtm_state());
3271   }

3275                                int               entry_bci,
3276                                AbstractCompiler* compiler,
3277                                bool              has_unsafe_access,
3278                                bool              has_wide_vectors,
3279                                RTMState          rtm_state) {
3280   // Check if we want to skip execution of all compiled code.
3281   {
3282 #ifndef PRODUCT
3283     if (OptoNoExecute) {
3284       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3285       return;
3286     }
3287 #endif
3288     Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3289 
3290     if (C->is_osr_compilation()) {
3291       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3292       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3293     } else {
3294       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3295       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3296     }
3297 
3298     C->env()->register_method(target,
3299                                      entry_bci,
3300                                      &_code_offsets,
3301                                      _orig_pc_slot_offset_in_bytes,
3302                                      code_buffer(),
3303                                      frame_size_in_words(),
3304                                      oop_map_set(),
3305                                      &_handler_table,
3306                                      inc_table(),
3307                                      compiler,
3308                                      has_unsafe_access,
3309                                      SharedRuntime::is_wide_vector(C->max_vector_size()),
3310                                      C->has_monitors(),
3311                                      0,
3312                                      C->rtm_state());
3313 
3314     if (C->log() != nullptr) { // Print code cache state into compiler log
3315       C->log()->code_cache_state();
3316     }
3317   }
3318 }
3319 void PhaseOutput::install_stub(const char* stub_name) {
3320   // Entry point will be accessed using stub_entry_point();
3321   if (code_buffer() == nullptr) {
3322     Matcher::soft_match_failure();
3323   } else {
3324     if (PrintAssembly && (WizardMode || Verbose))
3325       tty->print_cr("### Stub::%s", stub_name);
3326 
3327     if (!C->failing()) {
3328       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3329 
3330       // Make the NMethod
3331       // For now we mark the frame as never safe for profile stackwalking
 3332       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,

New version:

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfo.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/compilerDirectives.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/gc_globals.hpp"
  37 #include "gc/shared/c2/barrierSetC2.hpp"
  38 #include "memory/allocation.inline.hpp"
  39 #include "memory/allocation.hpp"
  40 #include "opto/ad.hpp"
  41 #include "opto/block.hpp"
  42 #include "opto/c2compiler.hpp"
  43 #include "opto/c2_MacroAssembler.hpp"
  44 #include "opto/callnode.hpp"
  45 #include "opto/cfgnode.hpp"
  46 #include "opto/locknode.hpp"
  47 #include "opto/machnode.hpp"
  48 #include "opto/node.hpp"
  49 #include "opto/optoreg.hpp"
  50 #include "opto/output.hpp"
  51 #include "opto/regalloc.hpp"
  52 #include "opto/runtime.hpp"
  53 #include "opto/subnode.hpp"
  54 #include "opto/type.hpp"
  55 #include "runtime/handles.inline.hpp"
  56 #include "runtime/sharedRuntime.hpp"

 225     _first_block_size(0),
 226     _handler_table(),
 227     _inc_table(),
 228     _stub_list(),
 229     _oop_map_set(nullptr),
 230     _scratch_buffer_blob(nullptr),
 231     _scratch_locs_memory(nullptr),
 232     _scratch_const_size(-1),
 233     _in_scratch_emit_size(false),
 234     _frame_slots(0),
 235     _code_offsets(),
 236     _node_bundling_limit(0),
 237     _node_bundling_base(nullptr),
 238     _orig_pc_slot(0),
 239     _orig_pc_slot_offset_in_bytes(0),
 240     _buf_sizes(),
 241     _block(nullptr),
 242     _index(0) {
 243   C->set_output(this);
 244   if (C->stub_name() == nullptr) {
 245     int fixed_slots = C->fixed_slots();
 246     if (C->needs_stack_repair()) {
 247       fixed_slots -= 2;
 248     }
 249     // TODO 8284443 Only reserve extra slot if needed
 250     if (InlineTypeReturnedAsFields) {
 251       fixed_slots -= 2;
 252     }
 253     _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
 254   }
 255 }
 256 
 257 PhaseOutput::~PhaseOutput() {
 258   C->set_output(nullptr);
 259   if (_scratch_buffer_blob != nullptr) {
 260     BufferBlob::free(_scratch_buffer_blob);
 261   }
 262 }
 263 
 264 void PhaseOutput::perform_mach_node_analysis() {
 265   // Late barrier analysis must be done after schedule and bundle;
 266   // otherwise, liveness-based spilling will fail.
 267   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 268   bs->late_barrier_analysis();
 269 
 270   pd_perform_mach_node_analysis();
 271 
 272   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 4);
 273 }
 274 
 275 // Convert Nodes to instruction bits and pass off to the VM
 276 void PhaseOutput::Output() {
 277   // The RootNode's block must contain no instructions
 278   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 279 
 280   // The number of new nodes (mostly MachNop) is proportional to
 281   // the number of Java calls and inner loops which are aligned.
 282   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 283                             C->inner_loops()*(OptoLoopAlignment-1)),
 284                            "out of nodes before code generation" ) ) {
 285     return;
 286   }
 287   // Make sure I can find the Start Node
 288   Block *entry = C->cfg()->get_block(1);
 289   Block *broot = C->cfg()->get_root_block();
 290 
 291   const StartNode *start = entry->head()->as_Start();
 292 
 293   // Replace StartNode with prolog
 294   Label verified_entry;
 295   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
 296   entry->map_node(prolog, 0);
 297   C->cfg()->map_node_to_block(prolog, entry);
 298   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 299 
 300   // Virtual methods need an unverified entry point
 301   if (C->is_osr_compilation()) {
 302     if (PoisonOSREntry) {
 303       // TODO: Should use a ShouldNotReachHereNode...
 304       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 305     }
 306   } else {
 307     if (C->method()) {
 308       if (C->method()->has_scalarized_args()) {
 309         // Add entry point to unpack all inline type arguments
 310         C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
 311         if (!C->method()->is_static()) {
 312           // Add verified/unverified entry points to only unpack inline type receiver at interface calls
 313           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
 314           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true,  /* receiver_only */ true));
 315           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
 316         }
 317       } else if (!C->method()->is_static()) {
 318         // Insert unvalidated entry point
 319         C->cfg()->insert(broot, 0, new MachUEPNode());
 320       }
 321     }
 322   }
 323 
 324   // Break before main entry point
 325   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 326       (OptoBreakpoint && C->is_method_compilation())       ||
 327       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 328       (OptoBreakpointC2R && !C->method())                   ) {
 329     // checking for C->method() means that OptoBreakpoint does not apply to
 330     // runtime stubs or frame converters
 331     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 332   }
 333 
 334   // Insert epilogs before every return
 335   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 336     Block* block = C->cfg()->get_block(i);
 337     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 338       Node* m = block->end();
 339       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 340         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 341         block->add_inst(epilog);
 342         C->cfg()->map_node_to_block(epilog, block);
 343       }
 344     }
 345   }
 346 
 347   // Keeper of sizing aspects
 348   _buf_sizes = BufferSizingData();
 349 
 350   // Initialize code buffer
 351   estimate_buffer_size(_buf_sizes._const);
 352   if (C->failing()) return;
 353 
 354   // Pre-compute the length of blocks and replace
 355   // long branches with short if machine supports it.
 356   // Must be done before ScheduleAndBundle due to SPARC delay slots
 357   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 358   blk_starts[0] = 0;
 359   shorten_branches(blk_starts);
 360 
 361   if (!C->is_osr_compilation() && C->has_scalarized_args()) {
 362     // Compute the offsets of the entry points required by the inline type calling convention
 363     if (!C->method()->is_static()) {
 364       // We have entries at the beginning of the method, implemented by the first 4 nodes.
 365       // Entry                     (unverified) @ offset 0
 366       // Verified_Inline_Entry_RO
 367       // Inline_Entry              (unverified)
 368       // Verified_Inline_Entry
 369       uint offset = 0;
 370       _code_offsets.set_value(CodeOffsets::Entry, offset);
 371 
 372       offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
 373       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
 374 
 375       offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
 376       _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
 377 
 378       offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
 379       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
 380     } else {
 381       _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
 382       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
 383     }
 384   }
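
The new block above computes the inline-type entry-point offsets by accumulation: each entry begins where the code of the preceding MachVEPNode ends. A worked sketch of that prefix sum with hypothetical node sizes:

    #include <cstdio>

    int main() {
      // Hypothetical emitted sizes of broot->get_node(0..2), in bytes.
      const int vep_size[3] = {16, 12, 16};

      int offset = 0;
      printf("Entry                    = %d\n", offset);  // unverified, offset 0
      offset += vep_size[0];
      printf("Verified_Inline_Entry_RO = %d\n", offset);  // 16
      offset += vep_size[1];
      printf("Inline_Entry             = %d\n", offset);  // 28
      offset += vep_size[2];
      printf("Verified_Inline_Entry    = %d\n", offset);  // 44
      return 0;
    }
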
 385 
 386   ScheduleAndBundle();
 387   if (C->failing()) {
 388     return;
 389   }
 390 
 391   perform_mach_node_analysis();
 392 
 393   // Complete sizing of the code buffer
 394   CodeBuffer* cb = init_buffer();
 395   if (cb == nullptr || C->failing()) {
 396     return;
 397   }
 398 
 399   BuildOopMaps();
 400 
 401   if (C->failing())  {
 402     return;
 403   }
 404 
 405   fill_buffer(cb, blk_starts);

 526     // Sum all instruction sizes to compute block size
 527     uint last_inst = block->number_of_nodes();
 528     uint blk_size = 0;
 529     for (uint j = 0; j < last_inst; j++) {
 530       _index = j;
 531       Node* nj = block->get_node(_index);
 532       // Handle machine instruction nodes
 533       if (nj->is_Mach()) {
 534         MachNode* mach = nj->as_Mach();
 535         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 536         reloc_size += mach->reloc();
 537         if (mach->is_MachCall()) {
 538           // add size information for trampoline stub
 539           // class CallStubImpl is platform-specific and defined in the *.ad files.
 540           stub_size  += CallStubImpl::size_call_trampoline();
 541           reloc_size += CallStubImpl::reloc_call_trampoline();
 542 
 543           MachCallNode *mcall = mach->as_MachCall();
 544           // This destination address is NOT PC-relative
 545 
 546           if (mcall->entry_point() != nullptr) {
 547             mcall->method_set((intptr_t)mcall->entry_point());
 548           }
 549 
 550           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 551             stub_size  += CompiledStaticCall::to_interp_stub_size();
 552             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 553           }
 554         } else if (mach->is_MachSafePoint()) {
 555           // If call/safepoint are adjacent, account for possible
 556           // nop to disambiguate the two safepoints.
 557           // ScheduleAndBundle() can rearrange nodes in a block,
 558           // so check all offsets inside this block.
 559           if (last_call_adr >= blk_starts[i]) {
 560             blk_size += nop_size;
 561           }
 562         }
 563         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 564           // Nop is inserted between "avoid back to back" instructions.
 565           // ScheduleAndBundle() can rearrange nodes in a block,
 566           // so check all offsets inside this block.
 567           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 568             blk_size += nop_size;

 783     // New functionality:
 784     //   Assert if the local is not top. In product mode let the new node
 785     //   override the old entry.
 786     assert(local == C->top(), "LocArray collision");
 787     if (local == C->top()) {
 788       return;
 789     }
 790     array->pop();
 791   }
 792   const Type *t = local->bottom_type();
 793 
 794   // Is it a safepoint scalar object node?
 795   if (local->is_SafePointScalarObject()) {
 796     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 797 
 798     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 799     if (sv == nullptr) {
 800       ciKlass* cik = t->is_oopptr()->exact_klass();
 801       assert(cik->is_instance_klass() ||
 802              cik->is_array_klass(), "Not supported allocation.");
 803       uint first_ind = spobj->first_index(sfpt->jvms());
 804       // Nullable, scalarized inline types have an is_init input
 805       // that needs to be checked before using the field values.
 806       ScopeValue* is_init = nullptr;
 807       if (cik->is_inlinetype()) {
 808         Node* init_node = sfpt->in(first_ind++);
 809         assert(init_node != nullptr, "is_init node not found");
 810         if (!init_node->is_top()) {
 811           const TypeInt* init_type = init_node->bottom_type()->is_int();
 812           if (init_node->is_Con()) {
 813             is_init = new ConstantIntValue(init_type->get_con());
 814           } else {
 815             OptoReg::Name init_reg = C->regalloc()->get_reg_first(init_node);
 816             is_init = new_loc_value(C->regalloc(), init_reg, Location::normal);
 817           }
 818         }
 819       }
 820       sv = new ObjectValue(spobj->_idx,
 821                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), is_init);
 822       set_sv_for_object_node(objs, sv);
 823 
 824       for (uint i = 0; i < spobj->n_fields(); i++) {
 825         Node* fld_node = sfpt->in(first_ind+i);
 826         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 827       }
 828     }
 829     array->append(sv);
 830     return;
 831   }
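
The is_init handling added in this hunk reflects how a nullable, scalarized inline type travels: an extra flag input marks whether the field values are valid, and the debug info must record it (as a constant or a register location) so deoptimization can tell a null from a real value. A toy model of the convention (names invented for illustration):

    #include <cstdio>

    // A nullable inline type passed "scalarized": the is_init flag travels
    // alongside the field values instead of a single object pointer.
    struct ScalarizedPoint {
      bool is_init;  // false means "null": the fields below must be ignored
      int  x;
      int  y;
    };

    static void print_point(const ScalarizedPoint& p) {
      if (!p.is_init) {          // check is_init before using field values,
        printf("null\n");        // mirroring what the debug info must encode
        return;
      }
      printf("(%d, %d)\n", p.x, p.y);
    }

    int main() {
      print_point({true, 3, 4});
      print_point({false, 0, 0});  // a null value: fields are garbage by contract
      return 0;
    }
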
 832 
 833   // Grab the register number for the local
 834   OptoReg::Name regnum = C->regalloc()->get_reg_first(local);
 835   if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
 836     // Record the double as two float registers.
 837     // The register mask for such a value always specifies two adjacent
 838     // float registers, with the lower register number even.
 839     // Normally, the allocation of high and low words to these registers
 840     // is irrelevant, because nearly all operations on register pairs
 841     // (e.g., StoreD) treat them as a single unit.
 842     // Here, we assume in addition that the words in these two registers
 843     // are stored "naturally" (by operations like StoreD and double stores

 985       ShouldNotReachHere();
 986       break;
 987   }
 988 }
 989 
 990 // Determine if this node starts a bundle
 991 bool PhaseOutput::starts_bundle(const Node *n) const {
 992   return (_node_bundling_limit > n->_idx &&
 993           _node_bundling_base[n->_idx].starts_bundle());
 994 }
 995 
 996 //--------------------------Process_OopMap_Node--------------------------------
 997 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
 998   // Handle special safepoint nodes for synchronization
 999   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1000   MachCallNode      *mcall;
1001 
1002   int safepoint_pc_offset = current_offset;
1003   bool is_method_handle_invoke = false;
1004   bool return_oop = false;
1005   bool return_scalarized = false;
1006   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1007   bool arg_escape = false;
1008 
1009   // Add the safepoint in the DebugInfoRecorder
1010   if( !mach->is_MachCall() ) {
1011     mcall = nullptr;
1012     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1013   } else {
1014     mcall = mach->as_MachCall();
1015 
1016     // Is the call a MethodHandle call?
1017     if (mcall->is_MachCallJava()) {
1018       if (mcall->as_MachCallJava()->_method_handle_invoke) {
1019         assert(C->has_method_handle_invokes(), "must have been set during call generation");
1020         is_method_handle_invoke = true;
1021       }
1022       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1023     }
1024 
1025     // Check if a call returns an object.
1026     if (mcall->returns_pointer() || mcall->returns_scalarized()) {
1027       return_oop = true;
1028     }
1029     if (mcall->returns_scalarized()) {
1030       return_scalarized = true;
1031     }
1032     safepoint_pc_offset += mcall->ret_addr_offset();
1033     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1034   }
1035 
1036   // Loop over the JVMState list to add scope information
1037   // Do not skip safepoints with a null method; they need monitor info
1038   JVMState* youngest_jvms = sfn->jvms();
1039   int max_depth = youngest_jvms->depth();
1040 
1041   // Allocate the object pool for scalar-replaced objects -- the map from
1042   // small-integer keys (which can be recorded in the local and ostack
1043   // arrays) to descriptions of the object state.
1044   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1045 
1046   // Visit scopes from oldest to youngest.
1047   for (int depth = 1; depth <= max_depth; depth++) {
1048     JVMState* jvms = youngest_jvms->of_depth(depth);
1049     int idx;
1050     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1051     // Safepoints that do not have method() set only provide oop-map and monitor info

1135     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1136     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1137 
1138     // Make method available for all Safepoints
1139     ciMethod* scope_method = method ? method : C->method();
1140     // Describe the scope here
1141     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1142     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1143     // Now we can describe the scope.
1144     methodHandle null_mh;
1145     bool rethrow_exception = false;
1146     C->debug_info()->describe_scope(
1147       safepoint_pc_offset,
1148       null_mh,
1149       scope_method,
1150       jvms->bci(),
1151       jvms->should_reexecute(),
1152       rethrow_exception,
1153       is_method_handle_invoke,
1154       return_oop,
1155       return_scalarized,
1156       has_ea_local_in_scope,
1157       arg_escape,
1158       locvals,
1159       expvals,
1160       monvals
1161     );
1162   } // End jvms loop
1163 
1164   // Mark the end of the scope set.
1165   C->debug_info()->end_safepoint(safepoint_pc_offset);
1166 }
1167 
1168 
1169 
1170 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1171 class NonSafepointEmitter {
1172     Compile*  C;
1173     JVMState* _pending_jvms;
1174     int       _pending_offset;
1175 

1511           MachNode *nop = new MachNopNode(nops_cnt);
1512           block->insert_node(nop, j++);
1513           last_inst++;
1514           C->cfg()->map_node_to_block(nop, block);
1515           // Ensure enough space.
1516           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1517           if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1518             C->record_failure("CodeCache is full");
1519             return;
1520           }
1521           nop->emit(*cb, C->regalloc());
1522           cb->flush_bundle(true);
1523           current_offset = cb->insts_size();
1524         }
1525 
1526         bool observe_safepoint = is_sfn;
1527         // Remember the start of the last call in a basic block
1528         if (is_mcall) {
1529           MachCallNode *mcall = mach->as_MachCall();
1530 
1531           if (mcall->entry_point() != nullptr) {
1532             // This destination address is NOT PC-relative
1533             mcall->method_set((intptr_t)mcall->entry_point());
1534           }
1535 
1536           // Save the return address
1537           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1538 
1539           observe_safepoint = mcall->guaranteed_safepoint();
1540         }
1541 
1542         // Because of inheritance, sfn is valid whenever mcall is valid
1543         if (observe_safepoint) {
1544           // Handle special safepoint nodes for synchronization
1545           if (!is_mcall) {
1546             MachSafePointNode *sfn = mach->as_MachSafePoint();
1547             // !!!!! Stubs only need an oopmap right now, so bail out
1548             if (sfn->jvms()->method() == nullptr) {
1549               // Write the oopmap directly to the code blob??!!
1550               continue;
1551             }
1552           } // End synchronization
1553 
1554           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1678       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1679         node_offsets[n->_idx] = cb->insts_size();
1680       }
1681 #endif
1682       assert(!C->failing(), "Should not reach here if failing.");
1683 
1684       // "Normal" instruction case
1685       DEBUG_ONLY(uint instr_offset = cb->insts_size());
1686       n->emit(*cb, C->regalloc());
1687       current_offset = cb->insts_size();
1688 
1689       // Above we only verified that there is enough space in the instruction section.
1690       // However, the instruction may emit stubs that cause code buffer expansion.
1691       // Bail out here if expansion failed due to a lack of code cache space.
1692       if (C->failing()) {
1693         return;
1694       }
1695 
1696       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1697              "ret_addr_offset() not within emitted code");
1698 #ifdef ASSERT
1699       uint n_size = n->size(C->regalloc());
1700       if (n_size < (current_offset-instr_offset)) {
1701         MachNode* mach = n->as_Mach();
1702         n->dump();
1703         mach->dump_format(C->regalloc(), tty);
1704         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1705         Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
1706         tty->print_cr(" ------------------- ");
1707         BufferBlob* blob = this->scratch_buffer_blob();
1708         address blob_begin = blob->content_begin();
1709         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1710         assert(false, "wrong size of mach node");
1711       }
1712 #endif
1713       non_safepoints.observe_instruction(n, current_offset);
1714 
1715       // mcall is the last "call" that can be a safepoint.
1716       // Record it so we can see if a poll will directly follow it,
1717       // in which case we'll need a pad to make the PcDesc sites unique.

3070         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3071         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3072       }
3073     }
3074     // Do not allow defs of new derived values to float above GC
3075     // points unless the base is definitely available at the GC point.
3076 
3077     Node *m = b->get_node(i);
3078 
3079     // Add precedence edge from following safepoint to use of derived pointer
3080     if( last_safept_node != end_node &&
3081         m != last_safept_node) {
3082       for (uint k = 1; k < m->req(); k++) {
3083         const Type *t = m->in(k)->bottom_type();
3084         if( t->isa_oop_ptr() &&
3085             t->is_ptr()->offset() != 0 ) {
3086           last_safept_node->add_prec( m );
3087           break;
3088         }
3089       }
3090 
3091       // Do not allow a CheckCastPP node whose input is a raw pointer to
3092       // float past a safepoint.  This can occur when a buffered inline
3093       // type is allocated in a loop and the CheckCastPP from that
3094       // allocation is reused outside the loop.  If the use inside the
3095       // loop is scalarized the CheckCastPP will no longer be connected
3096       // to the loop safepoint.  See JDK-8264340.
3097       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
3098         Node *def = m->in(1);
3099         if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
3100           last_safept_node->add_prec(m);
3101         }
3102       }
3103     }
3104 
3105     if( n->jvms() ) {           // Precedence edge from derived to safept
3106       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3107       if( b->get_node(last_safept) != last_safept_node ) {
3108         last_safept = b->find_node(last_safept_node);
3109       }
3110       for( uint j=last_safept; j > i; j-- ) {
3111         Node *mach = b->get_node(j);
3112         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3113           mach->add_prec( n );
3114       }
3115       last_safept = i;
3116       last_safept_node = m;
3117     }
3118   }
3119 
3120   if (fat_proj_seen) {
3121     // Garbage collect pinch nodes that were not consumed.
3122     // They are usually created by a fat kill MachProj for a call.

3241 }
3242 #endif
3243 
3244 //-----------------------init_scratch_buffer_blob------------------------------
3245 // Construct a temporary BufferBlob and cache it for this compile.
3246 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3247   // If there is already a scratch buffer blob allocated and the
3248   // constant section is big enough, use it.  Otherwise free the
3249   // current and allocate a new one.
3250   BufferBlob* blob = scratch_buffer_blob();
3251   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3252     // Use the current blob.
3253   } else {
3254     if (blob != nullptr) {
3255       BufferBlob::free(blob);
3256     }
3257 
3258     ResourceMark rm;
3259     _scratch_const_size = const_size;
3260     int size = C2Compiler::initial_code_buffer_size(const_size);
3261     if (C->has_scalarized_args()) {
3262       // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
3263       // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
3264       ciMethod* method = C->method();
3265       int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
3266       int arg_num = 0;
3267       if (!method->is_static()) {
3268         if (method->is_scalarized_arg(arg_num)) {
3269           size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
3270         }
3271         arg_num++;
3272       }
3273       for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
3274         if (method->is_scalarized_arg(arg_num)) {
3275           size += str.type()->as_inline_klass()->oop_count() * barrier_size;
3276         }
3277         arg_num++;
3278       }
3279     }
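
The sizing loop added above grows the scratch buffer by oop_count * barrier_size for every scalarized argument, since loading each oop field from a buffered argument can expand into a GC barrier (plus oop verification in debug builds). The arithmetic with invented numbers, assuming ZGC's 200-byte per-oop estimate from the code above:

    #include <cstdio>

    int main() {
      const int base_size    = 4096;   // stand-in for initial_code_buffer_size()
      const int barrier_size = 200;    // per-oop estimate used above when UseZGC
      const int oop_counts[] = {3, 1}; // oop fields per scalarized argument

      int size = base_size;
      for (int c : oop_counts) {
        size += c * barrier_size;      // room for barriers + oop verification
      }
      printf("scratch buffer size: %d bytes\n", size);  // 4096 + 800 = 4896
      return 0;
    }
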
3280     blob = BufferBlob::create("Compile::scratch_buffer", size);
3281     // Record the buffer blob for next time.
3282     set_scratch_buffer_blob(blob);
3283     // Have we run out of code space?
3284     if (scratch_buffer_blob() == nullptr) {
 3285       // Let CompileBroker disable further compilations.
3286       C->record_failure("Not enough space for scratch buffer in CodeCache");
3287       return;
3288     }
3289   }
3290 
3291   // Initialize the relocation buffers
3292   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3293   set_scratch_locs_memory(locs_buf);
3294 }
3295 
3296 
3297 //-----------------------scratch_emit_size-------------------------------------
3298 // Helper function that computes size by emitting code
3299 uint PhaseOutput::scratch_emit_size(const Node* n) {

3330   buf.insts()->set_scratch_emit();
3331   buf.stubs()->set_scratch_emit();
3332 
3333   // Do the emission.
3334 
3335   Label fakeL; // Fake label for branch instructions.
3336   Label*   saveL = nullptr;
3337   uint save_bnum = 0;
3338   bool is_branch = n->is_MachBranch();
3339   if (is_branch) {
3340     MacroAssembler masm(&buf);
3341     masm.bind(fakeL);
3342     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3343     n->as_MachBranch()->label_set(&fakeL, 0);
3344   }
3345   n->emit(buf, C->regalloc());
3346 
3347   // Emitting into the scratch buffer should not fail
3348   assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3349 
3350   // Restore label.
3351   if (is_branch) {
3352     n->as_MachBranch()->label_set(saveL, save_bnum);
3353   }
3354 
3355   // End scratch_emit_size section.
3356   set_in_scratch_emit_size(false);
3357 
3358   return buf.insts_size();
3359 }
3360 
3361 void PhaseOutput::install() {
3362   if (!C->should_install_code()) {
3363     return;
3364   } else if (C->stub_function() != nullptr) {
3365     install_stub(C->stub_name());
3366   } else {
3367     install_code(C->method(),
3368                  C->entry_bci(),
3369                  CompileBroker::compiler2(),
3370                  C->has_unsafe_access(),
3371                  SharedRuntime::is_wide_vector(C->max_vector_size()),
3372                  C->rtm_state());
3373   }

3377                                int               entry_bci,
3378                                AbstractCompiler* compiler,
3379                                bool              has_unsafe_access,
3380                                bool              has_wide_vectors,
3381                                RTMState          rtm_state) {
3382   // Check if we want to skip execution of all compiled code.
3383   {
3384 #ifndef PRODUCT
3385     if (OptoNoExecute) {
3386       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3387       return;
3388     }
3389 #endif
3390     Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3391 
3392     if (C->is_osr_compilation()) {
3393       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3394       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3395     } else {
3396       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3397       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
3398         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
3399       }
3400       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
3401         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
3402       }
3403       if (_code_offsets.value(CodeOffsets::Entry) == -1) {
3404         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
3405       }
3406       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3407     }
3408 
3409     C->env()->register_method(target,
3410                               entry_bci,
3411                               &_code_offsets,
3412                               _orig_pc_slot_offset_in_bytes,
3413                               code_buffer(),
3414                               frame_size_in_words(),
3415                               _oop_map_set,
3416                               &_handler_table,
3417                               inc_table(),
3418                               compiler,
3419                               has_unsafe_access,
3420                               SharedRuntime::is_wide_vector(C->max_vector_size()),
3421                               C->has_monitors(),
3422                               0,
3423                               C->rtm_state());
3424 
3425     if (C->log() != nullptr) { // Print code cache state into compiler log
3426       C->log()->code_cache_state();
3427     }
3428   }
3429 }
3430 void PhaseOutput::install_stub(const char* stub_name) {
3431   // Entry point will be accessed using stub_entry_point();
3432   if (code_buffer() == nullptr) {
3433     Matcher::soft_match_failure();
3434   } else {
3435     if (PrintAssembly && (WizardMode || Verbose))
3436       tty->print_cr("### Stub::%s", stub_name);
3437 
3438     if (!C->failing()) {
3439       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3440 
3441       // Make the NMethod
3442       // For now we mark the frame as never safe for profile stackwalking
3443       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,