
src/hotspot/share/opto/output.cpp (original version)

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfo.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/compilerDirectives.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/c2/barrierSetC2.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/allocation.hpp"
  39 #include "opto/ad.hpp"
  40 #include "opto/block.hpp"
  41 #include "opto/c2compiler.hpp"
  42 #include "opto/c2_MacroAssembler.hpp"
  43 #include "opto/callnode.hpp"
  44 #include "opto/cfgnode.hpp"
  45 #include "opto/locknode.hpp"
  46 #include "opto/machnode.hpp"
  47 #include "opto/node.hpp"
  48 #include "opto/optoreg.hpp"
  49 #include "opto/output.hpp"
  50 #include "opto/regalloc.hpp"
  51 #include "opto/runtime.hpp"
  52 #include "opto/subnode.hpp"
  53 #include "opto/type.hpp"
  54 #include "runtime/handles.inline.hpp"
  55 #include "runtime/sharedRuntime.hpp"

 329     _handler_table(),
 330     _inc_table(),
 331     _safepoint_poll_table(),
 332     _entry_barrier_table(),
 333     _oop_map_set(NULL),
 334     _scratch_buffer_blob(NULL),
 335     _scratch_locs_memory(NULL),
 336     _scratch_const_size(-1),
 337     _in_scratch_emit_size(false),
 338     _frame_slots(0),
 339     _code_offsets(),
 340     _node_bundling_limit(0),
 341     _node_bundling_base(NULL),
 342     _orig_pc_slot(0),
 343     _orig_pc_slot_offset_in_bytes(0),
 344     _buf_sizes(),
 345     _block(NULL),
 346     _index(0) {
 347   C->set_output(this);
 348   if (C->stub_name() == NULL) {
 349     _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
 350   }
 351 }
 352 
 353 PhaseOutput::~PhaseOutput() {
 354   C->set_output(NULL);
 355   if (_scratch_buffer_blob != NULL) {
 356     BufferBlob::free(_scratch_buffer_blob);
 357   }
 358 }
 359 
 360 void PhaseOutput::perform_mach_node_analysis() {
 361   // Late barrier analysis must be done after schedule and bundle;
 362   // otherwise, liveness-based spilling will fail.
 363   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 364   bs->late_barrier_analysis();
 365 
 366   pd_perform_mach_node_analysis();
 367 
 368   C->print_method(CompilerPhaseType::PHASE_MACHANALYSIS, 4);
 369 }
 370 
 371 // Convert Nodes to instruction bits and pass off to the VM
 372 void PhaseOutput::Output() {
 373   // RootNode goes away; the root block must be empty here.
 374   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 375 
 376   // The number of new nodes (mostly MachNop) is proportional to
 377   // the number of Java calls and aligned inner loops.
 378   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 379                             C->inner_loops()*(OptoLoopAlignment-1)),
 380                            "out of nodes before code generation" ) ) {
 381     return;
 382   }
 383   // Make sure I can find the Start Node
 384   Block *entry = C->cfg()->get_block(1);
 385   Block *broot = C->cfg()->get_root_block();
 386 
 387   const StartNode *start = entry->head()->as_Start();
 388 
 389   // Replace StartNode with prolog
 390   MachPrologNode *prolog = new MachPrologNode();
 391   entry->map_node(prolog, 0);
 392   C->cfg()->map_node_to_block(prolog, entry);
 393   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 394 
 395   // Virtual methods need an unverified entry point
 396 
 397   if( C->is_osr_compilation() ) {
 398     if( PoisonOSREntry ) {
 399       // TODO: Should use a ShouldNotReachHereNode...
 400       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 401     }
 402   } else {
 403     if( C->method() && !C->method()->flags().is_static() ) {
 404       // Insert the unverified entry point
 405       C->cfg()->insert( broot, 0, new MachUEPNode() );
 406     }
 407 
 408   }
 409 
 410   // Break before main entry point
 411   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 412       (OptoBreakpoint && C->is_method_compilation())       ||
 413       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 414       (OptoBreakpointC2R && !C->method())                   ) {
 415     // checking for C->method() means that OptoBreakpoint does not apply to
 416     // runtime stubs or frame converters
 417     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 418   }
 419 
 420   // Insert epilogs before every return
 421   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 422     Block* block = C->cfg()->get_block(i);
 423     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 424       Node* m = block->end();
 425       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 426         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 427         block->add_inst(epilog);
 428         C->cfg()->map_node_to_block(epilog, block);
 429       }
 430     }
 431   }
 432 
 433   // Keeper of sizing aspects
 434   _buf_sizes = BufferSizingData();
 435 
 436   // Initialize code buffer
 437   estimate_buffer_size(_buf_sizes._const);
 438   if (C->failing()) return;
 439 
 440   // Pre-compute the length of blocks and replace
 441   // long branches with short ones if the machine supports it.
 442   // Must be done before ScheduleAndBundle due to SPARC delay slots.
 443   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 444   blk_starts[0] = 0;
 445   shorten_branches(blk_starts);
 446 
 447   ScheduleAndBundle();
 448   if (C->failing()) {
 449     return;
 450   }
 451 
 452   perform_mach_node_analysis();
 453 
 454   // Complete sizing of codebuffer
 455   CodeBuffer* cb = init_buffer();
 456   if (cb == NULL || C->failing()) {
 457     return;
 458   }
 459 
 460   BuildOopMaps();
 461 
 462   if (C->failing())  {
 463     return;
 464   }
 465 
 466   fill_buffer(cb, blk_starts);

 587     // Sum all instruction sizes to compute block size
 588     uint last_inst = block->number_of_nodes();
 589     uint blk_size = 0;
 590     for (uint j = 0; j < last_inst; j++) {
 591       _index = j;
 592       Node* nj = block->get_node(_index);
 593       // Handle machine instruction nodes
 594       if (nj->is_Mach()) {
 595         MachNode* mach = nj->as_Mach();
 596         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 597         reloc_size += mach->reloc();
 598         if (mach->is_MachCall()) {
 599           // add size information for trampoline stub
 600           // class CallStubImpl is platform-specific and defined in the *.ad files.
 601           stub_size  += CallStubImpl::size_call_trampoline();
 602           reloc_size += CallStubImpl::reloc_call_trampoline();
 603 
 604           MachCallNode *mcall = mach->as_MachCall();
 605           // This destination address is NOT PC-relative
 606 
 607           mcall->method_set((intptr_t)mcall->entry_point());
 608 
 609           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 610             stub_size  += CompiledStaticCall::to_interp_stub_size();
 611             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 612           }
 613         } else if (mach->is_MachSafePoint()) {
 614           // If call/safepoint are adjacent, account for possible
 615           // nop to disambiguate the two safepoints.
 616           // ScheduleAndBundle() can rearrange nodes in a block,
 617           // check for all offsets inside this block.
 618           if (last_call_adr >= blk_starts[i]) {
 619             blk_size += nop_size;
 620           }
 621         }
 622         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 623           // Nop is inserted between "avoid back to back" instructions.
 624           // ScheduleAndBundle() can rearrange nodes in a block,
 625           // check for all offsets inside this block.
 626           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 627             blk_size += nop_size;

 842     // New functionality:
 843     //   Assert if the local is not top. In product mode let the new node
 844     //   override the old entry.
 845     assert(local == C->top(), "LocArray collision");
 846     if (local == C->top()) {
 847       return;
 848     }
 849     array->pop();
 850   }
 851   const Type *t = local->bottom_type();
 852 
 853   // Is it a safepoint scalar object node?
 854   if (local->is_SafePointScalarObject()) {
 855     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 856 
 857     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 858     if (sv == NULL) {
 859       ciKlass* cik = t->is_oopptr()->exact_klass();
 860       assert(cik->is_instance_klass() ||
 861              cik->is_array_klass(), "Not supported allocation.");
 862       sv = new ObjectValue(spobj->_idx,
 863                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
 864       set_sv_for_object_node(objs, sv);
 865 
 866       uint first_ind = spobj->first_index(sfpt->jvms());
 867       for (uint i = 0; i < spobj->n_fields(); i++) {
 868         Node* fld_node = sfpt->in(first_ind+i);
 869         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 870       }
 871     }
 872     array->append(sv);
 873     return;
 874   }
 875 
 876   // Grab the register number for the local
 877   OptoReg::Name regnum = C->regalloc()->get_reg_first(local);
 878   if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
 879     // Record the double as two float registers.
 880     // The register mask for such a value always specifies two adjacent
 881     // float registers, with the lower register number even.
 882     // Normally, the allocation of high and low words to these registers
 883     // is irrelevant, because nearly all operations on register pairs
 884     // (e.g., StoreD) treat them as a single unit.
 885     // Here, we assume in addition that the words in these two registers
 886     // are stored "naturally" (by operations like StoreD and double stores

1028       ShouldNotReachHere();
1029       break;
1030   }
1031 }
1032 
1033 // Determine if this node starts a bundle
1034 bool PhaseOutput::starts_bundle(const Node *n) const {
1035   return (_node_bundling_limit > n->_idx &&
1036           _node_bundling_base[n->_idx].starts_bundle());
1037 }
1038 
1039 //--------------------------Process_OopMap_Node--------------------------------
1040 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1041   // Handle special safepoint nodes for synchronization
1042   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1043   MachCallNode      *mcall;
1044 
1045   int safepoint_pc_offset = current_offset;
1046   bool is_method_handle_invoke = false;
1047   bool return_oop = false;
1048   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1049   bool arg_escape = false;
1050 
1051   // Add the safepoint in the DebugInfoRecorder
1052   if( !mach->is_MachCall() ) {
1053     mcall = NULL;
1054     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1055   } else {
1056     mcall = mach->as_MachCall();
1057 
1058     // Is the call a MethodHandle call?
1059     if (mcall->is_MachCallJava()) {
1060       if (mcall->as_MachCallJava()->_method_handle_invoke) {
1061         assert(C->has_method_handle_invokes(), "must have been set during call generation");
1062         is_method_handle_invoke = true;
1063       }
1064       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1065     }
1066 
1067     // Check if a call returns an object.
1068     if (mcall->returns_pointer()) {
1069       return_oop = true;
1070     }
1071     safepoint_pc_offset += mcall->ret_addr_offset();
1072     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1073   }
1074 
1075   // Loop over the JVMState list to add scope information
1076   // Do not skip safepoints with a NULL method; they need monitor info
1077   JVMState* youngest_jvms = sfn->jvms();
1078   int max_depth = youngest_jvms->depth();
1079 
1080   // Allocate the object pool for scalar-replaced objects -- the map from
1081   // small-integer keys (which can be recorded in the local and ostack
1082   // arrays) to descriptions of the object state.
1083   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1084 
1085   // Visit scopes from oldest to youngest.
1086   for (int depth = 1; depth <= max_depth; depth++) {
1087     JVMState* jvms = youngest_jvms->of_depth(depth);
1088     int idx;
1089     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
1090     // Safepoints that do not have method() set only provide oop-map and monitor info

1174     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1175     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1176 
1177     // Make method available for all Safepoints
1178     ciMethod* scope_method = method ? method : C->method();
1179     // Describe the scope here
1180     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1181     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1182     // Now we can describe the scope.
1183     methodHandle null_mh;
1184     bool rethrow_exception = false;
1185     C->debug_info()->describe_scope(
1186       safepoint_pc_offset,
1187       null_mh,
1188       scope_method,
1189       jvms->bci(),
1190       jvms->should_reexecute(),
1191       rethrow_exception,
1192       is_method_handle_invoke,
1193       return_oop,
1194       has_ea_local_in_scope,
1195       arg_escape,
1196       locvals,
1197       expvals,
1198       monvals
1199     );
1200   } // End jvms loop
1201 
1202   // Mark the end of the scope set.
1203   C->debug_info()->end_safepoint(safepoint_pc_offset);
1204 }
1205 
1206 
1207 
1208 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1209 class NonSafepointEmitter {
1210     Compile*  C;
1211     JVMState* _pending_jvms;
1212     int       _pending_offset;
1213 

1551           MachNode *nop = new MachNopNode(nops_cnt);
1552           block->insert_node(nop, j++);
1553           last_inst++;
1554           C->cfg()->map_node_to_block(nop, block);
1555           // Ensure enough space.
1556           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1557           if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1558             C->record_failure("CodeCache is full");
1559             return;
1560           }
1561           nop->emit(*cb, C->regalloc());
1562           cb->flush_bundle(true);
1563           current_offset = cb->insts_size();
1564         }
1565 
1566         bool observe_safepoint = is_sfn;
1567         // Remember the start of the last call in a basic block
1568         if (is_mcall) {
1569           MachCallNode *mcall = mach->as_MachCall();
1570 
1571           // This destination address is NOT PC-relative
1572           mcall->method_set((intptr_t)mcall->entry_point());
1573 
1574           // Save the return address
1575           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1576 
1577           observe_safepoint = mcall->guaranteed_safepoint();
1578         }
1579 
1580         // sfn is valid whenever mcall is valid, since MachCallNode inherits from MachSafePointNode
1581         if (observe_safepoint) {
1582           // Handle special safepoint nodes for synchronization
1583           if (!is_mcall) {
1584             MachSafePointNode *sfn = mach->as_MachSafePoint();
1585             // !!!!! Stubs only need an oopmap right now, so bail out
1586             if (sfn->jvms()->method() == NULL) {
1587               // Write the oopmap directly to the code blob??!!
1588               continue;
1589             }
1590           } // End synchronization
1591 
1592           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1716       if ((node_offsets != NULL) && (n->_idx < node_offset_limit)) {
1717         node_offsets[n->_idx] = cb->insts_size();
1718       }
1719 #endif
1720       assert(!C->failing(), "Should not reach here if failing.");
1721 
1722       // "Normal" instruction case
1723       DEBUG_ONLY(uint instr_offset = cb->insts_size());
1724       n->emit(*cb, C->regalloc());
1725       current_offset = cb->insts_size();
1726 
1727       // Above we only verified that there is enough space in the instruction section.
1728       // However, the instruction may emit stubs that cause code buffer expansion.
1729       // Bail out here if expansion failed due to a lack of code cache space.
1730       if (C->failing()) {
1731         return;
1732       }
1733 
1734       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1735              "ret_addr_offset() not within emitted code");
1736 
1737 #ifdef ASSERT
1738       uint n_size = n->size(C->regalloc());
1739       if (n_size < (current_offset-instr_offset)) {
1740         MachNode* mach = n->as_Mach();
1741         n->dump();
1742         mach->dump_format(C->regalloc(), tty);
1743         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1744         Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
1745         tty->print_cr(" ------------------- ");
1746         BufferBlob* blob = this->scratch_buffer_blob();
1747         address blob_begin = blob->content_begin();
1748         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1749         assert(false, "wrong size of mach node");
1750       }
1751 #endif
1752       non_safepoints.observe_instruction(n, current_offset);
1753 
1754       // mcall is the last "call" that can be a safepoint;
1755       // record it so we can see if a poll will directly follow it,
1756       // in which case we'll need a pad to make the PcDesc sites unique.

3106         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3107         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3108       }
3109     }
3110     // Do not allow defs of new derived values to float above GC
3111     // points unless the base is definitely available at the GC point.
3112 
3113     Node *m = b->get_node(i);
3114 
3115     // Add precedence edge from following safepoint to use of derived pointer
3116     if( last_safept_node != end_node &&
3117         m != last_safept_node) {
3118       for (uint k = 1; k < m->req(); k++) {
3119         const Type *t = m->in(k)->bottom_type();
3120         if( t->isa_oop_ptr() &&
3121             t->is_ptr()->offset() != 0 ) {
3122           last_safept_node->add_prec( m );
3123           break;
3124         }
3125       }
3126     }
3127 
3128     if( n->jvms() ) {           // Precedence edge from derived to safept
3129       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3130       if( b->get_node(last_safept) != last_safept_node ) {
3131         last_safept = b->find_node(last_safept_node);
3132       }
3133       for( uint j=last_safept; j > i; j-- ) {
3134         Node *mach = b->get_node(j);
3135         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3136           mach->add_prec( n );
3137       }
3138       last_safept = i;
3139       last_safept_node = m;
3140     }
3141   }
3142 
3143   if (fat_proj_seen) {
3144     // Garbage collect pinch nodes that were not consumed.
3145     // They are usually created by a fat kill MachProj for a call.

3264 }
3265 #endif
3266 
3267 //-----------------------init_scratch_buffer_blob------------------------------
3268 // Construct a temporary BufferBlob and cache it for this compile.
3269 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3270   // If there is already a scratch buffer blob allocated and the
3271   // constant section is big enough, use it.  Otherwise free the
3272   // current and allocate a new one.
3273   BufferBlob* blob = scratch_buffer_blob();
3274   if ((blob != NULL) && (const_size <= _scratch_const_size)) {
3275     // Use the current blob.
3276   } else {
3277     if (blob != NULL) {
3278       BufferBlob::free(blob);
3279     }
3280 
3281     ResourceMark rm;
3282     _scratch_const_size = const_size;
3283     int size = C2Compiler::initial_code_buffer_size(const_size);
3284     blob = BufferBlob::create("Compile::scratch_buffer", size);
3285     // Record the buffer blob for next time.
3286     set_scratch_buffer_blob(blob);
3287     // Have we run out of code space?
3288     if (scratch_buffer_blob() == NULL) {
3289       // Let CompileBroker disable further compilations.
3290       C->record_failure("Not enough space for scratch buffer in CodeCache");
3291       return;
3292     }
3293   }
3294 
3295   // Initialize the relocation buffers
3296   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3297   set_scratch_locs_memory(locs_buf);
3298 }
3299 
3300 
3301 //-----------------------scratch_emit_size-------------------------------------
3302 // Helper function that computes size by emitting code
3303 uint PhaseOutput::scratch_emit_size(const Node* n) {

3328   int lsize = MAX_locs_size / 3;
3329   buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
3330   buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
3331   buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
3332   // Mark as scratch buffer.
3333   buf.consts()->set_scratch_emit();
3334   buf.insts()->set_scratch_emit();
3335   buf.stubs()->set_scratch_emit();
3336 
3337   // Do the emission.
3338 
3339   Label fakeL; // Fake label for branch instructions.
3340   Label*   saveL = NULL;
3341   uint save_bnum = 0;
3342   bool is_branch = n->is_MachBranch();
3343   if (is_branch) {
3344     MacroAssembler masm(&buf);
3345     masm.bind(fakeL);
3346     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3347     n->as_MachBranch()->label_set(&fakeL, 0);
3348   }
3349   n->emit(buf, C->regalloc());
3350 
3351   // Emitting into the scratch buffer should not fail
3352   assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3353 
3354   if (is_branch) // Restore label.
3355     n->as_MachBranch()->label_set(saveL, save_bnum);
3356 
3357   // End scratch_emit_size section.
3358   set_in_scratch_emit_size(false);
3359 
3360   return buf.insts_size();
3361 }
3362 
3363 void PhaseOutput::install() {
3364   if (!C->should_install_code()) {
3365     return;
3366   } else if (C->stub_function() != NULL) {
3367     install_stub(C->stub_name());
3368   } else {
3369     install_code(C->method(),
3370                  C->entry_bci(),
3371                  CompileBroker::compiler2(),
3372                  C->has_unsafe_access(),
3373                  SharedRuntime::is_wide_vector(C->max_vector_size()),
3374                  C->rtm_state());
3375   }

3379                                int               entry_bci,
3380                                AbstractCompiler* compiler,
3381                                bool              has_unsafe_access,
3382                                bool              has_wide_vectors,
3383                                RTMState          rtm_state) {
3384   // Check if we want to skip execution of all compiled code.
3385   {
3386 #ifndef PRODUCT
3387     if (OptoNoExecute) {
3388       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3389       return;
3390     }
3391 #endif
3392     Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3393 
3394     if (C->is_osr_compilation()) {
3395       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3396       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3397     } else {
3398       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3399       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3400     }
3401 
3402     C->env()->register_method(target,
3403                                      entry_bci,
3404                                      &_code_offsets,
3405                                      _orig_pc_slot_offset_in_bytes,
3406                                      code_buffer(),
3407                                      frame_size_in_words(),
3408                                      oop_map_set(),
3409                                      &_handler_table,
3410                                      inc_table(),
3411                                      compiler,
3412                                      has_unsafe_access,
3413                                      SharedRuntime::is_wide_vector(C->max_vector_size()),
3414                                      C->has_monitors(),
3415                                      0,
3416                                      C->rtm_state());
3417 
3418     if (C->log() != NULL) { // Print code cache state into compiler log
3419       C->log()->code_cache_state();
3420     }
3421   }
3422 }
3423 void PhaseOutput::install_stub(const char* stub_name) {
3424   // Entry point will be accessed using stub_entry_point();
3425   if (code_buffer() == NULL) {
3426     Matcher::soft_match_failure();
3427   } else {
3428     if (PrintAssembly && (WizardMode || Verbose))
3429       tty->print_cr("### Stub::%s", stub_name);
3430 
3431     if (!C->failing()) {
3432       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3433 
3434       // Make the NMethod
3435       // For now we mark the frame as never safe for profile stackwalking
3436       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,

src/hotspot/share/opto/output.cpp (patched version)

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfo.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/compilerDirectives.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/gc_globals.hpp"
  37 #include "gc/shared/c2/barrierSetC2.hpp"
  38 #include "memory/allocation.inline.hpp"
  39 #include "memory/allocation.hpp"
  40 #include "opto/ad.hpp"
  41 #include "opto/block.hpp"
  42 #include "opto/c2compiler.hpp"
  43 #include "opto/c2_MacroAssembler.hpp"
  44 #include "opto/callnode.hpp"
  45 #include "opto/cfgnode.hpp"
  46 #include "opto/locknode.hpp"
  47 #include "opto/machnode.hpp"
  48 #include "opto/node.hpp"
  49 #include "opto/optoreg.hpp"
  50 #include "opto/output.hpp"
  51 #include "opto/regalloc.hpp"
  52 #include "opto/runtime.hpp"
  53 #include "opto/subnode.hpp"
  54 #include "opto/type.hpp"
  55 #include "runtime/handles.inline.hpp"
  56 #include "runtime/sharedRuntime.hpp"

 330     _handler_table(),
 331     _inc_table(),
 332     _safepoint_poll_table(),
 333     _entry_barrier_table(),
 334     _oop_map_set(NULL),
 335     _scratch_buffer_blob(NULL),
 336     _scratch_locs_memory(NULL),
 337     _scratch_const_size(-1),
 338     _in_scratch_emit_size(false),
 339     _frame_slots(0),
 340     _code_offsets(),
 341     _node_bundling_limit(0),
 342     _node_bundling_base(NULL),
 343     _orig_pc_slot(0),
 344     _orig_pc_slot_offset_in_bytes(0),
 345     _buf_sizes(),
 346     _block(NULL),
 347     _index(0) {
 348   C->set_output(this);
 349   if (C->stub_name() == NULL) {
 350     int fixed_slots = C->fixed_slots();
 351     if (C->needs_stack_repair()) {
 352       fixed_slots -= 2;
 353     }
 354     // TODO 8284443 Only reserve extra slot if needed
 355     if (InlineTypeReturnedAsFields) {
 356       fixed_slots -= 2;
 357     }
 358     _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
 359   }
 360 }
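
A minimal standalone sketch of the _orig_pc_slot arithmetic in the patched constructor above, assuming a 64-bit VM where VMRegImpl::stack_slot_size == 4, so an address spans two stack slots; the helper function and its input values are hypothetical stand-ins, not patch code:

#include <cstdio>

// Hypothetical mirror of the constructor logic above.
static int orig_pc_slot(int fixed_slots, bool needs_stack_repair,
                        bool inline_type_returned_as_fields) {
  if (needs_stack_repair) {
    fixed_slots -= 2;  // slots reserved for stack repair (assumption)
  }
  if (inline_type_returned_as_fields) {
    fixed_slots -= 2;  // extra reserved slots, see TODO 8284443
  }
  return fixed_slots - 2;  // sizeof(address) / stack_slot_size == 8 / 4
}

int main() {
  // e.g. 8 fixed slots with both features enabled: 8 - 2 - 2 - 2 == 2
  std::printf("_orig_pc_slot = %d\n", orig_pc_slot(8, true, true));
  return 0;
}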
 361 
 362 PhaseOutput::~PhaseOutput() {
 363   C->set_output(NULL);
 364   if (_scratch_buffer_blob != NULL) {
 365     BufferBlob::free(_scratch_buffer_blob);
 366   }
 367 }
 368 
 369 void PhaseOutput::perform_mach_node_analysis() {
 370   // Late barrier analysis must be done after schedule and bundle;
 371   // otherwise, liveness-based spilling will fail.
 372   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 373   bs->late_barrier_analysis();
 374 
 375   pd_perform_mach_node_analysis();
 376 
 377   C->print_method(CompilerPhaseType::PHASE_MACHANALYSIS, 4);
 378 }
 379 
 380 // Convert Nodes to instruction bits and pass off to the VM
 381 void PhaseOutput::Output() {
 382   // RootNode goes away; the root block must be empty here.
 383   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 384 
 385   // The number of new nodes (mostly MachNop) is proportional to
 386   // the number of Java calls and aligned inner loops.
 387   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 388                             C->inner_loops()*(OptoLoopAlignment-1)),
 389                            "out of nodes before code generation" ) ) {
 390     return;
 391   }
 392   // Make sure I can find the Start Node
 393   Block *entry = C->cfg()->get_block(1);
 394   Block *broot = C->cfg()->get_root_block();
 395 
 396   const StartNode *start = entry->head()->as_Start();
 397 
 398   // Replace StartNode with prolog
 399   Label verified_entry;
 400   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
 401   entry->map_node(prolog, 0);
 402   C->cfg()->map_node_to_block(prolog, entry);
 403   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 404 
 405   // Virtual methods need an unverified entry point
 406   if (C->is_osr_compilation()) {
 407     if (PoisonOSREntry) {
 408       // TODO: Should use a ShouldNotReachHereNode...
 409       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 410     }
 411   } else {
 412     if (C->method()) {
 413       if (C->method()->has_scalarized_args()) {
 414         // Add entry point to unpack all inline type arguments
 415         C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
 416         if (!C->method()->is_static()) {
 417           // Add verified/unverified entry points that unpack only the inline type receiver at interface calls
 418           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
 419           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true,  /* receiver_only */ true));
 420           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
 421         }
 422       } else if (!C->method()->is_static()) {
 423         // Insert the unverified entry point
 424         C->cfg()->insert(broot, 0, new MachUEPNode());
 425       }
 426     }
 427   }
 428 
 429   // Break before main entry point
 430   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 431       (OptoBreakpoint && C->is_method_compilation())       ||
 432       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 433       (OptoBreakpointC2R && !C->method())                   ) {
 434     // checking for C->method() means that OptoBreakpoint does not apply to
 435     // runtime stubs or frame converters
 436     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 437   }
 438 
 439   // Insert epilogs before every return
 440   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 441     Block* block = C->cfg()->get_block(i);
 442     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 443       Node* m = block->end();
 444       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 445         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 446         block->add_inst(epilog);
 447         C->cfg()->map_node_to_block(epilog, block);
 448       }
 449     }
 450   }
 451 
 452   // Keeper of sizing aspects
 453   _buf_sizes = BufferSizingData();
 454 
 455   // Initialize code buffer
 456   estimate_buffer_size(_buf_sizes._const);
 457   if (C->failing()) return;
 458 
 459   // Pre-compute the length of blocks and replace
 460   // long branches with short ones if the machine supports it.
 461   // Must be done before ScheduleAndBundle due to SPARC delay slots.
 462   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 463   blk_starts[0] = 0;
 464   shorten_branches(blk_starts);
 465 
 466   if (!C->is_osr_compilation() && C->has_scalarized_args()) {
 467     // Compute the offsets of the entry points required by the inline type calling convention
 468     if (!C->method()->is_static()) {
 469       // We have entries at the beginning of the method, implemented by the first 4 nodes.
 470       // Entry                     (unverified) @ offset 0
 471       // Verified_Inline_Entry_RO
 472       // Inline_Entry              (unverified)
 473       // Verified_Inline_Entry
 474       uint offset = 0;
 475       _code_offsets.set_value(CodeOffsets::Entry, offset);
 476 
 477       offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
 478       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
 479 
 480       offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
 481       _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
 482 
 483       offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
 484       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
 485     } else {
 486       _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
 487       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
 488     }
 489   }
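
A sketch of the offset bookkeeping in the non-static branch above: each CodeOffsets value is a running sum of the sizes of the preceding entry-point nodes. The node sizes below are invented purely for illustration:

#include <cstdio>

int main() {
  // Order mirrors the comment above; sizes are hypothetical node sizes in bytes.
  const char* entries[] = { "Entry (unverified)",
                            "Verified_Inline_Entry_RO",
                            "Inline_Entry (unverified)",
                            "Verified_Inline_Entry" };
  const int sizes[] = { 16, 32, 16, 0 };
  int offset = 0;
  for (int i = 0; i < 4; i++) {
    std::printf("%-26s @ offset %d\n", entries[i], offset);
    offset += sizes[i];
  }
  return 0;
}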
 490 
 491   ScheduleAndBundle();
 492   if (C->failing()) {
 493     return;
 494   }
 495 
 496   perform_mach_node_analysis();
 497 
 498   // Complete sizing of codebuffer
 499   CodeBuffer* cb = init_buffer();
 500   if (cb == NULL || C->failing()) {
 501     return;
 502   }
 503 
 504   BuildOopMaps();
 505 
 506   if (C->failing())  {
 507     return;
 508   }
 509 
 510   fill_buffer(cb, blk_starts);

 631     // Sum all instruction sizes to compute block size
 632     uint last_inst = block->number_of_nodes();
 633     uint blk_size = 0;
 634     for (uint j = 0; j < last_inst; j++) {
 635       _index = j;
 636       Node* nj = block->get_node(_index);
 637       // Handle machine instruction nodes
 638       if (nj->is_Mach()) {
 639         MachNode* mach = nj->as_Mach();
 640         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 641         reloc_size += mach->reloc();
 642         if (mach->is_MachCall()) {
 643           // add size information for trampoline stub
 644           // class CallStubImpl is platform-specific and defined in the *.ad files.
 645           stub_size  += CallStubImpl::size_call_trampoline();
 646           reloc_size += CallStubImpl::reloc_call_trampoline();
 647 
 648           MachCallNode *mcall = mach->as_MachCall();
 649           // This destination address is NOT PC-relative
 650 
 651           if (mcall->entry_point() != NULL) {
 652             mcall->method_set((intptr_t)mcall->entry_point());
 653           }
 654 
 655           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 656             stub_size  += CompiledStaticCall::to_interp_stub_size();
 657             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 658           }
 659         } else if (mach->is_MachSafePoint()) {
 660           // If call/safepoint are adjacent, account for possible
 661           // nop to disambiguate the two safepoints.
 662           // ScheduleAndBundle() can rearrange nodes in a block,
 663           // check for all offsets inside this block.
 664           if (last_call_adr >= blk_starts[i]) {
 665             blk_size += nop_size;
 666           }
 667         }
 668         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 669           // Nop is inserted between "avoid back to back" instructions.
 670           // ScheduleAndBundle() can rearrange nodes in a block,
 671           // check for all offsets inside this block.
 672           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 673             blk_size += nop_size;

 888     // New functionality:
 889     //   Assert if the local is not top. In product mode let the new node
 890     //   override the old entry.
 891     assert(local == C->top(), "LocArray collision");
 892     if (local == C->top()) {
 893       return;
 894     }
 895     array->pop();
 896   }
 897   const Type *t = local->bottom_type();
 898 
 899   // Is it a safepoint scalar object node?
 900   if (local->is_SafePointScalarObject()) {
 901     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 902 
 903     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 904     if (sv == NULL) {
 905       ciKlass* cik = t->is_oopptr()->exact_klass();
 906       assert(cik->is_instance_klass() ||
 907              cik->is_array_klass(), "Not supported allocation.");
 908       uint first_ind = spobj->first_index(sfpt->jvms());
 909       // Nullable, scalarized inline types have an is_init input
 910       // that needs to be checked before using the field values.
 911       ScopeValue* is_init = NULL;
 912       if (cik->is_inlinetype()) {
 913         Node* init_node = sfpt->in(first_ind++);
 914         assert(init_node != NULL, "is_init node not found");
 915         if (!init_node->is_top()) {
 916           const TypeInt* init_type = init_node->bottom_type()->is_int();
 917           if (init_node->is_Con()) {
 918             is_init = new ConstantIntValue(init_type->get_con());
 919           } else {
 920             OptoReg::Name init_reg = C->regalloc()->get_reg_first(init_node);
 921             is_init = new_loc_value(C->regalloc(), init_reg, Location::normal);
 922           }
 923         }
 924       }
 925       sv = new ObjectValue(spobj->_idx,
 926                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), is_init);
 927       set_sv_for_object_node(objs, sv);
 928 
 929       for (uint i = 0; i < spobj->n_fields(); i++) {
 930         Node* fld_node = sfpt->in(first_ind+i);
 931         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 932       }
 933     }
 934     array->append(sv);
 935     return;
 936   }
 937 
 938   // Grab the register number for the local
 939   OptoReg::Name regnum = C->regalloc()->get_reg_first(local);
 940   if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
 941     // Record the double as two float registers.
 942     // The register mask for such a value always specifies two adjacent
 943     // float registers, with the lower register number even.
 944     // Normally, the allocation of high and low words to these registers
 945     // is irrelevant, because nearly all operations on register pairs
 946     // (e.g., StoreD) treat them as a single unit.
 947     // Here, we assume in addition that the words in these two registers
 948     // are stored "naturally" (by operations like StoreD and double stores

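In the FillLocArray hunk above, a nullable scalarized inline type contributes an is_init input ahead of its field values, so the field values start one input slot later than first_index() alone would suggest. A schematic with invented indices (not real node numbering):

#include <cstdio>

int main() {
  const int first_ind = 5;  // invented spobj->first_index(jvms) value
  const int n_fields  = 2;  // invented spobj->n_fields() value
  // For an inline klass, in(first_ind) is the is_init flag and the field
  // values follow it (the patched code post-increments first_ind).
  std::printf("in(%d) -> is_init\n", first_ind);
  for (int i = 0; i < n_fields; i++) {
    std::printf("in(%d) -> field %d\n", first_ind + 1 + i, i);
  }
  return 0;
}
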
1090       ShouldNotReachHere();
1091       break;
1092   }
1093 }
1094 
1095 // Determine if this node starts a bundle
1096 bool PhaseOutput::starts_bundle(const Node *n) const {
1097   return (_node_bundling_limit > n->_idx &&
1098           _node_bundling_base[n->_idx].starts_bundle());
1099 }
1100 
1101 //--------------------------Process_OopMap_Node--------------------------------
1102 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1103   // Handle special safepoint nodes for synchronization
1104   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1105   MachCallNode      *mcall;
1106 
1107   int safepoint_pc_offset = current_offset;
1108   bool is_method_handle_invoke = false;
1109   bool return_oop = false;
1110   bool return_scalarized = false;
1111   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1112   bool arg_escape = false;
1113 
1114   // Add the safepoint in the DebugInfoRecorder
1115   if( !mach->is_MachCall() ) {
1116     mcall = NULL;
1117     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1118   } else {
1119     mcall = mach->as_MachCall();
1120 
1121     // Is the call a MethodHandle call?
1122     if (mcall->is_MachCallJava()) {
1123       if (mcall->as_MachCallJava()->_method_handle_invoke) {
1124         assert(C->has_method_handle_invokes(), "must have been set during call generation");
1125         is_method_handle_invoke = true;
1126       }
1127       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1128     }
1129 
1130     // Check if a call returns an object.
1131     if (mcall->returns_pointer() || mcall->returns_scalarized()) {
1132       return_oop = true;
1133     }
1134     if (mcall->returns_scalarized()) {
1135       return_scalarized = true;
1136     }
1137     safepoint_pc_offset += mcall->ret_addr_offset();
1138     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1139   }
1140 
1141   // Loop over the JVMState list to add scope information
1142   // Do not skip safepoints with a NULL method; they need monitor info
1143   JVMState* youngest_jvms = sfn->jvms();
1144   int max_depth = youngest_jvms->depth();
1145 
1146   // Allocate the object pool for scalar-replaced objects -- the map from
1147   // small-integer keys (which can be recorded in the local and ostack
1148   // arrays) to descriptions of the object state.
1149   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1150 
1151   // Visit scopes from oldest to youngest.
1152   for (int depth = 1; depth <= max_depth; depth++) {
1153     JVMState* jvms = youngest_jvms->of_depth(depth);
1154     int idx;
1155     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
1156     // Safepoints that do not have method() set only provide oop-map and monitor info

1240     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1241     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1242 
1243     // Make method available for all Safepoints
1244     ciMethod* scope_method = method ? method : C->method();
1245     // Describe the scope here
1246     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1247     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1248     // Now we can describe the scope.
1249     methodHandle null_mh;
1250     bool rethrow_exception = false;
1251     C->debug_info()->describe_scope(
1252       safepoint_pc_offset,
1253       null_mh,
1254       scope_method,
1255       jvms->bci(),
1256       jvms->should_reexecute(),
1257       rethrow_exception,
1258       is_method_handle_invoke,
1259       return_oop,
1260       return_scalarized,
1261       has_ea_local_in_scope,
1262       arg_escape,
1263       locvals,
1264       expvals,
1265       monvals
1266     );
1267   } // End jvms loop
1268 
1269   // Mark the end of the scope set.
1270   C->debug_info()->end_safepoint(safepoint_pc_offset);
1271 }
1272 
1273 
1274 
1275 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1276 class NonSafepointEmitter {
1277     Compile*  C;
1278     JVMState* _pending_jvms;
1279     int       _pending_offset;
1280 

1618           MachNode *nop = new MachNopNode(nops_cnt);
1619           block->insert_node(nop, j++);
1620           last_inst++;
1621           C->cfg()->map_node_to_block(nop, block);
1622           // Ensure enough space.
1623           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1624           if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1625             C->record_failure("CodeCache is full");
1626             return;
1627           }
1628           nop->emit(*cb, C->regalloc());
1629           cb->flush_bundle(true);
1630           current_offset = cb->insts_size();
1631         }
1632 
1633         bool observe_safepoint = is_sfn;
1634         // Remember the start of the last call in a basic block
1635         if (is_mcall) {
1636           MachCallNode *mcall = mach->as_MachCall();
1637 
1638           if (mcall->entry_point() != NULL) {
1639             // This destination address is NOT PC-relative
1640             mcall->method_set((intptr_t)mcall->entry_point());
1641           }
1642 
1643           // Save the return address
1644           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1645 
1646           observe_safepoint = mcall->guaranteed_safepoint();
1647         }
1648 
1649         // sfn is valid whenever mcall is valid, since MachCallNode inherits from MachSafePointNode
1650         if (observe_safepoint) {
1651           // Handle special safepoint nodes for synchronization
1652           if (!is_mcall) {
1653             MachSafePointNode *sfn = mach->as_MachSafePoint();
1654             // !!!!! Stubs only need an oopmap right now, so bail out
1655             if (sfn->jvms()->method() == NULL) {
1656               // Write the oopmap directly to the code blob??!!
1657               continue;
1658             }
1659           } // End synchronization
1660 
1661           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1785       if ((node_offsets != NULL) && (n->_idx < node_offset_limit)) {
1786         node_offsets[n->_idx] = cb->insts_size();
1787       }
1788 #endif
1789       assert(!C->failing(), "Should not reach here if failing.");
1790 
1791       // "Normal" instruction case
1792       DEBUG_ONLY(uint instr_offset = cb->insts_size());
1793       n->emit(*cb, C->regalloc());
1794       current_offset = cb->insts_size();
1795 
1796       // Above we only verified that there is enough space in the instruction section.
1797       // However, the instruction may emit stubs that cause code buffer expansion.
1798       // Bail out here if expansion failed due to a lack of code cache space.
1799       if (C->failing()) {
1800         return;
1801       }
1802 
1803       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1804              "ret_addr_offset() not within emitted code");
1805 #ifdef ASSERT
1806       uint n_size = n->size(C->regalloc());
1807       if (n_size < (current_offset-instr_offset)) {
1808         MachNode* mach = n->as_Mach();
1809         n->dump();
1810         mach->dump_format(C->regalloc(), tty);
1811         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1812         Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
1813         tty->print_cr(" ------------------- ");
1814         BufferBlob* blob = this->scratch_buffer_blob();
1815         address blob_begin = blob->content_begin();
1816         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1817         assert(false, "wrong size of mach node");
1818       }
1819 #endif
1820       non_safepoints.observe_instruction(n, current_offset);
1821 
1822       // mcall is the last "call" that can be a safepoint;
1823       // record it so we can see if a poll will directly follow it,
1824       // in which case we'll need a pad to make the PcDesc sites unique.

3174         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3175         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3176       }
3177     }
3178     // Do not allow defs of new derived values to float above GC
3179     // points unless the base is definitely available at the GC point.
3180 
3181     Node *m = b->get_node(i);
3182 
3183     // Add precedence edge from following safepoint to use of derived pointer
3184     if( last_safept_node != end_node &&
3185         m != last_safept_node) {
3186       for (uint k = 1; k < m->req(); k++) {
3187         const Type *t = m->in(k)->bottom_type();
3188         if( t->isa_oop_ptr() &&
3189             t->is_ptr()->offset() != 0 ) {
3190           last_safept_node->add_prec( m );
3191           break;
3192         }
3193       }
3194 
3195       // Do not allow a CheckCastPP node whose input is a raw pointer to
3196       // float past a safepoint.  This can occur when a buffered inline
3197       // type is allocated in a loop and the CheckCastPP from that
3198       // allocation is reused outside the loop.  If the use inside the
3199       // loop is scalarized the CheckCastPP will no longer be connected
3200       // to the loop safepoint.  See JDK-8264340.
3201       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
3202         Node *def = m->in(1);
3203         if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
3204           last_safept_node->add_prec(m);
3205         }
3206       }
3207     }
3208 
3209     if( n->jvms() ) {           // Precedence edge from derived to safept
3210       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3211       if( b->get_node(last_safept) != last_safept_node ) {
3212         last_safept = b->find_node(last_safept_node);
3213       }
3214       for( uint j=last_safept; j > i; j-- ) {
3215         Node *mach = b->get_node(j);
3216         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3217           mach->add_prec( n );
3218       }
3219       last_safept = i;
3220       last_safept_node = m;
3221     }
3222   }
3223 
3224   if (fat_proj_seen) {
3225     // Garbage collect pinch nodes that were not consumed.
3226     // They are usually created by a fat kill MachProj for a call.

3345 }
3346 #endif
3347 
3348 //-----------------------init_scratch_buffer_blob------------------------------
3349 // Construct a temporary BufferBlob and cache it for this compile.
3350 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3351   // If there is already a scratch buffer blob allocated and the
3352   // constant section is big enough, use it.  Otherwise free the
3353   // current and allocate a new one.
3354   BufferBlob* blob = scratch_buffer_blob();
3355   if ((blob != NULL) && (const_size <= _scratch_const_size)) {
3356     // Use the current blob.
3357   } else {
3358     if (blob != NULL) {
3359       BufferBlob::free(blob);
3360     }
3361 
3362     ResourceMark rm;
3363     _scratch_const_size = const_size;
3364     int size = C2Compiler::initial_code_buffer_size(const_size);
3365     if (C->has_scalarized_args()) {
3366       // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
3367       // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
3368       ciMethod* method = C->method();
3369       int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
3370       int arg_num = 0;
3371       if (!method->is_static()) {
3372         if (method->is_scalarized_arg(arg_num)) {
3373           size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
3374         }
3375         arg_num++;
3376       }
3377       for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
3378         if (method->is_scalarized_arg(arg_num)) {
3379           size += str.type()->as_inline_klass()->oop_count() * barrier_size;
3380         }
3381         arg_num++;
3382       }
3383     }
3384     blob = BufferBlob::create("Compile::scratch_buffer", size);
3385     // Record the buffer blob for next time.
3386     set_scratch_buffer_blob(blob);
3387     // Have we run out of code space?
3388     if (scratch_buffer_blob() == NULL) {
3389       // Let CompilerBroker disable further compilations.
3390       C->record_failure("Not enough space for scratch buffer in CodeCache");
3391       return;
3392     }
3393   }
3394 
3395   // Initialize the relocation buffers
3396   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3397   set_scratch_locs_memory(locs_buf);
3398 }
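
A worked example of the scratch-buffer sizing above, under stated assumptions (the field count is invented): every oop field of a scalarized argument adds barrier_size bytes, which the patch sets to 200 under ZGC and 7 (+37 in debug builds) otherwise:

#include <cstdio>

int main() {
  const bool use_zgc  = true;
  const bool is_debug = false;
  const int barrier_size = use_zgc ? 200 : (7 + (is_debug ? 37 : 0));
  const int oop_fields   = 3;  // invented oop_count() of the inline klass
  // 3 oop fields under ZGC: 3 * 200 == 600 extra bytes of scratch space.
  std::printf("extra scratch bytes: %d\n", oop_fields * barrier_size);
  return 0;
}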
3399 
3400 
3401 //-----------------------scratch_emit_size-------------------------------------
3402 // Helper function that computes size by emitting code
3403 uint PhaseOutput::scratch_emit_size(const Node* n) {

3428   int lsize = MAX_locs_size / 3;
3429   buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
3430   buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
3431   buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
3432   // Mark as scratch buffer.
3433   buf.consts()->set_scratch_emit();
3434   buf.insts()->set_scratch_emit();
3435   buf.stubs()->set_scratch_emit();
3436 
3437   // Do the emission.
3438 
3439   Label fakeL; // Fake label for branch instructions.
3440   Label*   saveL = NULL;
3441   uint save_bnum = 0;
3442   bool is_branch = n->is_MachBranch();
3443   if (is_branch) {
3444     MacroAssembler masm(&buf);
3445     masm.bind(fakeL);
3446     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3447     n->as_MachBranch()->label_set(&fakeL, 0);
3448   } else if (n->is_MachProlog()) {
3449     saveL = ((MachPrologNode*)n)->_verified_entry;
3450     ((MachPrologNode*)n)->_verified_entry = &fakeL;
3451   } else if (n->is_MachVEP()) {
3452     saveL = ((MachVEPNode*)n)->_verified_entry;
3453     ((MachVEPNode*)n)->_verified_entry = &fakeL;
3454   }
3455   n->emit(buf, C->regalloc());
3456 
3457   // Emitting into the scratch buffer should not fail
3458   assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3459 
3460   // Restore label.
3461   if (is_branch) {
3462     n->as_MachBranch()->label_set(saveL, save_bnum);
3463   } else if (n->is_MachProlog()) {
3464     ((MachPrologNode*)n)->_verified_entry = saveL;
3465   } else if (n->is_MachVEP()) {
3466     ((MachVEPNode*)n)->_verified_entry = saveL;
3467   }
3468 
3469   // End scratch_emit_size section.
3470   set_in_scratch_emit_size(false);
3471 
3472   return buf.insts_size();
3473 }
3474 
3475 void PhaseOutput::install() {
3476   if (!C->should_install_code()) {
3477     return;
3478   } else if (C->stub_function() != NULL) {
3479     install_stub(C->stub_name());
3480   } else {
3481     install_code(C->method(),
3482                  C->entry_bci(),
3483                  CompileBroker::compiler2(),
3484                  C->has_unsafe_access(),
3485                  SharedRuntime::is_wide_vector(C->max_vector_size()),
3486                  C->rtm_state());
3487   }

3491                                int               entry_bci,
3492                                AbstractCompiler* compiler,
3493                                bool              has_unsafe_access,
3494                                bool              has_wide_vectors,
3495                                RTMState          rtm_state) {
3496   // Check if we want to skip execution of all compiled code.
3497   {
3498 #ifndef PRODUCT
3499     if (OptoNoExecute) {
3500       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3501       return;
3502     }
3503 #endif
3504     Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3505 
3506     if (C->is_osr_compilation()) {
3507       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3508       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3509     } else {
3510       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3511       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
3512         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
3513       }
3514       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
3515         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
3516       }
3517       if (_code_offsets.value(CodeOffsets::Entry) == -1) {
3518         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
3519       }
3520       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3521     }
3522 
3523     C->env()->register_method(target,
3524                               entry_bci,
3525                               &_code_offsets,
3526                               _orig_pc_slot_offset_in_bytes,
3527                               code_buffer(),
3528                               frame_size_in_words(),
3529                               _oop_map_set,
3530                               &_handler_table,
3531                               inc_table(),
3532                               compiler,
3533                               has_unsafe_access,
3534                               SharedRuntime::is_wide_vector(C->max_vector_size()),
3535                               C->has_monitors(),
3536                               0,
3537                               C->rtm_state());
3538 
3539     if (C->log() != NULL) { // Print code cache state into compiler log
3540       C->log()->code_cache_state();
3541     }
3542   }
3543 }
3544 void PhaseOutput::install_stub(const char* stub_name) {
3545   // Entry point will be accessed using stub_entry_point();
3546   if (code_buffer() == NULL) {
3547     Matcher::soft_match_failure();
3548   } else {
3549     if (PrintAssembly && (WizardMode || Verbose))
3550       tty->print_cr("### Stub::%s", stub_name);
3551 
3552     if (!C->failing()) {
3553       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3554 
3555       // Make the NMethod
3556       // For now we mark the frame as never safe for profile stackwalking
3557       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,