
src/hotspot/share/opto/output.cpp (old version; the updated version follows below)


  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfo.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/compilerDirectives.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"

  36 #include "gc/shared/c2/barrierSetC2.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/allocation.hpp"
  39 #include "opto/ad.hpp"
  40 #include "opto/block.hpp"
  41 #include "opto/c2compiler.hpp"
  42 #include "opto/callnode.hpp"
  43 #include "opto/cfgnode.hpp"
  44 #include "opto/locknode.hpp"
  45 #include "opto/machnode.hpp"
  46 #include "opto/node.hpp"
  47 #include "opto/optoreg.hpp"
  48 #include "opto/output.hpp"
  49 #include "opto/regalloc.hpp"
  50 #include "opto/runtime.hpp"
  51 #include "opto/subnode.hpp"
  52 #include "opto/type.hpp"
  53 #include "runtime/handles.inline.hpp"
  54 #include "runtime/sharedRuntime.hpp"
  55 #include "utilities/macros.hpp"

 292   return result;
 293 }
 294 
 295 PhaseOutput::PhaseOutput()
 296   : Phase(Phase::Output),
 297     _code_buffer("Compile::Fill_buffer"),
 298     _first_block_size(0),
 299     _handler_table(),
 300     _inc_table(),
 301     _oop_map_set(NULL),
 302     _scratch_buffer_blob(NULL),
 303     _scratch_locs_memory(NULL),
 304     _scratch_const_size(-1),
 305     _in_scratch_emit_size(false),
 306     _frame_slots(0),
 307     _code_offsets(),
 308     _node_bundling_limit(0),
 309     _node_bundling_base(NULL),
 310     _orig_pc_slot(0),
 311     _orig_pc_slot_offset_in_bytes(0),

 312     _buf_sizes(),
 313     _block(NULL),
 314     _index(0) {
 315   C->set_output(this);
 316   if (C->stub_name() == NULL) {
 317     _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);





 318   }
 319 }
 320 
 321 PhaseOutput::~PhaseOutput() {
 322   C->set_output(NULL);
 323   if (_scratch_buffer_blob != NULL) {
 324     BufferBlob::free(_scratch_buffer_blob);
 325   }
 326 }
 327 
 328 void PhaseOutput::perform_mach_node_analysis() {
 329   // Late barrier analysis must be done after schedule and bundle
 330   // Otherwise liveness based spilling will fail
 331   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 332   bs->late_barrier_analysis();
 333 
 334   pd_perform_mach_node_analysis();
 335 }
 336 
 337 // Convert Nodes to instruction bits and pass off to the VM
 338 void PhaseOutput::Output() {
 339   // RootNode goes
 340   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 341 
 342   // The number of new nodes (mostly MachNop) is proportional to
 343   // the number of java calls and inner loops which are aligned.
 344   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 345                             C->inner_loops()*(OptoLoopAlignment-1)),
 346                            "out of nodes before code generation" ) ) {
 347     return;
 348   }
 349   // Make sure I can find the Start Node
 350   Block *entry = C->cfg()->get_block(1);
 351   Block *broot = C->cfg()->get_root_block();
 352 
 353   const StartNode *start = entry->head()->as_Start();
 354 
 355   // Replace StartNode with prolog
 356   MachPrologNode *prolog = new MachPrologNode();

 357   entry->map_node(prolog, 0);
 358   C->cfg()->map_node_to_block(prolog, entry);
 359   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 360 
 361   // Virtual methods need an unverified entry point
 362 
 363   if( C->is_osr_compilation() ) {
 364     if( PoisonOSREntry ) {
 365       // TODO: Should use a ShouldNotReachHereNode...
 366       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 367     }
 368   } else {
 369     if( C->method() && !C->method()->flags().is_static() ) {
 370       // Insert unvalidated entry point
 371       C->cfg()->insert( broot, 0, new MachUEPNode() );











 372     }
 373 
 374   }
 375 
 376   // Break before main entry point
 377   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 378       (OptoBreakpoint && C->is_method_compilation())       ||
 379       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 380       (OptoBreakpointC2R && !C->method())                   ) {
 381     // checking for C->method() means that OptoBreakpoint does not apply to
 382     // runtime stubs or frame converters
 383     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 384   }
 385 
 386   // Insert epilogs before every return
 387   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 388     Block* block = C->cfg()->get_block(i);
 389     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 390       Node* m = block->end();
 391       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 392         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 393         block->add_inst(epilog);
 394         C->cfg()->map_node_to_block(epilog, block);
 395       }
 396     }
 397   }
 398 
 399   // Keeper of sizing aspects
 400   _buf_sizes = BufferSizingData();
 401 
 402   // Initialize code buffer
 403   estimate_buffer_size(_buf_sizes._const);
 404   if (C->failing()) return;
 405 
 406   // Pre-compute the length of blocks and replace
 407   // long branches with short if machine supports it.
 408   // Must be done before ScheduleAndBundle due to SPARC delay slots
 409   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 410   blk_starts[0] = 0;
 411   shorten_branches(blk_starts);
 412 

























 413   ScheduleAndBundle();
 414   if (C->failing()) {
 415     return;
 416   }
 417 
 418   perform_mach_node_analysis();
 419 
 420   // Complete sizing of codebuffer
 421   CodeBuffer* cb = init_buffer();
 422   if (cb == NULL || C->failing()) {
 423     return;
 424   }
 425 
 426   BuildOopMaps();
 427 
 428   if (C->failing())  {
 429     return;
 430   }
 431 
 432   fill_buffer(cb, blk_starts);

 553     // Sum all instruction sizes to compute block size
 554     uint last_inst = block->number_of_nodes();
 555     uint blk_size = 0;
 556     for (uint j = 0; j < last_inst; j++) {
 557       _index = j;
 558       Node* nj = block->get_node(_index);
 559       // Handle machine instruction nodes
 560       if (nj->is_Mach()) {
 561         MachNode* mach = nj->as_Mach();
 562         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 563         reloc_size += mach->reloc();
 564         if (mach->is_MachCall()) {
 565           // add size information for trampoline stub
 566           // class CallStubImpl is platform-specific and defined in the *.ad files.
 567           stub_size  += CallStubImpl::size_call_trampoline();
 568           reloc_size += CallStubImpl::reloc_call_trampoline();
 569 
 570           MachCallNode *mcall = mach->as_MachCall();
 571           // This destination address is NOT PC-relative
 572 
 573           mcall->method_set((intptr_t)mcall->entry_point());


 574 
 575           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 576             stub_size  += CompiledStaticCall::to_interp_stub_size();
 577             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 578           }
 579         } else if (mach->is_MachSafePoint()) {
 580           // If call/safepoint are adjacent, account for possible
 581           // nop to disambiguate the two safepoints.
 582           // ScheduleAndBundle() can rearrange nodes in a block,
 583           // check for all offsets inside this block.
 584           if (last_call_adr >= blk_starts[i]) {
 585             blk_size += nop_size;
 586           }
 587         }
 588         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 589           // Nop is inserted between "avoid back to back" instructions.
 590           // ScheduleAndBundle() can rearrange nodes in a block,
 591           // check for all offsets inside this block.
 592           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 593             blk_size += nop_size;

 808     // New functionality:
 809     //   Assert if the local is not top. In product mode let the new node
 810     //   override the old entry.
 811     assert(local == C->top(), "LocArray collision");
 812     if (local == C->top()) {
 813       return;
 814     }
 815     array->pop();
 816   }
 817   const Type *t = local->bottom_type();
 818 
 819   // Is it a safepoint scalar object node?
 820   if (local->is_SafePointScalarObject()) {
 821     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 822 
 823     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 824     if (sv == NULL) {
 825       ciKlass* cik = t->is_oopptr()->klass();
 826       assert(cik->is_instance_klass() ||
 827              cik->is_array_klass(), "Not supported allocation.");

















 828       ScopeValue* klass_sv = new ConstantOopWriteValue(cik->java_mirror()->constant_encoding());
 829       sv = spobj->is_auto_box() ? new AutoBoxObjectValue(spobj->_idx, klass_sv)
 830                                     : new ObjectValue(spobj->_idx, klass_sv);
 831       set_sv_for_object_node(objs, sv);
 832 
 833       uint first_ind = spobj->first_index(sfpt->jvms());
 834       for (uint i = 0; i < spobj->n_fields(); i++) {
 835         Node* fld_node = sfpt->in(first_ind+i);
 836         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 837       }
 838     }
 839     array->append(sv);
 840     return;
 841   }
 842 
 843   // Grab the register number for the local
 844   OptoReg::Name regnum = C->regalloc()->get_reg_first(local);
 845   if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
 846     // Record the double as two float registers.
 847     // The register mask for such a value always specifies two adjacent
 848     // float registers, with the lower register number even.
 849     // Normally, the allocation of high and low words to these registers
 850     // is irrelevant, because nearly all operations on register pairs
 851     // (e.g., StoreD) treat them as a single unit.
 852     // Here, we assume in addition that the words in these two registers
 853     // stored "naturally" (by operations like StoreD and double stores

 988       break;
 989   }
 990 }
 991 
 992 // Determine if this node starts a bundle
 993 bool PhaseOutput::starts_bundle(const Node *n) const {
 994   return (_node_bundling_limit > n->_idx &&
 995           _node_bundling_base[n->_idx].starts_bundle());
 996 }
 997 
 998 //--------------------------Process_OopMap_Node--------------------------------
 999 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1000   // Handle special safepoint nodes for synchronization
1001   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1002   MachCallNode      *mcall;
1003 
1004   int safepoint_pc_offset = current_offset;
1005   bool is_method_handle_invoke = false;
1006   bool is_opt_native = false;
1007   bool return_oop = false;

1008   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1009   bool arg_escape = false;
1010 
1011   // Add the safepoint in the DebugInfoRecorder
1012   if( !mach->is_MachCall() ) {
1013     mcall = NULL;
1014     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1015   } else {
1016     mcall = mach->as_MachCall();
1017 
1018     // Is the call a MethodHandle call?
1019     if (mcall->is_MachCallJava()) {
1020       if (mcall->as_MachCallJava()->_method_handle_invoke) {
1021         assert(C->has_method_handle_invokes(), "must have been set during call generation");
1022         is_method_handle_invoke = true;
1023       }
1024       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1025     } else if (mcall->is_MachCallNative()) {
1026       is_opt_native = true;
1027     }
1028 
1029     // Check if a call returns an object.
1030     if (mcall->returns_pointer()) {
1031       return_oop = true;
1032     }



1033     safepoint_pc_offset += mcall->ret_addr_offset();
1034     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1035   }
1036 
1037   // Loop over the JVMState list to add scope information
1038   // Do not skip safepoints with a NULL method, they need monitor info
1039   JVMState* youngest_jvms = sfn->jvms();
1040   int max_depth = youngest_jvms->depth();
1041 
1042   // Allocate the object pool for scalar-replaced objects -- the map from
1043   // small-integer keys (which can be recorded in the local and ostack
1044   // arrays) to descriptions of the object state.
1045   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1046 
1047   // Visit scopes from oldest to youngest.
1048   for (int depth = 1; depth <= max_depth; depth++) {
1049     JVMState* jvms = youngest_jvms->of_depth(depth);
1050     int idx;
1051     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
1052     // Safepoints that do not have method() set only provide oop-map and monitor info

1138     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1139 
1140     // Make method available for all Safepoints
1141     ciMethod* scope_method = method ? method : C->method();
1142     // Describe the scope here
1143     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1144     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1145     // Now we can describe the scope.
1146     methodHandle null_mh;
1147     bool rethrow_exception = false;
1148     C->debug_info()->describe_scope(
1149       safepoint_pc_offset,
1150       null_mh,
1151       scope_method,
1152       jvms->bci(),
1153       jvms->should_reexecute(),
1154       rethrow_exception,
1155       is_method_handle_invoke,
1156       is_opt_native,
1157       return_oop,

1158       has_ea_local_in_scope,
1159       arg_escape,
1160       locvals,
1161       expvals,
1162       monvals
1163     );
1164   } // End jvms loop
1165 
1166   // Mark the end of the scope set.
1167   C->debug_info()->end_safepoint(safepoint_pc_offset);
1168 }
1169 
1170 
1171 
1172 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1173 class NonSafepointEmitter {
1174     Compile*  C;
1175     JVMState* _pending_jvms;
1176     int       _pending_offset;
1177 

1514           MachNode *nop = new MachNopNode(nops_cnt);
1515           block->insert_node(nop, j++);
1516           last_inst++;
1517           C->cfg()->map_node_to_block(nop, block);
1518           // Ensure enough space.
1519           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1520           if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1521             C->record_failure("CodeCache is full");
1522             return;
1523           }
1524           nop->emit(*cb, C->regalloc());
1525           cb->flush_bundle(true);
1526           current_offset = cb->insts_size();
1527         }
1528 
1529         bool observe_safepoint = is_sfn;
1530         // Remember the start of the last call in a basic block
1531         if (is_mcall) {
1532           MachCallNode *mcall = mach->as_MachCall();
1533 
1534           // This destination address is NOT PC-relative
1535           mcall->method_set((intptr_t)mcall->entry_point());


1536 
1537           // Save the return address
1538           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1539 
1540           observe_safepoint = mcall->guaranteed_safepoint();
1541         }
1542 
1543         // sfn will be valid whenever mcall is valid now because of inheritance
1544         if (observe_safepoint) {
1545           // Handle special safepoint nodes for synchronization
1546           if (!is_mcall) {
1547             MachSafePointNode *sfn = mach->as_MachSafePoint();
1548             // !!!!! Stubs only need an oopmap right now, so bail out
1549             if (sfn->jvms()->method() == NULL) {
1550               // Write the oopmap directly to the code blob??!!
1551               continue;
1552             }
1553           } // End synchronization
1554 
1555           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1679       if ((node_offsets != NULL) && (n->_idx < node_offset_limit)) {
1680         node_offsets[n->_idx] = cb->insts_size();
1681       }
1682 #endif
1683       assert(!C->failing(), "Should not reach here if failing.");
1684 
1685       // "Normal" instruction case
1686       DEBUG_ONLY(uint instr_offset = cb->insts_size());
1687       n->emit(*cb, C->regalloc());
1688       current_offset = cb->insts_size();
1689 
1690       // Above we only verified that there is enough space in the instruction section.
1691       // However, the instruction may emit stubs that cause code buffer expansion.
1692       // Bail out here if expansion failed due to a lack of code cache space.
1693       if (C->failing()) {
1694         return;
1695       }
1696 
1697       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1698              "ret_addr_offset() not within emitted code");
1699 
1700 #ifdef ASSERT
1701       uint n_size = n->size(C->regalloc());
1702       if (n_size < (current_offset-instr_offset)) {
1703         MachNode* mach = n->as_Mach();
1704         n->dump();
1705         mach->dump_format(C->regalloc(), tty);
1706         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1707         Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
1708         tty->print_cr(" ------------------- ");
1709         BufferBlob* blob = this->scratch_buffer_blob();
1710         address blob_begin = blob->content_begin();
1711         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1712         assert(false, "wrong size of mach node");
1713       }
1714 #endif
1715       non_safepoints.observe_instruction(n, current_offset);
1716 
1717       // mcall is last "call" that can be a safepoint
1718       // record it so we can see if a poll will directly follow it
1719       // in which case we'll need a pad to make the PcDesc sites unique

3092         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3093         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3094       }
3095     }
3096     // Do not allow defs of new derived values to float above GC
3097     // points unless the base is definitely available at the GC point.
3098 
3099     Node *m = b->get_node(i);
3100 
3101     // Add precedence edge from following safepoint to use of derived pointer
3102     if( last_safept_node != end_node &&
3103         m != last_safept_node) {
3104       for (uint k = 1; k < m->req(); k++) {
3105         const Type *t = m->in(k)->bottom_type();
3106         if( t->isa_oop_ptr() &&
3107             t->is_ptr()->offset() != 0 ) {
3108           last_safept_node->add_prec( m );
3109           break;
3110         }
3111       }













3112     }
3113 
3114     if( n->jvms() ) {           // Precedence edge from derived to safept
3115       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3116       if( b->get_node(last_safept) != last_safept_node ) {
3117         last_safept = b->find_node(last_safept_node);
3118       }
3119       for( uint j=last_safept; j > i; j-- ) {
3120         Node *mach = b->get_node(j);
3121         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3122           mach->add_prec( n );
3123       }
3124       last_safept = i;
3125       last_safept_node = m;
3126     }
3127   }
3128 
3129   if (fat_proj_seen) {
3130     // Garbage collect pinch nodes that were not consumed.
3131     // They are usually created by a fat kill MachProj for a call.

3250 }
3251 #endif
3252 
3253 //-----------------------init_scratch_buffer_blob------------------------------
3254 // Construct a temporary BufferBlob and cache it for this compile.
3255 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3256   // If there is already a scratch buffer blob allocated and the
3257   // constant section is big enough, use it.  Otherwise free the
3258   // current and allocate a new one.
3259   BufferBlob* blob = scratch_buffer_blob();
3260   if ((blob != NULL) && (const_size <= _scratch_const_size)) {
3261     // Use the current blob.
3262   } else {
3263     if (blob != NULL) {
3264       BufferBlob::free(blob);
3265     }
3266 
3267     ResourceMark rm;
3268     _scratch_const_size = const_size;
3269     int size = C2Compiler::initial_code_buffer_size(const_size);










3270     blob = BufferBlob::create("Compile::scratch_buffer", size);
3271     // Record the buffer blob for next time.
3272     set_scratch_buffer_blob(blob);
3273     // Have we run out of code space?
3274     if (scratch_buffer_blob() == NULL) {
3275       // Let CompilerBroker disable further compilations.
3276       C->record_failure("Not enough space for scratch buffer in CodeCache");
3277       return;
3278     }
3279   }
3280 
3281   // Initialize the relocation buffers
3282   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3283   set_scratch_locs_memory(locs_buf);
3284 }
3285 
3286 
3287 //-----------------------scratch_emit_size-------------------------------------
3288 // Helper function that computes size by emitting code
3289 uint PhaseOutput::scratch_emit_size(const Node* n) {

3314   int lsize = MAX_locs_size / 3;
3315   buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
3316   buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
3317   buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
3318   // Mark as scratch buffer.
3319   buf.consts()->set_scratch_emit();
3320   buf.insts()->set_scratch_emit();
3321   buf.stubs()->set_scratch_emit();
3322 
3323   // Do the emission.
3324 
3325   Label fakeL; // Fake label for branch instructions.
3326   Label*   saveL = NULL;
3327   uint save_bnum = 0;
3328   bool is_branch = n->is_MachBranch();
3329   if (is_branch) {
3330     MacroAssembler masm(&buf);
3331     masm.bind(fakeL);
3332     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3333     n->as_MachBranch()->label_set(&fakeL, 0);






3334   }
3335   n->emit(buf, C->regalloc());
3336 
3337   // Emitting into the scratch buffer should not fail
3338   assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3339 
3340   if (is_branch) // Restore label.

3341     n->as_MachBranch()->label_set(saveL, save_bnum);





3342 
3343   // End scratch_emit_size section.
3344   set_in_scratch_emit_size(false);
3345 
3346   return buf.insts_size();
3347 }
3348 
3349 void PhaseOutput::install() {
3350   if (!C->should_install_code()) {
3351     return;
3352   } else if (C->stub_function() != NULL) {
3353     install_stub(C->stub_name());
3354   } else {
3355     install_code(C->method(),
3356                  C->entry_bci(),
3357                  CompileBroker::compiler2(),
3358                  C->has_unsafe_access(),
3359                  SharedRuntime::is_wide_vector(C->max_vector_size()),
3360                  C->rtm_state());
3361   }

3365                                int               entry_bci,
3366                                AbstractCompiler* compiler,
3367                                bool              has_unsafe_access,
3368                                bool              has_wide_vectors,
3369                                RTMState          rtm_state) {
3370   // Check if we want to skip execution of all compiled code.
3371   {
3372 #ifndef PRODUCT
3373     if (OptoNoExecute) {
3374       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3375       return;
3376     }
3377 #endif
3378     Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3379 
3380     if (C->is_osr_compilation()) {
3381       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3382       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3383     } else {
3384       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);









3385       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3386     }
3387 
3388     C->env()->register_method(target,
3389                                      entry_bci,
3390                                      &_code_offsets,
3391                                      _orig_pc_slot_offset_in_bytes,
3392                                      code_buffer(),
3393                                      frame_size_in_words(),
3394                                      oop_map_set(),
3395                                      &_handler_table,
3396                                      inc_table(),
3397                                      compiler,
3398                                      has_unsafe_access,
3399                                      SharedRuntime::is_wide_vector(C->max_vector_size()),
3400                                      C->rtm_state(),
3401                                      C->native_invokers());
3402 
3403     if (C->log() != NULL) { // Print code cache state into compiler log
3404       C->log()->code_cache_state();
3405     }
3406   }
3407 }
3408 void PhaseOutput::install_stub(const char* stub_name) {
3409   // Entry point will be accessed using stub_entry_point();
3410   if (code_buffer() == NULL) {
3411     Matcher::soft_match_failure();
3412   } else {
3413     if (PrintAssembly && (WizardMode || Verbose))
3414       tty->print_cr("### Stub::%s", stub_name);
3415 
3416     if (!C->failing()) {
3417       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3418 
3419       // Make the NMethod
3420       // For now we mark the frame as never safe for profile stackwalking
3421       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,

src/hotspot/share/opto/output.cpp (new version)

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfo.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/compilerDirectives.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/gc_globals.hpp"
  37 #include "gc/shared/c2/barrierSetC2.hpp"
  38 #include "memory/allocation.inline.hpp"
  39 #include "memory/allocation.hpp"
  40 #include "opto/ad.hpp"
  41 #include "opto/block.hpp"
  42 #include "opto/c2compiler.hpp"
  43 #include "opto/callnode.hpp"
  44 #include "opto/cfgnode.hpp"
  45 #include "opto/locknode.hpp"
  46 #include "opto/machnode.hpp"
  47 #include "opto/node.hpp"
  48 #include "opto/optoreg.hpp"
  49 #include "opto/output.hpp"
  50 #include "opto/regalloc.hpp"
  51 #include "opto/runtime.hpp"
  52 #include "opto/subnode.hpp"
  53 #include "opto/type.hpp"
  54 #include "runtime/handles.inline.hpp"
  55 #include "runtime/sharedRuntime.hpp"
  56 #include "utilities/macros.hpp"

 293   return result;
 294 }
 295 
 296 PhaseOutput::PhaseOutput()
 297   : Phase(Phase::Output),
 298     _code_buffer("Compile::Fill_buffer"),
 299     _first_block_size(0),
 300     _handler_table(),
 301     _inc_table(),
 302     _oop_map_set(NULL),
 303     _scratch_buffer_blob(NULL),
 304     _scratch_locs_memory(NULL),
 305     _scratch_const_size(-1),
 306     _in_scratch_emit_size(false),
 307     _frame_slots(0),
 308     _code_offsets(),
 309     _node_bundling_limit(0),
 310     _node_bundling_base(NULL),
 311     _orig_pc_slot(0),
 312     _orig_pc_slot_offset_in_bytes(0),
 313     _sp_inc_slot(0),
 314     _buf_sizes(),
 315     _block(NULL),
 316     _index(0) {
 317   C->set_output(this);
 318   if (C->stub_name() == NULL) {
 319     int fixed_slots = C->fixed_slots();
 320     if (C->needs_stack_repair()) {
 321       fixed_slots -= 2;
 322       _sp_inc_slot = fixed_slots;
 323     }
 324     _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
 325   }
 326 }
 327 
 328 PhaseOutput::~PhaseOutput() {
 329   C->set_output(NULL);
 330   if (_scratch_buffer_blob != NULL) {
 331     BufferBlob::free(_scratch_buffer_blob);
 332   }
 333 }
 334 
 335 void PhaseOutput::perform_mach_node_analysis() {
 336   // Late barrier analysis must be done after schedule and bundle
 337   // Otherwise liveness based spilling will fail
 338   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 339   bs->late_barrier_analysis();
 340 
 341   pd_perform_mach_node_analysis();
 342 }
 343 
 344 // Convert Nodes to instruction bits and pass off to the VM
 345 void PhaseOutput::Output() {
 346   // RootNode goes
 347   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 348 
 349   // The number of new nodes (mostly MachNop) is proportional to
 350   // the number of java calls and inner loops which are aligned.
 351   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 352                             C->inner_loops()*(OptoLoopAlignment-1)),
 353                            "out of nodes before code generation" ) ) {
 354     return;
 355   }
 356   // Make sure I can find the Start Node
 357   Block *entry = C->cfg()->get_block(1);
 358   Block *broot = C->cfg()->get_root_block();
 359 
 360   const StartNode *start = entry->head()->as_Start();
 361 
 362   // Replace StartNode with prolog
 363   Label verified_entry;
 364   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
 365   entry->map_node(prolog, 0);
 366   C->cfg()->map_node_to_block(prolog, entry);
 367   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 368 
 369   // Virtual methods need an unverified entry point
 370   if (C->is_osr_compilation()) {
 371     if (PoisonOSREntry) {

 372       // TODO: Should use a ShouldNotReachHereNode...
 373       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 374     }
 375   } else {
 376     if (C->method()) {
 377       if (C->method()->has_scalarized_args()) {
 378         // Add entry point to unpack all inline type arguments
 379         C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
 380         if (!C->method()->is_static()) {
 381           // Add verified/unverified entry points to only unpack inline type receiver at interface calls
 382           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
 383           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true,  /* receiver_only */ true));
 384           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
 385         }
 386       } else if (!C->method()->is_static()) {
 387         // Insert unvalidated entry point
 388         C->cfg()->insert(broot, 0, new MachUEPNode());
 389       }
 390     }

 391   }
 392 
 393   // Break before main entry point
 394   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 395       (OptoBreakpoint && C->is_method_compilation())       ||
 396       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 397       (OptoBreakpointC2R && !C->method())                   ) {
 398     // checking for C->method() means that OptoBreakpoint does not apply to
 399     // runtime stubs or frame converters
 400     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 401   }
 402 
 403   // Insert epilogs before every return
 404   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 405     Block* block = C->cfg()->get_block(i);
 406     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 407       Node* m = block->end();
 408       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 409         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 410         block->add_inst(epilog);
 411         C->cfg()->map_node_to_block(epilog, block);
 412       }
 413     }
 414   }
 415 
 416   // Keeper of sizing aspects
 417   _buf_sizes = BufferSizingData();
 418 
 419   // Initialize code buffer
 420   estimate_buffer_size(_buf_sizes._const);
 421   if (C->failing()) return;
 422 
 423   // Pre-compute the length of blocks and replace
 424   // long branches with short if machine supports it.
 425   // Must be done before ScheduleAndBundle due to SPARC delay slots
 426   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 427   blk_starts[0] = 0;
 428   shorten_branches(blk_starts);
 429 
 430   if (!C->is_osr_compilation() && C->has_scalarized_args()) {
 431     // Compute the offsets of the entry points required by the inline type calling convention
 432     if (!C->method()->is_static()) {
 433       // We have entries at the beginning of the method, implemented by the first 4 nodes.
 434       // Entry                     (unverified) @ offset 0
 435       // Verified_Inline_Entry_RO
 436       // Inline_Entry              (unverified)
 437       // Verified_Inline_Entry
 438       uint offset = 0;
 439       _code_offsets.set_value(CodeOffsets::Entry, offset);
 440 
 441       offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
 442       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
 443 
 444       offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
 445       _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
 446 
 447       offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
 448       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
 449     } else {
 450       _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
 451       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
 452     }
 453   }
 454 
 455   ScheduleAndBundle();
 456   if (C->failing()) {
 457     return;
 458   }
 459 
 460   perform_mach_node_analysis();
 461 
 462   // Complete sizing of codebuffer
 463   CodeBuffer* cb = init_buffer();
 464   if (cb == NULL || C->failing()) {
 465     return;
 466   }
 467 
 468   BuildOopMaps();
 469 
 470   if (C->failing())  {
 471     return;
 472   }
 473 
 474   fill_buffer(cb, blk_starts);
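Editor's note on the hunk above: when the method has scalarized inline-type arguments, Output() now records one code offset per entry point by accumulating the sizes of the MachVEPNode stubs placed at the head of the root block. The following is a minimal standalone sketch of that cumulative-offset bookkeeping only; it is plain C++, not HotSpot code, and the stub sizes are made-up placeholders (real sizes come from MachVEPNode::size(C->regalloc()) in the diff).

#include <cstdio>

int main() {
  // Hypothetical sizes (bytes) of the three unpacking stubs emitted ahead
  // of the verified inline entry.
  const int vep_sizes[3] = {16, 24, 16};

  int offset = 0;
  int entry                    = offset;   // unverified entry at offset 0
  offset += vep_sizes[0];
  int verified_inline_entry_ro = offset;   // after the first stub
  offset += vep_sizes[1];
  int inline_entry             = offset;   // unverified inline entry
  offset += vep_sizes[2];
  int verified_inline_entry    = offset;   // main verified entry

  printf("Entry                    = %d\n", entry);
  printf("Verified_Inline_Entry_RO = %d\n", verified_inline_entry_ro);
  printf("Inline_Entry             = %d\n", inline_entry);
  printf("Verified_Inline_Entry    = %d\n", verified_inline_entry);
  return 0;
}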

 595     // Sum all instruction sizes to compute block size
 596     uint last_inst = block->number_of_nodes();
 597     uint blk_size = 0;
 598     for (uint j = 0; j < last_inst; j++) {
 599       _index = j;
 600       Node* nj = block->get_node(_index);
 601       // Handle machine instruction nodes
 602       if (nj->is_Mach()) {
 603         MachNode* mach = nj->as_Mach();
 604         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 605         reloc_size += mach->reloc();
 606         if (mach->is_MachCall()) {
 607           // add size information for trampoline stub
 608           // class CallStubImpl is platform-specific and defined in the *.ad files.
 609           stub_size  += CallStubImpl::size_call_trampoline();
 610           reloc_size += CallStubImpl::reloc_call_trampoline();
 611 
 612           MachCallNode *mcall = mach->as_MachCall();
 613           // This destination address is NOT PC-relative
 614 
 615           if (mcall->entry_point() != NULL) {
 616             mcall->method_set((intptr_t)mcall->entry_point());
 617           }
 618 
 619           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 620             stub_size  += CompiledStaticCall::to_interp_stub_size();
 621             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 622           }
 623         } else if (mach->is_MachSafePoint()) {
 624           // If call/safepoint are adjacent, account for possible
 625           // nop to disambiguate the two safepoints.
 626           // ScheduleAndBundle() can rearrange nodes in a block,
 627           // check for all offsets inside this block.
 628           if (last_call_adr >= blk_starts[i]) {
 629             blk_size += nop_size;
 630           }
 631         }
 632         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 633           // Nop is inserted between "avoid back to back" instructions.
 634           // ScheduleAndBundle() can rearrange nodes in a block,
 635           // check for all offsets inside this block.
 636           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 637             blk_size += nop_size;

 852     // New functionality:
 853     //   Assert if the local is not top. In product mode let the new node
 854     //   override the old entry.
 855     assert(local == C->top(), "LocArray collision");
 856     if (local == C->top()) {
 857       return;
 858     }
 859     array->pop();
 860   }
 861   const Type *t = local->bottom_type();
 862 
 863   // Is it a safepoint scalar object node?
 864   if (local->is_SafePointScalarObject()) {
 865     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 866 
 867     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 868     if (sv == NULL) {
 869       ciKlass* cik = t->is_oopptr()->klass();
 870       assert(cik->is_instance_klass() ||
 871              cik->is_array_klass(), "Not supported allocation.");
 872       uint first_ind = spobj->first_index(sfpt->jvms());
 873       // Nullable, scalarized inline types have an is_init input
 874       // that needs to be checked before using the field values.
 875       ScopeValue* is_init = NULL;
 876       if (cik->is_inlinetype()) {
 877         Node* init_node = sfpt->in(first_ind++);
 878         assert(init_node != NULL, "is_init node not found");
 879         if (!init_node->is_top()) {
 880           const TypeInt* init_type = init_node->bottom_type()->is_int();
 881           if (init_node->is_Con()) {
 882             is_init = new ConstantIntValue(init_type->get_con());
 883           } else {
 884             OptoReg::Name init_reg = C->regalloc()->get_reg_first(init_node);
 885             is_init = new_loc_value(C->regalloc(), init_reg, Location::normal);
 886           }
 887         }
 888       }
 889       ScopeValue* klass_sv = new ConstantOopWriteValue(cik->java_mirror()->constant_encoding());
 890       sv = spobj->is_auto_box() ? new AutoBoxObjectValue(spobj->_idx, klass_sv)
 891                                     : new ObjectValue(spobj->_idx, klass_sv, is_init);
 892       set_sv_for_object_node(objs, sv);
 893 

 894       for (uint i = 0; i < spobj->n_fields(); i++) {
 895         Node* fld_node = sfpt->in(first_ind+i);
 896         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 897       }
 898     }
 899     array->append(sv);
 900     return;
 901   }
 902 
 903   // Grab the register number for the local
 904   OptoReg::Name regnum = C->regalloc()->get_reg_first(local);
 905   if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
 906     // Record the double as two float registers.
 907     // The register mask for such a value always specifies two adjacent
 908     // float registers, with the lower register number even.
 909     // Normally, the allocation of high and low words to these registers
 910     // is irrelevant, because nearly all operations on register pairs
 911     // (e.g., StoreD) treat them as a single unit.
 912     // Here, we assume in addition that the words in these two registers
 913     // stored "naturally" (by operations like StoreD and double stores
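Editor's note on the FillLocArray hunk above: a nullable, scalarized inline type contributes one extra safepoint input (is_init) ahead of its field values, which is why first_ind is advanced before the field loop. Below is a minimal standalone sketch of that input layout; it uses hypothetical stand-in types and values, not HotSpot's node or ScopeValue classes.

#include <cstdio>
#include <vector>

// Inputs for one scalar-replaced object at a safepoint, as laid out above:
// [is_init][field 0]...[field n-1] for nullable inline types,
// or just [field 0]...[field n-1] for ordinary objects.
struct ScalarObjectInputs {
  bool has_is_init;         // true for nullable scalarized inline types
  std::vector<int> inputs;  // stand-ins for the safepoint input nodes
};

static void read_object(const ScalarObjectInputs& obj) {
  size_t idx = 0;
  if (obj.has_is_init) {
    printf("is_init flag  = input[%zu] (%d)\n", idx, obj.inputs[idx]);
    idx++;  // field values start after the flag, matching first_ind++ above
  }
  for (size_t f = 0; idx < obj.inputs.size(); idx++, f++) {
    printf("field %zu value = input[%zu] (%d)\n", f, idx, obj.inputs[idx]);
  }
}

int main() {
  read_object({/*has_is_init=*/true,  {1, 42, 7}});  // inline type: flag + 2 fields
  read_object({/*has_is_init=*/false, {42, 7}});     // ordinary object: 2 fields
  return 0;
}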

1048       break;
1049   }
1050 }
1051 
1052 // Determine if this node starts a bundle
1053 bool PhaseOutput::starts_bundle(const Node *n) const {
1054   return (_node_bundling_limit > n->_idx &&
1055           _node_bundling_base[n->_idx].starts_bundle());
1056 }
1057 
1058 //--------------------------Process_OopMap_Node--------------------------------
1059 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1060   // Handle special safepoint nodes for synchronization
1061   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1062   MachCallNode      *mcall;
1063 
1064   int safepoint_pc_offset = current_offset;
1065   bool is_method_handle_invoke = false;
1066   bool is_opt_native = false;
1067   bool return_oop = false;
1068   bool return_scalarized = false;
1069   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1070   bool arg_escape = false;
1071 
1072   // Add the safepoint in the DebugInfoRecorder
1073   if( !mach->is_MachCall() ) {
1074     mcall = NULL;
1075     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1076   } else {
1077     mcall = mach->as_MachCall();
1078 
1079     // Is the call a MethodHandle call?
1080     if (mcall->is_MachCallJava()) {
1081       if (mcall->as_MachCallJava()->_method_handle_invoke) {
1082         assert(C->has_method_handle_invokes(), "must have been set during call generation");
1083         is_method_handle_invoke = true;
1084       }
1085       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1086     } else if (mcall->is_MachCallNative()) {
1087       is_opt_native = true;
1088     }
1089 
1090     // Check if a call returns an object.
1091     if (mcall->returns_pointer() || mcall->returns_scalarized()) {
1092       return_oop = true;
1093     }
1094     if (mcall->returns_scalarized()) {
1095       return_scalarized = true;
1096     }
1097     safepoint_pc_offset += mcall->ret_addr_offset();
1098     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1099   }
1100 
1101   // Loop over the JVMState list to add scope information
1102   // Do not skip safepoints with a NULL method, they need monitor info
1103   JVMState* youngest_jvms = sfn->jvms();
1104   int max_depth = youngest_jvms->depth();
1105 
1106   // Allocate the object pool for scalar-replaced objects -- the map from
1107   // small-integer keys (which can be recorded in the local and ostack
1108   // arrays) to descriptions of the object state.
1109   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1110 
1111   // Visit scopes from oldest to youngest.
1112   for (int depth = 1; depth <= max_depth; depth++) {
1113     JVMState* jvms = youngest_jvms->of_depth(depth);
1114     int idx;
1115     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
1116     // Safepoints that do not have method() set only provide oop-map and monitor info

1202     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1203 
1204     // Make method available for all Safepoints
1205     ciMethod* scope_method = method ? method : C->method();
1206     // Describe the scope here
1207     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1208     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1209     // Now we can describe the scope.
1210     methodHandle null_mh;
1211     bool rethrow_exception = false;
1212     C->debug_info()->describe_scope(
1213       safepoint_pc_offset,
1214       null_mh,
1215       scope_method,
1216       jvms->bci(),
1217       jvms->should_reexecute(),
1218       rethrow_exception,
1219       is_method_handle_invoke,
1220       is_opt_native,
1221       return_oop,
1222       return_scalarized,
1223       has_ea_local_in_scope,
1224       arg_escape,
1225       locvals,
1226       expvals,
1227       monvals
1228     );
1229   } // End jvms loop
1230 
1231   // Mark the end of the scope set.
1232   C->debug_info()->end_safepoint(safepoint_pc_offset);
1233 }
1234 
1235 
1236 
1237 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1238 class NonSafepointEmitter {
1239     Compile*  C;
1240     JVMState* _pending_jvms;
1241     int       _pending_offset;
1242 

1579           MachNode *nop = new MachNopNode(nops_cnt);
1580           block->insert_node(nop, j++);
1581           last_inst++;
1582           C->cfg()->map_node_to_block(nop, block);
1583           // Ensure enough space.
1584           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1585           if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1586             C->record_failure("CodeCache is full");
1587             return;
1588           }
1589           nop->emit(*cb, C->regalloc());
1590           cb->flush_bundle(true);
1591           current_offset = cb->insts_size();
1592         }
1593 
1594         bool observe_safepoint = is_sfn;
1595         // Remember the start of the last call in a basic block
1596         if (is_mcall) {
1597           MachCallNode *mcall = mach->as_MachCall();
1598 
1599           if (mcall->entry_point() != NULL) {
1600             // This destination address is NOT PC-relative
1601             mcall->method_set((intptr_t)mcall->entry_point());
1602           }
1603 
1604           // Save the return address
1605           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1606 
1607           observe_safepoint = mcall->guaranteed_safepoint();
1608         }
1609 
1610         // sfn will be valid whenever mcall is valid now because of inheritance
1611         if (observe_safepoint) {
1612           // Handle special safepoint nodes for synchronization
1613           if (!is_mcall) {
1614             MachSafePointNode *sfn = mach->as_MachSafePoint();
1615             // !!!!! Stubs only need an oopmap right now, so bail out
1616             if (sfn->jvms()->method() == NULL) {
1617               // Write the oopmap directly to the code blob??!!
1618               continue;
1619             }
1620           } // End synchronization
1621 
1622           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1746       if ((node_offsets != NULL) && (n->_idx < node_offset_limit)) {
1747         node_offsets[n->_idx] = cb->insts_size();
1748       }
1749 #endif
1750       assert(!C->failing(), "Should not reach here if failing.");
1751 
1752       // "Normal" instruction case
1753       DEBUG_ONLY(uint instr_offset = cb->insts_size());
1754       n->emit(*cb, C->regalloc());
1755       current_offset = cb->insts_size();
1756 
1757       // Above we only verified that there is enough space in the instruction section.
1758       // However, the instruction may emit stubs that cause code buffer expansion.
1759       // Bail out here if expansion failed due to a lack of code cache space.
1760       if (C->failing()) {
1761         return;
1762       }
1763 
1764       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1765              "ret_addr_offset() not within emitted code");

1766 #ifdef ASSERT
1767       uint n_size = n->size(C->regalloc());
1768       if (n_size < (current_offset-instr_offset)) {
1769         MachNode* mach = n->as_Mach();
1770         n->dump();
1771         mach->dump_format(C->regalloc(), tty);
1772         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1773         Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
1774         tty->print_cr(" ------------------- ");
1775         BufferBlob* blob = this->scratch_buffer_blob();
1776         address blob_begin = blob->content_begin();
1777         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1778         assert(false, "wrong size of mach node");
1779       }
1780 #endif
1781       non_safepoints.observe_instruction(n, current_offset);
1782 
1783       // mcall is last "call" that can be a safepoint
1784       // record it so we can see if a poll will directly follow it
1785       // in which case we'll need a pad to make the PcDesc sites unique

3158         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3159         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3160       }
3161     }
3162     // Do not allow defs of new derived values to float above GC
3163     // points unless the base is definitely available at the GC point.
3164 
3165     Node *m = b->get_node(i);
3166 
3167     // Add precedence edge from following safepoint to use of derived pointer
3168     if( last_safept_node != end_node &&
3169         m != last_safept_node) {
3170       for (uint k = 1; k < m->req(); k++) {
3171         const Type *t = m->in(k)->bottom_type();
3172         if( t->isa_oop_ptr() &&
3173             t->is_ptr()->offset() != 0 ) {
3174           last_safept_node->add_prec( m );
3175           break;
3176         }
3177       }
3178 
3179       // Do not allow a CheckCastPP node whose input is a raw pointer to
3180       // float past a safepoint.  This can occur when a buffered inline
3181       // type is allocated in a loop and the CheckCastPP from that
3182       // allocation is reused outside the loop.  If the use inside the
3183       // loop is scalarized the CheckCastPP will no longer be connected
3184       // to the loop safepoint.  See JDK-8264340.
3185       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
3186         Node *def = m->in(1);
3187         if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
3188           last_safept_node->add_prec(m);
3189         }
3190       }
3191     }
3192 
3193     if( n->jvms() ) {           // Precedence edge from derived to safept
3194       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3195       if( b->get_node(last_safept) != last_safept_node ) {
3196         last_safept = b->find_node(last_safept_node);
3197       }
3198       for( uint j=last_safept; j > i; j-- ) {
3199         Node *mach = b->get_node(j);
3200         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3201           mach->add_prec( n );
3202       }
3203       last_safept = i;
3204       last_safept_node = m;
3205     }
3206   }
3207 
3208   if (fat_proj_seen) {
3209     // Garbage collect pinch nodes that were not consumed.
3210     // They are usually created by a fat kill MachProj for a call.

3329 }
3330 #endif
3331 
3332 //-----------------------init_scratch_buffer_blob------------------------------
3333 // Construct a temporary BufferBlob and cache it for this compile.
3334 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3335   // If there is already a scratch buffer blob allocated and the
3336   // constant section is big enough, use it.  Otherwise free the
3337   // current and allocate a new one.
3338   BufferBlob* blob = scratch_buffer_blob();
3339   if ((blob != NULL) && (const_size <= _scratch_const_size)) {
3340     // Use the current blob.
3341   } else {
3342     if (blob != NULL) {
3343       BufferBlob::free(blob);
3344     }
3345 
3346     ResourceMark rm;
3347     _scratch_const_size = const_size;
3348     int size = C2Compiler::initial_code_buffer_size(const_size);
3349     if (C->has_scalarized_args()) {
3350       // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
3351       // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
3352       int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
3353       for (ciSignatureStream str(C->method()->signature()); !str.at_return_type(); str.next()) {
3354         if (str.is_null_free() && str.type()->as_inline_klass()->can_be_passed_as_fields()) {
3355           size += str.type()->as_inline_klass()->oop_count() * barrier_size;
3356         }
3357       }
3358     }
3359     blob = BufferBlob::create("Compile::scratch_buffer", size);
3360     // Record the buffer blob for next time.
3361     set_scratch_buffer_blob(blob);
3362     // Have we run out of code space?
3363     if (scratch_buffer_blob() == NULL) {
3364       // Let CompilerBroker disable further compilations.
3365       C->record_failure("Not enough space for scratch buffer in CodeCache");
3366       return;
3367     }
3368   }
3369 
3370   // Initialize the relocation buffers
3371   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3372   set_scratch_locs_memory(locs_buf);
3373 }
3374 
3375 
3376 //-----------------------scratch_emit_size-------------------------------------
3377 // Helper function that computes size by emitting code
3378 uint PhaseOutput::scratch_emit_size(const Node* n) {
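Editor's note on the init_scratch_buffer_blob hunk above: when the method has scalarized arguments, the scratch buffer is grown by a per-oop-field allowance for every null-free inline-type parameter that can be passed as fields. The standalone sketch below only reproduces that sizing arithmetic; the base size and the signature's oop counts are illustrative placeholders, while the barrier_size values (200 under ZGC, otherwise 7 plus 37 in debug builds) are taken from the diff.

#include <cstdio>

int main() {
  // Placeholder for the base size returned by
  // C2Compiler::initial_code_buffer_size() in the diff above.
  int size = 4 * 1024;

  const bool use_zgc  = false;
  const bool debug_vm = true;
  // Per-oop-field allowance for GC barriers / oop verification.
  const int barrier_size = use_zgc ? 200 : (debug_vm ? 7 + 37 : 7);

  // Hypothetical signature: two scalarized inline-type arguments with
  // 3 and 1 oop fields respectively.
  const int oop_counts[] = {3, 1};
  for (int oops : oop_counts) {
    size += oops * barrier_size;
  }

  printf("scratch buffer size = %d bytes\n", size);
  return 0;
}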

3403   int lsize = MAX_locs_size / 3;
3404   buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
3405   buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
3406   buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
3407   // Mark as scratch buffer.
3408   buf.consts()->set_scratch_emit();
3409   buf.insts()->set_scratch_emit();
3410   buf.stubs()->set_scratch_emit();
3411 
3412   // Do the emission.
3413 
3414   Label fakeL; // Fake label for branch instructions.
3415   Label*   saveL = NULL;
3416   uint save_bnum = 0;
3417   bool is_branch = n->is_MachBranch();
3418   if (is_branch) {
3419     MacroAssembler masm(&buf);
3420     masm.bind(fakeL);
3421     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3422     n->as_MachBranch()->label_set(&fakeL, 0);
3423   } else if (n->is_MachProlog()) {
3424     saveL = ((MachPrologNode*)n)->_verified_entry;
3425     ((MachPrologNode*)n)->_verified_entry = &fakeL;
3426   } else if (n->is_MachVEP()) {
3427     saveL = ((MachVEPNode*)n)->_verified_entry;
3428     ((MachVEPNode*)n)->_verified_entry = &fakeL;
3429   }
3430   n->emit(buf, C->regalloc());
3431 
3432   // Emitting into the scratch buffer should not fail
3433   assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3434 
3435   // Restore label.
3436   if (is_branch) {
3437     n->as_MachBranch()->label_set(saveL, save_bnum);
3438   } else if (n->is_MachProlog()) {
3439     ((MachPrologNode*)n)->_verified_entry = saveL;
3440   } else if (n->is_MachVEP()) {
3441     ((MachVEPNode*)n)->_verified_entry = saveL;
3442   }
3443 
3444   // End scratch_emit_size section.
3445   set_in_scratch_emit_size(false);
3446 
3447   return buf.insts_size();
3448 }
3449 
3450 void PhaseOutput::install() {
3451   if (!C->should_install_code()) {
3452     return;
3453   } else if (C->stub_function() != NULL) {
3454     install_stub(C->stub_name());
3455   } else {
3456     install_code(C->method(),
3457                  C->entry_bci(),
3458                  CompileBroker::compiler2(),
3459                  C->has_unsafe_access(),
3460                  SharedRuntime::is_wide_vector(C->max_vector_size()),
3461                  C->rtm_state());
3462   }

3466                                int               entry_bci,
3467                                AbstractCompiler* compiler,
3468                                bool              has_unsafe_access,
3469                                bool              has_wide_vectors,
3470                                RTMState          rtm_state) {
3471   // Check if we want to skip execution of all compiled code.
3472   {
3473 #ifndef PRODUCT
3474     if (OptoNoExecute) {
3475       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3476       return;
3477     }
3478 #endif
3479     Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3480 
3481     if (C->is_osr_compilation()) {
3482       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3483       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3484     } else {
3485       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3486       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
3487         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
3488       }
3489       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
3490         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
3491       }
3492       if (_code_offsets.value(CodeOffsets::Entry) == -1) {
3493         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
3494       }
3495       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3496     }
3497 
3498     C->env()->register_method(target,
3499                               entry_bci,
3500                               &_code_offsets,
3501                               _orig_pc_slot_offset_in_bytes,
3502                               code_buffer(),
3503                               frame_size_in_words(),
3504                               _oop_map_set,
3505                               &_handler_table,
3506                               &_inc_table,
3507                               compiler,
3508                               has_unsafe_access,
3509                               SharedRuntime::is_wide_vector(C->max_vector_size()),
3510                               C->rtm_state(),
3511                               C->native_invokers());
3512 
3513     if (C->log() != NULL) { // Print code cache state into compiler log
3514       C->log()->code_cache_state();
3515     }
3516   }
3517 }
3518 void PhaseOutput::install_stub(const char* stub_name) {
3519   // Entry point will be accessed using stub_entry_point();
3520   if (code_buffer() == NULL) {
3521     Matcher::soft_match_failure();
3522   } else {
3523     if (PrintAssembly && (WizardMode || Verbose))
3524       tty->print_cr("### Stub::%s", stub_name);
3525 
3526     if (!C->failing()) {
3527       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3528 
3529       // Make the NMethod
3530       // For now we mark the frame as never safe for profile stackwalking
3531       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,