
src/hotspot/share/opto/output.cpp (old)


  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfo.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/compilerDirectives.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/c2/barrierSetC2.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/allocation.hpp"
  39 #include "opto/ad.hpp"
  40 #include "opto/block.hpp"
  41 #include "opto/c2compiler.hpp"
  42 #include "opto/c2_MacroAssembler.hpp"
  43 #include "opto/callnode.hpp"
  44 #include "opto/cfgnode.hpp"
  45 #include "opto/locknode.hpp"
  46 #include "opto/machnode.hpp"
  47 #include "opto/node.hpp"
  48 #include "opto/optoreg.hpp"
  49 #include "opto/output.hpp"
  50 #include "opto/regalloc.hpp"
  51 #include "opto/runtime.hpp"
  52 #include "opto/subnode.hpp"
  53 #include "opto/type.hpp"
  54 #include "runtime/handles.inline.hpp"
  55 #include "runtime/sharedRuntime.hpp"

 270 #ifdef ASSERT
 271   Compile* const C = Compile::current();
 272   BufferBlob* const blob = C->output()->scratch_buffer_blob();
 273   int size = 0;
 274 
 275   for (int i = _safepoints.length() - 1; i >= 0; i--) {
 276     CodeBuffer cb(blob->content_begin(), C->output()->scratch_buffer_code_size());
 277     MacroAssembler masm(&cb);
 278     C2SafepointPollStub* entry = _safepoints.at(i);
 279     emit_stub(masm, entry);
 280     size += cb.insts_size();
 281   }
 282   assert(size == result, "stubs should not have variable size");
 283 #endif
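       // (Debug-only cross-check: the loop above re-emits each safepoint poll
       // stub into the scratch buffer and sums the actual instruction sizes,
       // asserting that the estimate in 'result' matches real emission.)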
 284 
 285   return result;
 286 }
 287 
 288 // Nmethod entry barrier stubs
 289 C2EntryBarrierStub* C2EntryBarrierStubTable::add_entry_barrier() {
 290   assert(_stub == NULL, "There can only be one entry barrier stub");
 291   _stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
 292   return _stub;
 293 }
 294 
 295 void C2EntryBarrierStubTable::emit(CodeBuffer& cb) {
 296   if (_stub == NULL) {
 297     // No stub - nothing to do
 298     return;
 299   }
 300 
 301   C2_MacroAssembler masm(&cb);
 302   // Make sure there is enough space in the code buffer
 303   if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == NULL) {
 304     ciEnv::current()->record_failure("CodeCache is full");
 305     return;
 306   }
 307 
 308   intptr_t before = masm.offset();
 309   masm.emit_entry_barrier_stub(_stub);
 310   intptr_t after = masm.offset();
 311   int actual_size = (int)(after - before);
 312   int expected_size = masm.entry_barrier_stub_size();
 313   assert(actual_size == expected_size, "Estimated size is wrong, expected %d, was %d", expected_size, actual_size);
 314 }
 315 
 316 int C2EntryBarrierStubTable::estimate_stub_size() const {
 317   if (BarrierSet::barrier_set()->barrier_set_nmethod() == NULL) {
 318     // No nmethod entry barrier?
 319     return 0;
 320   }
 321 
 322   return C2_MacroAssembler::entry_barrier_stub_size();
 323 }
 324 
 325 PhaseOutput::PhaseOutput()
 326   : Phase(Phase::Output),
 327     _code_buffer("Compile::Fill_buffer"),
 328     _first_block_size(0),
 329     _handler_table(),
 330     _inc_table(),
 331     _safepoint_poll_table(),
 332     _entry_barrier_table(),
 333     _oop_map_set(NULL),
 334     _scratch_buffer_blob(NULL),
 335     _scratch_locs_memory(NULL),
 336     _scratch_const_size(-1),
 337     _in_scratch_emit_size(false),
 338     _frame_slots(0),
 339     _code_offsets(),
 340     _node_bundling_limit(0),
 341     _node_bundling_base(NULL),
 342     _orig_pc_slot(0),
 343     _orig_pc_slot_offset_in_bytes(0),
 344     _buf_sizes(),
 345     _block(NULL),
 346     _index(0) {
 347   C->set_output(this);
 348   if (C->stub_name() == NULL) {
 349     _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
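       // sizeof(address) / VMRegImpl::stack_slot_size is the number of 32-bit
       // stack slots needed for the saved PC, e.g. 8 / 4 = 2 slots on a
       // 64-bit VM, so the slot sits two stack slots below fixed_slots().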
 350   }
 351 }
 352 
 353 PhaseOutput::~PhaseOutput() {
 354   C->set_output(NULL);
 355   if (_scratch_buffer_blob != NULL) {
 356     BufferBlob::free(_scratch_buffer_blob);
 357   }
 358 }
 359 
 360 void PhaseOutput::perform_mach_node_analysis() {
 361   // Late barrier analysis must be done after schedule and bundle.
 362   // Otherwise, liveness-based spilling will fail.
 363   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 364   bs->late_barrier_analysis();
 365 
 366   pd_perform_mach_node_analysis();
 367 
 368   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 4);
 369 }
 370 
 371 // Convert Nodes to instruction bits and pass off to the VM
 372 void PhaseOutput::Output() {
 373   // RootNode goes
 374   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 375 
 376   // The number of new nodes (mostly MachNop) is proportional to
 377   // the number of java calls and inner loops which are aligned.
 378   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 379                             C->inner_loops()*(OptoLoopAlignment-1)),
 380                            "out of nodes before code generation" ) ) {
 381     return;
 382   }
 383   // Make sure I can find the Start Node
 384   Block *entry = C->cfg()->get_block(1);
 385   Block *broot = C->cfg()->get_root_block();
 386 
 387   const StartNode *start = entry->head()->as_Start();
 388 
 389   // Replace StartNode with prolog
 390   MachPrologNode *prolog = new MachPrologNode();
 391   entry->map_node(prolog, 0);
 392   C->cfg()->map_node_to_block(prolog, entry);
 393   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 394 
 395   // Virtual methods need an unverified entry point
 396 
 397   if( C->is_osr_compilation() ) {
 398     if( PoisonOSREntry ) {
 399       // TODO: Should use a ShouldNotReachHereNode...
 400       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 401     }
 402   } else {
 403     if( C->method() && !C->method()->flags().is_static() ) {
 404       // Insert unvalidated entry point
 405       C->cfg()->insert( broot, 0, new MachUEPNode() );
 406     }
 407 
 408   }
 409 
 410   // Break before main entry point
 411   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 412       (OptoBreakpoint && C->is_method_compilation())       ||
 413       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 414       (OptoBreakpointC2R && !C->method())                   ) {
 415     // checking for C->method() means that OptoBreakpoint does not apply to
 416     // runtime stubs or frame converters
 417     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 418   }
 419 
 420   // Insert epilogs before every return
 421   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 422     Block* block = C->cfg()->get_block(i);
 423     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 424       Node* m = block->end();
 425       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 426         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 427         block->add_inst(epilog);
 428         C->cfg()->map_node_to_block(epilog, block);
 429       }
 430     }
 431   }
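       // (A block is a program exit when its non-connector successor is the
       // root block; blocks ending in a Halt, e.g. an uncommon trap, do not
       // get an epilog.)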
 432 
 433   // Keeper of sizing aspects
 434   _buf_sizes = BufferSizingData();
 435 
 436   // Initialize code buffer
 437   estimate_buffer_size(_buf_sizes._const);
 438   if (C->failing()) return;
 439 
 440   // Pre-compute the length of blocks and replace
 441   // long branches with short ones if the machine supports it.
 442   // Must be done before ScheduleAndBundle due to SPARC delay slots
 443   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 444   blk_starts[0] = 0;
 445   shorten_branches(blk_starts);
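       // blk_starts[i] is the estimated code offset at which block i begins;
       // shorten_branches() fills it in while sizing blocks, and fill_buffer()
       // uses it afterwards to place instructions and padding.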
 446 
 447   ScheduleAndBundle();
 448   if (C->failing()) {
 449     return;
 450   }
 451 
 452   perform_mach_node_analysis();
 453 
 454   // Complete sizing of codebuffer
 455   CodeBuffer* cb = init_buffer();
 456   if (cb == NULL || C->failing()) {
 457     return;
 458   }
 459 
 460   BuildOopMaps();
 461 
 462   if (C->failing())  {
 463     return;
 464   }
 465 
 466   fill_buffer(cb, blk_starts);

 587     // Sum all instruction sizes to compute block size
 588     uint last_inst = block->number_of_nodes();
 589     uint blk_size = 0;
 590     for (uint j = 0; j < last_inst; j++) {
 591       _index = j;
 592       Node* nj = block->get_node(_index);
 593       // Handle machine instruction nodes
 594       if (nj->is_Mach()) {
 595         MachNode* mach = nj->as_Mach();
 596         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 597         reloc_size += mach->reloc();
 598         if (mach->is_MachCall()) {
 599           // add size information for trampoline stub
 600           // class CallStubImpl is platform-specific and defined in the *.ad files.
 601           stub_size  += CallStubImpl::size_call_trampoline();
 602           reloc_size += CallStubImpl::reloc_call_trampoline();
 603 
 604           MachCallNode *mcall = mach->as_MachCall();
 605           // This destination address is NOT PC-relative
 606 
 607           mcall->method_set((intptr_t)mcall->entry_point());
 608 
 609           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 610             stub_size  += CompiledStaticCall::to_interp_stub_size();
 611             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 612           }
 613         } else if (mach->is_MachSafePoint()) {
 614           // If call/safepoint are adjacent, account for possible
 615           // nop to disambiguate the two safepoints.
 616           // ScheduleAndBundle() can rearrange nodes in a block,
 617           // check for all offsets inside this block.
 618           if (last_call_adr >= blk_starts[i]) {
 619             blk_size += nop_size;
 620           }
 621         }
 622         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 623           // Nop is inserted between "avoid back to back" instructions.
 624           // ScheduleAndBundle() can rearrange nodes in a block,
 625           // check for all offsets inside this block.
 626           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 627             blk_size += nop_size;

 842     // New functionality:
 843     //   Assert if the local is not top. In product mode let the new node
 844     //   override the old entry.
 845     assert(local == C->top(), "LocArray collision");
 846     if (local == C->top()) {
 847       return;
 848     }
 849     array->pop();
 850   }
 851   const Type *t = local->bottom_type();
 852 
 853   // Is it a safepoint scalar object node?
 854   if (local->is_SafePointScalarObject()) {
 855     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 856 
 857     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 858     if (sv == NULL) {
 859       ciKlass* cik = t->is_oopptr()->exact_klass();
 860       assert(cik->is_instance_klass() ||
 861              cik->is_array_klass(), "Not supported allocation.");
 862       sv = new ObjectValue(spobj->_idx,
 863                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
 864       set_sv_for_object_node(objs, sv);
 865 
 866       uint first_ind = spobj->first_index(sfpt->jvms());
 867       for (uint i = 0; i < spobj->n_fields(); i++) {
 868         Node* fld_node = sfpt->in(first_ind+i);
 869         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 870       }
 871     }
 872     array->append(sv);
 873     return;
 874   }
 875 
 876   // Grab the register number for the local
 877   OptoReg::Name regnum = C->regalloc()->get_reg_first(local);
 878   if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
 879     // Record the double as two float registers.
 880     // The register mask for such a value always specifies two adjacent
 881     // float registers, with the lower register number even.
 882     // Normally, the allocation of high and low words to these registers
 883     // is irrelevant, because nearly all operations on register pairs
 884     // (e.g., StoreD) treat them as a single unit.
 885     // Here, we assume in addition that the words in these two registers
 886     // stored "naturally" (by operations like StoreD and double stores

1028       ShouldNotReachHere();
1029       break;
1030   }
1031 }
1032 
1033 // Determine if this node starts a bundle
1034 bool PhaseOutput::starts_bundle(const Node *n) const {
1035   return (_node_bundling_limit > n->_idx &&
1036           _node_bundling_base[n->_idx].starts_bundle());
1037 }
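      // (_node_bundling_base is indexed by node _idx; nodes at or above
      // _node_bundling_limit were created after scheduling and carry no
      // bundling information.)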
1038 
1039 //--------------------------Process_OopMap_Node--------------------------------
1040 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1041   // Handle special safepoint nodes for synchronization
1042   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1043   MachCallNode      *mcall;
1044 
1045   int safepoint_pc_offset = current_offset;
1046   bool is_method_handle_invoke = false;
1047   bool return_oop = false;
1048   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1049   bool arg_escape = false;
1050 
1051   // Add the safepoint in the DebugInfoRecorder
1052   if( !mach->is_MachCall() ) {
1053     mcall = NULL;
1054     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1055   } else {
1056     mcall = mach->as_MachCall();
1057 
1058     // Is the call a MethodHandle call?
1059     if (mcall->is_MachCallJava()) {
1060       if (mcall->as_MachCallJava()->_method_handle_invoke) {
1061         assert(C->has_method_handle_invokes(), "must have been set during call generation");
1062         is_method_handle_invoke = true;
1063       }
1064       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1065     }
1066 
1067     // Check if a call returns an object.
1068     if (mcall->returns_pointer()) {
1069       return_oop = true;
1070     }
1071     safepoint_pc_offset += mcall->ret_addr_offset();
1072     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1073   }
1074 
1075   // Loop over the JVMState list to add scope information
1076   // Do not skip safepoints with a NULL method, they need monitor info
1077   JVMState* youngest_jvms = sfn->jvms();
1078   int max_depth = youngest_jvms->depth();
1079 
1080   // Allocate the object pool for scalar-replaced objects -- the map from
1081   // small-integer keys (which can be recorded in the local and ostack
1082   // arrays) to descriptions of the object state.
1083   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1084 
1085   // Visit scopes from oldest to youngest.
1086   for (int depth = 1; depth <= max_depth; depth++) {
1087     JVMState* jvms = youngest_jvms->of_depth(depth);
1088     int idx;
1089     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
1090     // Safepoints that do not have method() set only provide oop-map and monitor info

1174     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1175     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1176 
1177     // Make method available for all Safepoints
1178     ciMethod* scope_method = method ? method : C->method();
1179     // Describe the scope here
1180     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1181     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1182     // Now we can describe the scope.
1183     methodHandle null_mh;
1184     bool rethrow_exception = false;
1185     C->debug_info()->describe_scope(
1186       safepoint_pc_offset,
1187       null_mh,
1188       scope_method,
1189       jvms->bci(),
1190       jvms->should_reexecute(),
1191       rethrow_exception,
1192       is_method_handle_invoke,
1193       return_oop,
1194       has_ea_local_in_scope,
1195       arg_escape,
1196       locvals,
1197       expvals,
1198       monvals
1199     );
1200   } // End jvms loop
1201 
1202   // Mark the end of the scope set.
1203   C->debug_info()->end_safepoint(safepoint_pc_offset);
1204 }
1205 
1206 
1207 
1208 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1209 class NonSafepointEmitter {
1210     Compile*  C;
1211     JVMState* _pending_jvms;
1212     int       _pending_offset;
1213 

1551           MachNode *nop = new MachNopNode(nops_cnt);
1552           block->insert_node(nop, j++);
1553           last_inst++;
1554           C->cfg()->map_node_to_block(nop, block);
1555           // Ensure enough space.
1556           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1557           if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1558             C->record_failure("CodeCache is full");
1559             return;
1560           }
1561           nop->emit(*cb, C->regalloc());
1562           cb->flush_bundle(true);
1563           current_offset = cb->insts_size();
1564         }
1565 
1566         bool observe_safepoint = is_sfn;
1567         // Remember the start of the last call in a basic block
1568         if (is_mcall) {
1569           MachCallNode *mcall = mach->as_MachCall();
1570 
1571           // This destination address is NOT PC-relative
1572           mcall->method_set((intptr_t)mcall->entry_point());
1573 
1574           // Save the return address
1575           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1576 
1577           observe_safepoint = mcall->guaranteed_safepoint();
1578         }
1579 
1580         // sfn will be valid whenever mcall is valid now because of inheritance
1581         if (observe_safepoint) {
1582           // Handle special safepoint nodes for synchronization
1583           if (!is_mcall) {
1584             MachSafePointNode *sfn = mach->as_MachSafePoint();
1585             // !!!!! Stubs only need an oopmap right now, so bail out
1586             if (sfn->jvms()->method() == NULL) {
1587               // Write the oopmap directly to the code blob??!!
1588               continue;
1589             }
1590           } // End synchronization
1591 
1592           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1716       if ((node_offsets != NULL) && (n->_idx < node_offset_limit)) {
1717         node_offsets[n->_idx] = cb->insts_size();
1718       }
1719 #endif
1720       assert(!C->failing(), "Should not reach here if failing.");
1721 
1722       // "Normal" instruction case
1723       DEBUG_ONLY(uint instr_offset = cb->insts_size());
1724       n->emit(*cb, C->regalloc());
1725       current_offset = cb->insts_size();
1726 
1727       // Above we only verified that there is enough space in the instruction section.
1728       // However, the instruction may emit stubs that cause code buffer expansion.
1729       // Bail out here if expansion failed due to a lack of code cache space.
1730       if (C->failing()) {
1731         return;
1732       }
1733 
1734       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1735              "ret_addr_offset() not within emitted code");
1736 
1737 #ifdef ASSERT
1738       uint n_size = n->size(C->regalloc());
1739       if (n_size < (current_offset-instr_offset)) {
1740         MachNode* mach = n->as_Mach();
1741         n->dump();
1742         mach->dump_format(C->regalloc(), tty);
1743         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1744         Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
1745         tty->print_cr(" ------------------- ");
1746         BufferBlob* blob = this->scratch_buffer_blob();
1747         address blob_begin = blob->content_begin();
1748         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1749         assert(false, "wrong size of mach node");
1750       }
1751 #endif
1752       non_safepoints.observe_instruction(n, current_offset);
1753 
1754       // mcall is last "call" that can be a safepoint
1755       // record it so we can see if a poll will directly follow it
1756       // in which case we'll need a pad to make the PcDesc sites unique

3106         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3107         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3108       }
3109     }
3110     // Do not allow defs of new derived values to float above GC
3111     // points unless the base is definitely available at the GC point.
3112 
3113     Node *m = b->get_node(i);
3114 
3115     // Add precedence edge from following safepoint to use of derived pointer
3116     if( last_safept_node != end_node &&
3117         m != last_safept_node) {
3118       for (uint k = 1; k < m->req(); k++) {
3119         const Type *t = m->in(k)->bottom_type();
3120         if( t->isa_oop_ptr() &&
3121             t->is_ptr()->offset() != 0 ) {
3122           last_safept_node->add_prec( m );
3123           break;
3124         }
3125       }
3126     }
3127 
3128     if( n->jvms() ) {           // Precedence edge from derived to safept
3129       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3130       if( b->get_node(last_safept) != last_safept_node ) {
3131         last_safept = b->find_node(last_safept_node);
3132       }
3133       for( uint j=last_safept; j > i; j-- ) {
3134         Node *mach = b->get_node(j);
3135         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3136           mach->add_prec( n );
3137       }
3138       last_safept = i;
3139       last_safept_node = m;
3140     }
3141   }
3142 
3143   if (fat_proj_seen) {
3144     // Garbage collect pinch nodes that were not consumed.
3145     // They are usually created by a fat kill MachProj for a call.

3264 }
3265 #endif
3266 
3267 //-----------------------init_scratch_buffer_blob------------------------------
3268 // Construct a temporary BufferBlob and cache it for this compile.
3269 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3270   // If there is already a scratch buffer blob allocated and the
3271   // constant section is big enough, use it.  Otherwise free the
3272   // current and allocate a new one.
3273   BufferBlob* blob = scratch_buffer_blob();
3274   if ((blob != NULL) && (const_size <= _scratch_const_size)) {
3275     // Use the current blob.
3276   } else {
3277     if (blob != NULL) {
3278       BufferBlob::free(blob);
3279     }
3280 
3281     ResourceMark rm;
3282     _scratch_const_size = const_size;
3283     int size = C2Compiler::initial_code_buffer_size(const_size);
3284     blob = BufferBlob::create("Compile::scratch_buffer", size);
3285     // Record the buffer blob for next time.
3286     set_scratch_buffer_blob(blob);
3287     // Have we run out of code space?
3288     if (scratch_buffer_blob() == NULL) {
3289       // Let CompilerBroker disable further compilations.
3290       C->record_failure("Not enough space for scratch buffer in CodeCache");
3291       return;
3292     }
3293   }
3294 
3295   // Initialize the relocation buffers
3296   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3297   set_scratch_locs_memory(locs_buf);
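       // The relocation scratch area is carved from the tail of the blob:
       // MAX_locs_size relocInfo elements below content_end(), so scratch code
       // grows from the front while relocations occupy the back of the blob.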
3298 }
3299 
3300 
3301 //-----------------------scratch_emit_size-------------------------------------
3302 // Helper function that computes size by emitting code
3303 uint PhaseOutput::scratch_emit_size(const Node* n) {

3334   buf.insts()->set_scratch_emit();
3335   buf.stubs()->set_scratch_emit();
3336 
3337   // Do the emission.
3338 
3339   Label fakeL; // Fake label for branch instructions.
3340   Label*   saveL = NULL;
3341   uint save_bnum = 0;
3342   bool is_branch = n->is_MachBranch();
3343   if (is_branch) {
3344     MacroAssembler masm(&buf);
3345     masm.bind(fakeL);
3346     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3347     n->as_MachBranch()->label_set(&fakeL, 0);
3348   }
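       // (Branches cannot be emitted without a bound label, so a fake label
       // bound at the current buffer position is swapped in for this size
       // probe; the real label and block number are restored below.)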
3349   n->emit(buf, C->regalloc());
3350 
3351   // Emitting into the scratch buffer should not fail
3352   assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3353 
 3354   if (is_branch) // Restore label.
 3355     n->as_MachBranch()->label_set(saveL, save_bnum);
3356 
3357   // End scratch_emit_size section.
3358   set_in_scratch_emit_size(false);
3359 
3360   return buf.insts_size();
3361 }
3362 
3363 void PhaseOutput::install() {
3364   if (!C->should_install_code()) {
3365     return;
3366   } else if (C->stub_function() != NULL) {
3367     install_stub(C->stub_name());
3368   } else {
3369     install_code(C->method(),
3370                  C->entry_bci(),
3371                  CompileBroker::compiler2(),
3372                  C->has_unsafe_access(),
3373                  SharedRuntime::is_wide_vector(C->max_vector_size()),
3374                  C->rtm_state());
3375   }

3379                                int               entry_bci,
3380                                AbstractCompiler* compiler,
3381                                bool              has_unsafe_access,
3382                                bool              has_wide_vectors,
3383                                RTMState          rtm_state) {
3384   // Check if we want to skip execution of all compiled code.
3385   {
3386 #ifndef PRODUCT
3387     if (OptoNoExecute) {
3388       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3389       return;
3390     }
3391 #endif
3392     Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3393 
3394     if (C->is_osr_compilation()) {
3395       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3396       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3397     } else {
 3398       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3399       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3400     }
3401 
3402     C->env()->register_method(target,
3403                                      entry_bci,
3404                                      &_code_offsets,
3405                                      _orig_pc_slot_offset_in_bytes,
3406                                      code_buffer(),
3407                                      frame_size_in_words(),
3408                                      oop_map_set(),
3409                                      &_handler_table,
3410                                      inc_table(),
3411                                      compiler,
3412                                      has_unsafe_access,
3413                                      SharedRuntime::is_wide_vector(C->max_vector_size()),
3414                                      C->has_monitors(),
3415                                      0,
3416                                      C->rtm_state());
3417 
3418     if (C->log() != NULL) { // Print code cache state into compiler log
3419       C->log()->code_cache_state();
3420     }
3421   }
3422 }
3423 void PhaseOutput::install_stub(const char* stub_name) {
3424   // Entry point will be accessed using stub_entry_point();
3425   if (code_buffer() == NULL) {
3426     Matcher::soft_match_failure();
3427   } else {
3428     if (PrintAssembly && (WizardMode || Verbose))
3429       tty->print_cr("### Stub::%s", stub_name);
3430 
3431     if (!C->failing()) {
3432       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3433 
3434       // Make the NMethod
3435       // For now we mark the frame as never safe for profile stackwalking
3436       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,

src/hotspot/share/opto/output.cpp (new)

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/debugInfo.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "compiler/compileBroker.hpp"
  32 #include "compiler/compilerDirectives.hpp"
  33 #include "compiler/disassembler.hpp"
  34 #include "compiler/oopMap.hpp"
  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/gc_globals.hpp"
  37 #include "gc/shared/c2/barrierSetC2.hpp"
  38 #include "memory/allocation.inline.hpp"
  39 #include "memory/allocation.hpp"
  40 #include "opto/ad.hpp"
  41 #include "opto/block.hpp"
  42 #include "opto/c2compiler.hpp"
  43 #include "opto/c2_MacroAssembler.hpp"
  44 #include "opto/callnode.hpp"
  45 #include "opto/cfgnode.hpp"
  46 #include "opto/locknode.hpp"
  47 #include "opto/machnode.hpp"
  48 #include "opto/node.hpp"
  49 #include "opto/optoreg.hpp"
  50 #include "opto/output.hpp"
  51 #include "opto/regalloc.hpp"
  52 #include "opto/runtime.hpp"
  53 #include "opto/subnode.hpp"
  54 #include "opto/type.hpp"
  55 #include "runtime/handles.inline.hpp"
  56 #include "runtime/sharedRuntime.hpp"

 271 #ifdef ASSERT
 272   Compile* const C = Compile::current();
 273   BufferBlob* const blob = C->output()->scratch_buffer_blob();
 274   int size = 0;
 275 
 276   for (int i = _safepoints.length() - 1; i >= 0; i--) {
 277     CodeBuffer cb(blob->content_begin(), C->output()->scratch_buffer_code_size());
 278     MacroAssembler masm(&cb);
 279     C2SafepointPollStub* entry = _safepoints.at(i);
 280     emit_stub(masm, entry);
 281     size += cb.insts_size();
 282   }
 283   assert(size == result, "stubs should not have variable size");
 284 #endif
 285 
 286   return result;
 287 }
 288 
 289 // Nmethod entry barrier stubs
 290 C2EntryBarrierStub* C2EntryBarrierStubTable::add_entry_barrier() {
 291   C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
 292   _stubs.append(stub);
 293   return stub;
 294 }
 295 
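 // Emit all collected entry barrier stubs back-to-back, growing the code
 // buffer before each stub and bailing out if the code cache is exhausted.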
 296 void C2EntryBarrierStubTable::emit(CodeBuffer& cb) {
 297   if (_stubs.is_empty()) {
 298     // No stub - nothing to do
 299     return;
 300   }
 301 
 302   C2_MacroAssembler masm(&cb);
 303   for (C2EntryBarrierStub* stub : _stubs) {
 304     // Make sure there is enough space in the code buffer
 305     if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == NULL) {
 306       ciEnv::current()->record_failure("CodeCache is full");
 307       return;
 308     }
 309 
 310     intptr_t before = masm.offset();
 311     masm.emit_entry_barrier_stub(stub);
 312     intptr_t after = masm.offset();
 313     int actual_size = (int)(after - before);
 314     int expected_size = masm.entry_barrier_stub_size();
 315     assert(actual_size == expected_size, "Estimated size is wrong, expected %d, was %d", expected_size, actual_size);
 316   }
 317 }
 318 
 319 int C2EntryBarrierStubTable::estimate_stub_size() const {
 320   if (BarrierSet::barrier_set()->barrier_set_nmethod() == NULL) {
 321     // No nmethod entry barrier?
 322     return 0;
 323   }
 324 
 325   return C2_MacroAssembler::entry_barrier_stub_size();
 326 }
 327 
 328 PhaseOutput::PhaseOutput()
 329   : Phase(Phase::Output),
 330     _code_buffer("Compile::Fill_buffer"),
 331     _first_block_size(0),
 332     _handler_table(),
 333     _inc_table(),
 334     _safepoint_poll_table(),
 335     _entry_barrier_table(),
 336     _oop_map_set(NULL),
 337     _scratch_buffer_blob(NULL),
 338     _scratch_locs_memory(NULL),
 339     _scratch_const_size(-1),
 340     _in_scratch_emit_size(false),
 341     _frame_slots(0),
 342     _code_offsets(),
 343     _node_bundling_limit(0),
 344     _node_bundling_base(NULL),
 345     _orig_pc_slot(0),
 346     _orig_pc_slot_offset_in_bytes(0),
 347     _buf_sizes(),
 348     _block(NULL),
 349     _index(0) {
 350   C->set_output(this);
 351   if (C->stub_name() == NULL) {
 352     int fixed_slots = C->fixed_slots();
 353     if (C->needs_stack_repair()) {
 354       fixed_slots -= 2;
 355     }
 356     // TODO 8284443 Only reserve extra slot if needed
 357     if (InlineTypeReturnedAsFields) {
 358       fixed_slots -= 2;
 359     }
 360     _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
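       // The two adjustments above skip the 2-slot reservations for stack
       // repair and for scalarized inline type returns, so the saved-PC slot
       // (sizeof(address) / VMRegImpl::stack_slot_size = 2 slots on 64-bit)
       // lands below them.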
 361   }
 362 }
 363 
 364 PhaseOutput::~PhaseOutput() {
 365   C->set_output(NULL);
 366   if (_scratch_buffer_blob != NULL) {
 367     BufferBlob::free(_scratch_buffer_blob);
 368   }
 369 }
 370 
 371 void PhaseOutput::perform_mach_node_analysis() {
 372   // Late barrier analysis must be done after schedule and bundle
 373   // Otherwise liveness based spilling will fail
 374   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 375   bs->late_barrier_analysis();
 376 
 377   pd_perform_mach_node_analysis();
 378 
 379   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 4);
 380 }
 381 
 382 // Convert Nodes to instruction bits and pass off to the VM
 383 void PhaseOutput::Output() {
 384   // RootNode goes
 385   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 386 
 387   // The number of new nodes (mostly MachNop) is proportional to
 388   // the number of java calls and inner loops which are aligned.
 389   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 390                             C->inner_loops()*(OptoLoopAlignment-1)),
 391                            "out of nodes before code generation" ) ) {
 392     return;
 393   }
 394   // Make sure I can find the Start Node
 395   Block *entry = C->cfg()->get_block(1);
 396   Block *broot = C->cfg()->get_root_block();
 397 
 398   const StartNode *start = entry->head()->as_Start();
 399 
 400   // Replace StartNode with prolog
 401   Label verified_entry;
 402   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
 403   entry->map_node(prolog, 0);
 404   C->cfg()->map_node_to_block(prolog, entry);
 405   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 406 
 407   // Virtual methods need an unverified entry point
 408   if (C->is_osr_compilation()) {
 409     if (PoisonOSREntry) {
 410       // TODO: Should use a ShouldNotReachHereNode...
 411       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 412     }
 413   } else {
 414     if (C->method()) {
 415       if (C->method()->has_scalarized_args()) {
 416         // Add entry point to unpack all inline type arguments
 417         C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
 418         if (!C->method()->is_static()) {
 419           // Add verified/unverified entry points to only unpack inline type receiver at interface calls
 420           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
 421           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true,  /* receiver_only */ true));
 422           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
 423         }
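           // Note: each insert at index 0 prepends, so the resulting node
           // order is the reverse of the insertion order above: unverified
           // receiver-only entry first, then verified receiver-only,
           // unverified all-args, and verified all-args. The entry offsets
           // computed after shorten_branches() rely on this order.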
 424       } else if (!C->method()->is_static()) {
 425         // Insert unvalidated entry point
 426         C->cfg()->insert(broot, 0, new MachUEPNode());
 427       }
 428     }
 429   }
 430 
 431   // Break before main entry point
 432   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 433       (OptoBreakpoint && C->is_method_compilation())       ||
 434       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 435       (OptoBreakpointC2R && !C->method())                   ) {
 436     // checking for C->method() means that OptoBreakpoint does not apply to
 437     // runtime stubs or frame converters
 438     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 439   }
 440 
 441   // Insert epilogs before every return
 442   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 443     Block* block = C->cfg()->get_block(i);
 444     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 445       Node* m = block->end();
 446       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 447         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 448         block->add_inst(epilog);
 449         C->cfg()->map_node_to_block(epilog, block);
 450       }
 451     }
 452   }
 453 
 454   // Keeper of sizing aspects
 455   _buf_sizes = BufferSizingData();
 456 
 457   // Initialize code buffer
 458   estimate_buffer_size(_buf_sizes._const);
 459   if (C->failing()) return;
 460 
 461   // Pre-compute the length of blocks and replace
 462   // long branches with short ones if the machine supports it.
 463   // Must be done before ScheduleAndBundle due to SPARC delay slots
 464   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 465   blk_starts[0] = 0;
 466   shorten_branches(blk_starts);
 467 
 468   if (!C->is_osr_compilation() && C->has_scalarized_args()) {
 469     // Compute the offsets of the entry points required by the inline type calling convention
 470     if (!C->method()->is_static()) {
 471       // We have entries at the beginning of the method, implemented by the first 4 nodes.
 472       // Entry                     (unverified) @ offset 0
 473       // Verified_Inline_Entry_RO
 474       // Inline_Entry              (unverified)
 475       // Verified_Inline_Entry
 476       uint offset = 0;
 477       _code_offsets.set_value(CodeOffsets::Entry, offset);
 478 
 479       offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
 480       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
 481 
 482       offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
 483       _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
 484 
 485       offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
 486       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
 487     } else {
 488       _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
 489       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
 490     }
 491   }
 492 
 493   ScheduleAndBundle();
 494   if (C->failing()) {
 495     return;
 496   }
 497 
 498   perform_mach_node_analysis();
 499 
 500   // Complete sizing of codebuffer
 501   CodeBuffer* cb = init_buffer();
 502   if (cb == NULL || C->failing()) {
 503     return;
 504   }
 505 
 506   BuildOopMaps();
 507 
 508   if (C->failing())  {
 509     return;
 510   }
 511 
 512   fill_buffer(cb, blk_starts);

 633     // Sum all instruction sizes to compute block size
 634     uint last_inst = block->number_of_nodes();
 635     uint blk_size = 0;
 636     for (uint j = 0; j < last_inst; j++) {
 637       _index = j;
 638       Node* nj = block->get_node(_index);
 639       // Handle machine instruction nodes
 640       if (nj->is_Mach()) {
 641         MachNode* mach = nj->as_Mach();
 642         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 643         reloc_size += mach->reloc();
 644         if (mach->is_MachCall()) {
 645           // add size information for trampoline stub
 646           // class CallStubImpl is platform-specific and defined in the *.ad files.
 647           stub_size  += CallStubImpl::size_call_trampoline();
 648           reloc_size += CallStubImpl::reloc_call_trampoline();
 649 
 650           MachCallNode *mcall = mach->as_MachCall();
 651           // This destination address is NOT PC-relative
 652 
 653           if (mcall->entry_point() != NULL) {
 654             mcall->method_set((intptr_t)mcall->entry_point());
 655           }
 656 
 657           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 658             stub_size  += CompiledStaticCall::to_interp_stub_size();
 659             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 660           }
 661         } else if (mach->is_MachSafePoint()) {
 662           // If call/safepoint are adjacent, account for possible
 663           // nop to disambiguate the two safepoints.
 664           // ScheduleAndBundle() can rearrange nodes in a block,
 665           // check for all offsets inside this block.
 666           if (last_call_adr >= blk_starts[i]) {
 667             blk_size += nop_size;
 668           }
 669         }
 670         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 671           // Nop is inserted between "avoid back to back" instructions.
 672           // ScheduleAndBundle() can rearrange nodes in a block,
 673           // check for all offsets inside this block.
 674           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 675             blk_size += nop_size;

 890     // New functionality:
 891     //   Assert if the local is not top. In product mode let the new node
 892     //   override the old entry.
 893     assert(local == C->top(), "LocArray collision");
 894     if (local == C->top()) {
 895       return;
 896     }
 897     array->pop();
 898   }
 899   const Type *t = local->bottom_type();
 900 
 901   // Is it a safepoint scalar object node?
 902   if (local->is_SafePointScalarObject()) {
 903     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 904 
 905     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 906     if (sv == NULL) {
 907       ciKlass* cik = t->is_oopptr()->exact_klass();
 908       assert(cik->is_instance_klass() ||
 909              cik->is_array_klass(), "Not supported allocation.");
 910       uint first_ind = spobj->first_index(sfpt->jvms());
 911       // Nullable, scalarized inline types have an is_init input
 912       // that needs to be checked before using the field values.
 913       ScopeValue* is_init = NULL;
 914       if (cik->is_inlinetype()) {
 915         Node* init_node = sfpt->in(first_ind++);
 916         assert(init_node != NULL, "is_init node not found");
 917         if (!init_node->is_top()) {
 918           const TypeInt* init_type = init_node->bottom_type()->is_int();
 919           if (init_node->is_Con()) {
 920             is_init = new ConstantIntValue(init_type->get_con());
 921           } else {
 922             OptoReg::Name init_reg = C->regalloc()->get_reg_first(init_node);
 923             is_init = new_loc_value(C->regalloc(), init_reg, Location::normal);
 924           }
 925         }
 926       }
 927       sv = new ObjectValue(spobj->_idx,
 928                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), is_init);
 929       set_sv_for_object_node(objs, sv);
 930 
 931       for (uint i = 0; i < spobj->n_fields(); i++) {
 932         Node* fld_node = sfpt->in(first_ind+i);
 933         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 934       }
 935     }
 936     array->append(sv);
 937     return;
 938   }
 939 
 940   // Grab the register number for the local
 941   OptoReg::Name regnum = C->regalloc()->get_reg_first(local);
 942   if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
 943     // Record the double as two float registers.
 944     // The register mask for such a value always specifies two adjacent
 945     // float registers, with the lower register number even.
 946     // Normally, the allocation of high and low words to these registers
 947     // is irrelevant, because nearly all operations on register pairs
 948     // (e.g., StoreD) treat them as a single unit.
 949     // Here, we assume in addition that the words in these two registers
 950     // stored "naturally" (by operations like StoreD and double stores

1092       ShouldNotReachHere();
1093       break;
1094   }
1095 }
1096 
1097 // Determine if this node starts a bundle
1098 bool PhaseOutput::starts_bundle(const Node *n) const {
1099   return (_node_bundling_limit > n->_idx &&
1100           _node_bundling_base[n->_idx].starts_bundle());
1101 }
1102 
1103 //--------------------------Process_OopMap_Node--------------------------------
1104 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1105   // Handle special safepoint nodes for synchronization
1106   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1107   MachCallNode      *mcall;
1108 
1109   int safepoint_pc_offset = current_offset;
1110   bool is_method_handle_invoke = false;
1111   bool return_oop = false;
1112   bool return_scalarized = false;
1113   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1114   bool arg_escape = false;
1115 
1116   // Add the safepoint in the DebugInfoRecorder
1117   if( !mach->is_MachCall() ) {
1118     mcall = NULL;
1119     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1120   } else {
1121     mcall = mach->as_MachCall();
1122 
1123     // Is the call a MethodHandle call?
1124     if (mcall->is_MachCallJava()) {
1125       if (mcall->as_MachCallJava()->_method_handle_invoke) {
1126         assert(C->has_method_handle_invokes(), "must have been set during call generation");
1127         is_method_handle_invoke = true;
1128       }
1129       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1130     }
1131 
1132     // Check if a call returns an object.
1133     if (mcall->returns_pointer() || mcall->returns_scalarized()) {
1134       return_oop = true;
1135     }
1136     if (mcall->returns_scalarized()) {
1137       return_scalarized = true;
1138     }
1139     safepoint_pc_offset += mcall->ret_addr_offset();
1140     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1141   }
1142 
1143   // Loop over the JVMState list to add scope information
1144   // Do not skip safepoints with a NULL method, they need monitor info
1145   JVMState* youngest_jvms = sfn->jvms();
1146   int max_depth = youngest_jvms->depth();
1147 
1148   // Allocate the object pool for scalar-replaced objects -- the map from
1149   // small-integer keys (which can be recorded in the local and ostack
1150   // arrays) to descriptions of the object state.
1151   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1152 
1153   // Visit scopes from oldest to youngest.
1154   for (int depth = 1; depth <= max_depth; depth++) {
1155     JVMState* jvms = youngest_jvms->of_depth(depth);
1156     int idx;
1157     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
1158     // Safepoints that do not have method() set only provide oop-map and monitor info

1242     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1243     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1244 
1245     // Make method available for all Safepoints
1246     ciMethod* scope_method = method ? method : C->method();
1247     // Describe the scope here
1248     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1249     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1250     // Now we can describe the scope.
1251     methodHandle null_mh;
1252     bool rethrow_exception = false;
1253     C->debug_info()->describe_scope(
1254       safepoint_pc_offset,
1255       null_mh,
1256       scope_method,
1257       jvms->bci(),
1258       jvms->should_reexecute(),
1259       rethrow_exception,
1260       is_method_handle_invoke,
1261       return_oop,
1262       return_scalarized,
1263       has_ea_local_in_scope,
1264       arg_escape,
1265       locvals,
1266       expvals,
1267       monvals
1268     );
1269   } // End jvms loop
1270 
1271   // Mark the end of the scope set.
1272   C->debug_info()->end_safepoint(safepoint_pc_offset);
1273 }
1274 
1275 
1276 
1277 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1278 class NonSafepointEmitter {
1279     Compile*  C;
1280     JVMState* _pending_jvms;
1281     int       _pending_offset;
1282 

1620           MachNode *nop = new MachNopNode(nops_cnt);
1621           block->insert_node(nop, j++);
1622           last_inst++;
1623           C->cfg()->map_node_to_block(nop, block);
1624           // Ensure enough space.
1625           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1626           if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1627             C->record_failure("CodeCache is full");
1628             return;
1629           }
1630           nop->emit(*cb, C->regalloc());
1631           cb->flush_bundle(true);
1632           current_offset = cb->insts_size();
1633         }
1634 
1635         bool observe_safepoint = is_sfn;
1636         // Remember the start of the last call in a basic block
1637         if (is_mcall) {
1638           MachCallNode *mcall = mach->as_MachCall();
1639 
1640           if (mcall->entry_point() != NULL) {
1641             // This destination address is NOT PC-relative
1642             mcall->method_set((intptr_t)mcall->entry_point());
1643           }
1644 
1645           // Save the return address
1646           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1647 
1648           observe_safepoint = mcall->guaranteed_safepoint();
1649         }
1650 
1651         // sfn will be valid whenever mcall is valid now because of inheritance
1652         if (observe_safepoint) {
1653           // Handle special safepoint nodes for synchronization
1654           if (!is_mcall) {
1655             MachSafePointNode *sfn = mach->as_MachSafePoint();
1656             // !!!!! Stubs only need an oopmap right now, so bail out
1657             if (sfn->jvms()->method() == NULL) {
1658               // Write the oopmap directly to the code blob??!!
1659               continue;
1660             }
1661           } // End synchronization
1662 
1663           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1787       if ((node_offsets != NULL) && (n->_idx < node_offset_limit)) {
1788         node_offsets[n->_idx] = cb->insts_size();
1789       }
1790 #endif
1791       assert(!C->failing(), "Should not reach here if failing.");
1792 
1793       // "Normal" instruction case
1794       DEBUG_ONLY(uint instr_offset = cb->insts_size());
1795       n->emit(*cb, C->regalloc());
1796       current_offset = cb->insts_size();
1797 
1798       // Above we only verified that there is enough space in the instruction section.
1799       // However, the instruction may emit stubs that cause code buffer expansion.
1800       // Bail out here if expansion failed due to a lack of code cache space.
1801       if (C->failing()) {
1802         return;
1803       }
1804 
1805       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1806              "ret_addr_offset() not within emitted code");
1807 #ifdef ASSERT
1808       uint n_size = n->size(C->regalloc());
1809       if (n_size < (current_offset-instr_offset)) {
1810         MachNode* mach = n->as_Mach();
1811         n->dump();
1812         mach->dump_format(C->regalloc(), tty);
1813         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1814         Disassembler::decode(cb->insts_begin() + instr_offset, cb->insts_begin() + current_offset + 1, tty);
1815         tty->print_cr(" ------------------- ");
1816         BufferBlob* blob = this->scratch_buffer_blob();
1817         address blob_begin = blob->content_begin();
1818         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1819         assert(false, "wrong size of mach node");
1820       }
1821 #endif
1822       non_safepoints.observe_instruction(n, current_offset);
1823 
1824       // mcall is last "call" that can be a safepoint
1825       // record it so we can see if a poll will directly follow it
1826       // in which case we'll need a pad to make the PcDesc sites unique

3176         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3177         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3178       }
3179     }
3180     // Do not allow defs of new derived values to float above GC
3181     // points unless the base is definitely available at the GC point.
3182 
3183     Node *m = b->get_node(i);
3184 
3185     // Add precedence edge from following safepoint to use of derived pointer
3186     if( last_safept_node != end_node &&
3187         m != last_safept_node) {
3188       for (uint k = 1; k < m->req(); k++) {
3189         const Type *t = m->in(k)->bottom_type();
3190         if( t->isa_oop_ptr() &&
3191             t->is_ptr()->offset() != 0 ) {
3192           last_safept_node->add_prec( m );
3193           break;
3194         }
3195       }
3196 
3197       // Do not allow a CheckCastPP node whose input is a raw pointer to
3198       // float past a safepoint.  This can occur when a buffered inline
3199       // type is allocated in a loop and the CheckCastPP from that
3200       // allocation is reused outside the loop.  If the use inside the
3201       // loop is scalarized the CheckCastPP will no longer be connected
3202       // to the loop safepoint.  See JDK-8264340.
3203       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
3204         Node *def = m->in(1);
3205         if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
3206           last_safept_node->add_prec(m);
3207         }
3208       }
3209     }
3210 
3211     if( n->jvms() ) {           // Precedence edge from derived to safept
3212       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3213       if( b->get_node(last_safept) != last_safept_node ) {
3214         last_safept = b->find_node(last_safept_node);
3215       }
3216       for( uint j=last_safept; j > i; j-- ) {
3217         Node *mach = b->get_node(j);
3218         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3219           mach->add_prec( n );
3220       }
3221       last_safept = i;
3222       last_safept_node = m;
3223     }
3224   }
3225 
3226   if (fat_proj_seen) {
3227     // Garbage collect pinch nodes that were not consumed.
3228     // They are usually created by a fat kill MachProj for a call.

3347 }
3348 #endif
3349 
3350 //-----------------------init_scratch_buffer_blob------------------------------
3351 // Construct a temporary BufferBlob and cache it for this compile.
3352 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3353   // If there is already a scratch buffer blob allocated and the
3354   // constant section is big enough, use it.  Otherwise free the
3355   // current and allocate a new one.
3356   BufferBlob* blob = scratch_buffer_blob();
3357   if ((blob != NULL) && (const_size <= _scratch_const_size)) {
3358     // Use the current blob.
3359   } else {
3360     if (blob != NULL) {
3361       BufferBlob::free(blob);
3362     }
3363 
3364     ResourceMark rm;
3365     _scratch_const_size = const_size;
3366     int size = C2Compiler::initial_code_buffer_size(const_size);
3367     if (C->has_scalarized_args()) {
3368       // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
3369       // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
3370       ciMethod* method = C->method();
3371       int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
3372       int arg_num = 0;
3373       if (!method->is_static()) {
3374         if (method->is_scalarized_arg(arg_num)) {
3375           size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
3376         }
3377         arg_num++;
3378       }
3379       for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
3380         if (method->is_scalarized_arg(arg_num)) {
3381           size += str.type()->as_inline_klass()->oop_count() * barrier_size;
3382         }
3383         arg_num++;
3384       }
3385     }
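       // (I.e. the estimate grows by oop_count * barrier_size bytes for every
       // scalarized inline type argument, receiver included, since each oop
       // field load in a MachVEPNode may expand into a GC barrier.)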
3386     blob = BufferBlob::create("Compile::scratch_buffer", size);
3387     // Record the buffer blob for next time.
3388     set_scratch_buffer_blob(blob);
3389     // Have we run out of code space?
3390     if (scratch_buffer_blob() == NULL) {
3391       // Let CompilerBroker disable further compilations.
3392       C->record_failure("Not enough space for scratch buffer in CodeCache");
3393       return;
3394     }
3395   }
3396 
3397   // Initialize the relocation buffers
3398   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3399   set_scratch_locs_memory(locs_buf);
3400 }
3401 
3402 
3403 //-----------------------scratch_emit_size-------------------------------------
3404 // Helper function that computes size by emitting code
3405 uint PhaseOutput::scratch_emit_size(const Node* n) {

3436   buf.insts()->set_scratch_emit();
3437   buf.stubs()->set_scratch_emit();
3438 
3439   // Do the emission.
3440 
3441   Label fakeL; // Fake label for branch instructions.
3442   Label*   saveL = NULL;
3443   uint save_bnum = 0;
3444   bool is_branch = n->is_MachBranch();
3445   if (is_branch) {
3446     MacroAssembler masm(&buf);
3447     masm.bind(fakeL);
3448     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3449     n->as_MachBranch()->label_set(&fakeL, 0);
3450   }
3451   n->emit(buf, C->regalloc());
3452 
3453   // Emitting into the scratch buffer should not fail
3454   assert (!C->failing(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3455 
3456   // Restore label.
3457   if (is_branch) {
3458     n->as_MachBranch()->label_set(saveL, save_bnum);
3459   }
3460 
3461   // End scratch_emit_size section.
3462   set_in_scratch_emit_size(false);
3463 
3464   return buf.insts_size();
3465 }
3466 
3467 void PhaseOutput::install() {
3468   if (!C->should_install_code()) {
3469     return;
3470   } else if (C->stub_function() != NULL) {
3471     install_stub(C->stub_name());
3472   } else {
3473     install_code(C->method(),
3474                  C->entry_bci(),
3475                  CompileBroker::compiler2(),
3476                  C->has_unsafe_access(),
3477                  SharedRuntime::is_wide_vector(C->max_vector_size()),
3478                  C->rtm_state());
3479   }

3483                                int               entry_bci,
3484                                AbstractCompiler* compiler,
3485                                bool              has_unsafe_access,
3486                                bool              has_wide_vectors,
3487                                RTMState          rtm_state) {
3488   // Check if we want to skip execution of all compiled code.
3489   {
3490 #ifndef PRODUCT
3491     if (OptoNoExecute) {
3492       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3493       return;
3494     }
3495 #endif
3496     Compile::TracePhase tp("install_code", &timers[_t_registerMethod]);
3497 
3498     if (C->is_osr_compilation()) {
3499       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3500       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3501     } else {
3502       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
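       // Inline type entry points that were not emitted separately still hold
       // the -1 sentinel set in Output(); patch them to the verified entry
       // offset so every recorded entry is valid.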
3503       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
3504         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
3505       }
3506       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
3507         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
3508       }
3509       if (_code_offsets.value(CodeOffsets::Entry) == -1) {
3510         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
3511       }
3512       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3513     }
3514 
3515     C->env()->register_method(target,
3516                               entry_bci,
3517                               &_code_offsets,
3518                               _orig_pc_slot_offset_in_bytes,
3519                               code_buffer(),
3520                               frame_size_in_words(),
3521                               _oop_map_set,
3522                               &_handler_table,
3523                               inc_table(),
3524                               compiler,
3525                               has_unsafe_access,
3526                               SharedRuntime::is_wide_vector(C->max_vector_size()),
3527                               C->has_monitors(),
3528                               0,
3529                               C->rtm_state());
3530 
3531     if (C->log() != NULL) { // Print code cache state into compiler log
3532       C->log()->code_cache_state();
3533     }
3534   }
3535 }
3536 void PhaseOutput::install_stub(const char* stub_name) {
3537   // Entry point will be accessed using stub_entry_point();
3538   if (code_buffer() == NULL) {
3539     Matcher::soft_match_failure();
3540   } else {
3541     if (PrintAssembly && (WizardMode || Verbose))
3542       tty->print_cr("### Stub::%s", stub_name);
3543 
3544     if (!C->failing()) {
3545       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3546 
3547       // Make the NMethod
3548       // For now we mark the frame as never safe for profile stackwalking
3549       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,