src/hotspot/share/opto/output.cpp (old)

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/assembler.inline.hpp"
  26 #include "code/aotCodeCache.hpp"
  27 #include "code/compiledIC.hpp"
  28 #include "code/debugInfo.hpp"
  29 #include "code/debugInfoRec.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compilerDirectives.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "compiler/oopMap.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/c2/barrierSetC2.hpp"
  36 #include "memory/allocation.hpp"
  37 #include "opto/ad.hpp"
  38 #include "opto/block.hpp"
  39 #include "opto/c2_MacroAssembler.hpp"
  40 #include "opto/c2compiler.hpp"
  41 #include "opto/callnode.hpp"
  42 #include "opto/cfgnode.hpp"
  43 #include "opto/locknode.hpp"
  44 #include "opto/machnode.hpp"
  45 #include "opto/node.hpp"
  46 #include "opto/optoreg.hpp"
  47 #include "opto/output.hpp"
  48 #include "opto/regalloc.hpp"
  49 #include "opto/type.hpp"
  50 #include "runtime/sharedRuntime.hpp"
  51 #include "utilities/macros.hpp"
  52 #include "utilities/powerOfTwo.hpp"
  53 #include "utilities/xmlstream.hpp"
  54 
  55 #ifndef PRODUCT

 212     _first_block_size(0),
 213     _handler_table(),
 214     _inc_table(),
 215     _stub_list(),
 216     _oop_map_set(nullptr),
 217     _scratch_buffer_blob(nullptr),
 218     _scratch_locs_memory(nullptr),
 219     _scratch_const_size(-1),
 220     _in_scratch_emit_size(false),
 221     _frame_slots(0),
 222     _code_offsets(),
 223     _node_bundling_limit(0),
 224     _node_bundling_base(nullptr),
 225     _orig_pc_slot(0),
 226     _orig_pc_slot_offset_in_bytes(0),
 227     _buf_sizes(),
 228     _block(nullptr),
 229     _index(0) {
 230   C->set_output(this);
 231   if (C->stub_name() == nullptr) {
 232     _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
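         // (On a typical 64-bit VM, sizeof(address) / VMRegImpl::stack_slot_size
         // is 8 / 4 = 2, i.e. the saved original PC occupies the last two 32-bit
         // stack slots of the fixed area.)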
 233   }
 234 }
 235 
 236 PhaseOutput::~PhaseOutput() {
 237   C->set_output(nullptr);
 238   if (_scratch_buffer_blob != nullptr) {
 239     BufferBlob::free(_scratch_buffer_blob);
 240   }
 241 }
 242 
 243 void PhaseOutput::perform_mach_node_analysis() {
 244   // Late barrier analysis must be done after schedule and bundle.
 245   // Otherwise, liveness-based spilling will fail.
 246   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 247   bs->late_barrier_analysis();
 248 
 249   pd_perform_mach_node_analysis();
 250 
 251   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 252 }
 253 
 254 // Convert Nodes to instruction bits and pass off to the VM
 255 void PhaseOutput::Output() {
 256   // RootNode's block should be empty by now
 257   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 258 
 259   // The number of new nodes (mostly MachNop) is proportional to
 260   // the number of java calls and inner loops which are aligned.
 261   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 262                             C->inner_loops()*(OptoLoopAlignment-1)),
 263                            "out of nodes before code generation" ) ) {
 264     return;
 265   }
 266   // Make sure I can find the Start Node
 267   Block *entry = C->cfg()->get_block(1);
 268   Block *broot = C->cfg()->get_root_block();
 269 
 270   const StartNode *start = entry->head()->as_Start();
 271 
 272   // Replace StartNode with prolog
 273   MachPrologNode *prolog = new MachPrologNode();
 274   entry->map_node(prolog, 0);
 275   C->cfg()->map_node_to_block(prolog, entry);
 276   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 277 
 278   // Virtual methods need an unverified entry point
 279 
 280   if( C->is_osr_compilation() ) {
 281     if( PoisonOSREntry ) {
 282       // TODO: Should use a ShouldNotReachHereNode...
 283       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 284     }
 285   } else {
 286     if( C->method() && !C->method()->flags().is_static() ) {
 287       // Insert unverified entry point
 288       C->cfg()->insert( broot, 0, new MachUEPNode() );
 289     }
 290 
 291   }
 292 
 293   // Break before main entry point
 294   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 295       (OptoBreakpoint && C->is_method_compilation())       ||
 296       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 297       (OptoBreakpointC2R && !C->method())                   ) {
 298     // checking for C->method() means that OptoBreakpoint does not apply to
 299     // runtime stubs or frame converters
 300     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 301   }
 302 
 303   // Insert epilogs before every return
 304   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 305     Block* block = C->cfg()->get_block(i);
 306     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 307       Node* m = block->end();
 308       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 309         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 310         block->add_inst(epilog);
 311         C->cfg()->map_node_to_block(epilog, block);
 312       }
 313     }
 314   }
 315 
 316   // Keeper of sizing aspects
 317   _buf_sizes = BufferSizingData();
 318 
 319   // Initialize code buffer
 320   estimate_buffer_size(_buf_sizes._const);
 321   if (C->failing()) return;
 322 
 323   // Pre-compute the length of blocks and replace
 324   // long branches with short ones if the machine supports it.
 325   // Must be done before ScheduleAndBundle due to SPARC delay slots
 326   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 327   blk_starts[0] = 0;
 328   shorten_branches(blk_starts);
 329 
 330   ScheduleAndBundle();
 331   if (C->failing()) {
 332     return;
 333   }
 334 
 335   perform_mach_node_analysis();
 336 
 337   // Complete sizing of codebuffer
 338   CodeBuffer* cb = init_buffer();
 339   if (cb == nullptr || C->failing()) {
 340     return;
 341   }
 342 
 343   BuildOopMaps();
 344 
 345   if (C->failing())  {
 346     return;
 347   }
 348 
 349   C2_MacroAssembler masm(cb);
 350   fill_buffer(&masm, blk_starts);
 351 }
 352 
 353 bool PhaseOutput::need_stack_bang(int frame_size_in_bytes) const {
 354   // Determine if we need to generate a stack overflow check.
 355   // Do it if the method is not a stub function and
 356   // has java calls or has frame size > vm_page_size/8.
 357   // The debug VM checks that deoptimization doesn't trigger an
 358   // unexpected stack overflow (compiled method stack banging should
 359   // guarantee it doesn't happen) so we always need the stack bang in
 360   // a debug VM.
 361   return (C->stub_function() == nullptr &&
 362           (C->has_java_calls() || frame_size_in_bytes > (int)(os::vm_page_size())>>3
 363            DEBUG_ONLY(|| true)));
 364 }
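// A worked example of the threshold above: with a 4 KiB page, vm_page_size() >> 3
// is 4096 / 8 = 512 bytes, so in a product VM only methods with no Java calls and
// a frame of at most 512 bytes skip the bang.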
 365 
 366 bool PhaseOutput::need_register_stack_bang() const {
 367   // Determine if we need to generate a register stack overflow check.
 368   // This is only used on architectures which have split register
 369   // and memory stacks.
 370   // Bang if the method is not a stub function and has java calls

 471     // Sum all instruction sizes to compute block size
 472     uint last_inst = block->number_of_nodes();
 473     uint blk_size = 0;
 474     for (uint j = 0; j < last_inst; j++) {
 475       _index = j;
 476       Node* nj = block->get_node(_index);
 477       // Handle machine instruction nodes
 478       if (nj->is_Mach()) {
 479         MachNode* mach = nj->as_Mach();
 480         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 481         reloc_size += mach->reloc();
 482         if (mach->is_MachCall()) {
 483           // add size information for trampoline stub
 484           // class CallStubImpl is platform-specific and defined in the *.ad files.
 485           stub_size  += CallStubImpl::size_call_trampoline();
 486           reloc_size += CallStubImpl::reloc_call_trampoline();
 487 
 488           MachCallNode *mcall = mach->as_MachCall();
 489           // This destination address is NOT PC-relative
 490 
 491           mcall->method_set((intptr_t)mcall->entry_point());
 492 
 493           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 494             stub_size  += CompiledDirectCall::to_interp_stub_size();
 495             reloc_size += CompiledDirectCall::reloc_to_interp_stub();
 496           }
 497         } else if (mach->is_MachSafePoint()) {
 498           // If call/safepoint are adjacent, account for possible
 499           // nop to disambiguate the two safepoints.
 500           // ScheduleAndBundle() can rearrange nodes in a block,
 501           // check for all offsets inside this block.
 502           if (last_call_adr >= blk_starts[i]) {
 503             blk_size += nop_size;
 504           }
 505         }
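        // (A call and a safepoint emitted back to back would otherwise share a
        // PC, making their PcDesc entries ambiguous; the worst-case extra nop
        // keeps the two sites distinct.)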
 506         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 507           // Nop is inserted between "avoid back to back" instructions.
 508           // ScheduleAndBundle() can rearrange nodes in a block,
 509           // check for all offsets inside this block.
 510           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 511             blk_size += nop_size;

 726     // New functionality:
 727     //   Assert if the local is not top. In product mode let the new node
 728     //   override the old entry.
 729     assert(local == C->top(), "LocArray collision");
 730     if (local == C->top()) {
 731       return;
 732     }
 733     array->pop();
 734   }
 735   const Type *t = local->bottom_type();
 736 
 737   // Is it a safepoint scalar object node?
 738   if (local->is_SafePointScalarObject()) {
 739     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 740 
 741     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 742     if (sv == nullptr) {
 743       ciKlass* cik = t->is_oopptr()->exact_klass();
 744       assert(cik->is_instance_klass() ||
 745              cik->is_array_klass(), "Not supported allocation.");
 746       sv = new ObjectValue(spobj->_idx,
 747                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
 748       set_sv_for_object_node(objs, sv);
 749 
 750       uint first_ind = spobj->first_index(sfpt->jvms());
 751       for (uint i = 0; i < spobj->n_fields(); i++) {
 752         Node* fld_node = sfpt->in(first_ind+i);
 753         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 754       }
 755     }
 756     array->append(sv);
 757     return;
 758   } else if (local->is_SafePointScalarMerge()) {
 759     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 760     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 761 
 762     if (mv == nullptr) {
 763       GrowableArray<ScopeValue*> deps;
 764 
 765       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 766       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 767       assert(deps.length() == 1, "missing value");
 768 
 769       int selector_idx = smerge->selector_idx(sfpt->jvms());
 770       (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);

 976     if (!n->is_SafePointScalarObject()) {
 977       continue;
 978     }
 979 
 980     ObjectValue* other = sv_for_node_id(objs, n->_idx);
 981     if (ov == other) {
 982       return true;
 983     }
 984   }
 985   return false;
 986 }
 987 
 988 //--------------------------Process_OopMap_Node--------------------------------
 989 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
 990   // Handle special safepoint nodes for synchronization
 991   MachSafePointNode *sfn   = mach->as_MachSafePoint();
 992   MachCallNode      *mcall;
 993 
 994   int safepoint_pc_offset = current_offset;
 995   bool return_oop = false;
 996   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
 997   bool arg_escape = false;
 998 
 999   // Add the safepoint in the DebugInfoRecorder
1000   if( !mach->is_MachCall() ) {
1001     mcall = nullptr;
1002     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1003   } else {
1004     mcall = mach->as_MachCall();
1005 
1006     if (mcall->is_MachCallJava()) {
1007       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1008     }
1009 
1010     // Check if a call returns an object.
1011     if (mcall->returns_pointer()) {
1012       return_oop = true;
1013     }
1014     safepoint_pc_offset += mcall->ret_addr_offset();
1015     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1016   }
1017 
1018   // Loop over the JVMState list to add scope information
1019   // Do not skip safepoints with a null method; they need monitor info
1020   JVMState* youngest_jvms = sfn->jvms();
1021   int max_depth = youngest_jvms->depth();
1022 
1023   // Allocate the object pool for scalar-replaced objects -- the map from
1024   // small-integer keys (which can be recorded in the local and ostack
1025   // arrays) to descriptions of the object state.
1026   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1027 
1028   // Visit scopes from oldest to youngest.
1029   for (int depth = 1; depth <= max_depth; depth++) {
1030     JVMState* jvms = youngest_jvms->of_depth(depth);
1031     int idx;
1032     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1033     // Safepoints that do not have method() set only provide oop-map and monitor info

1062     // Build the growable array of MonitorValues for the monitors
1063     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
1064 
1065     // Loop over monitors and insert into array
1066     for (idx = 0; idx < num_mon; idx++) {
1067       // Grab the node that defines this monitor
1068       Node* box_node = sfn->monitor_box(jvms, idx);
1069       Node* obj_node = sfn->monitor_obj(jvms, idx);
1070 
1071       // Create ScopeValue for object
1072       ScopeValue *scval = nullptr;
1073 
1074       if (obj_node->is_SafePointScalarObject()) {
1075         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
1076         scval = PhaseOutput::sv_for_node_id(objs, spobj->_idx);
1077         if (scval == nullptr) {
1078           const Type *t = spobj->bottom_type();
1079           ciKlass* cik = t->is_oopptr()->exact_klass();
1080           assert(cik->is_instance_klass() ||
1081                  cik->is_array_klass(), "Not supported allocation.");
1082           ObjectValue* sv = new ObjectValue(spobj->_idx,
1083                                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
1084           PhaseOutput::set_sv_for_object_node(objs, sv);
1085 
1086           uint first_ind = spobj->first_index(youngest_jvms);
1087           for (uint i = 0; i < spobj->n_fields(); i++) {
1088             Node* fld_node = sfn->in(first_ind+i);
1089             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
1090           }
1091           scval = sv;
1092         }
1093       } else if (obj_node->is_SafePointScalarMerge()) {
1094         SafePointScalarMergeNode* smerge = obj_node->as_SafePointScalarMerge();
1095         ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
1096 
1097         if (mv == nullptr) {
1098           GrowableArray<ScopeValue*> deps;
1099 
1100           int merge_pointer_idx = smerge->merge_pointer_idx(youngest_jvms);
1101           FillLocArray(0, sfn, sfn->in(merge_pointer_idx), &deps, objs);
1102           assert(deps.length() == 1, "missing value");
1103 

1170     DebugToken *locvals = C->debug_info()->create_scope_values(locarray);
1171     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1172     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1173 
1174     // Make method available for all Safepoints
1175     ciMethod* scope_method = method ? method : C->method();
1176     // Describe the scope here
1177     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1178     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1179     // Now we can describe the scope.
1180     methodHandle null_mh;
1181     bool rethrow_exception = false;
1182     C->debug_info()->describe_scope(
1183       safepoint_pc_offset,
1184       null_mh,
1185       scope_method,
1186       jvms->bci(),
1187       jvms->should_reexecute(),
1188       rethrow_exception,
1189       return_oop,
1190       has_ea_local_in_scope,
1191       arg_escape,
1192       locvals,
1193       expvals,
1194       monvals
1195     );
1196   } // End jvms loop
1197 
1198   // Mark the end of the scope set.
1199   C->debug_info()->end_safepoint(safepoint_pc_offset);
1200 }
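// Net effect: one describe_scope() call per JVMState depth, oldest first, all
// keyed to the same safepoint_pc_offset; end_safepoint() then seals the debug
// record for this PC.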
1201 
1202 
1203 
1204 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1205 class NonSafepointEmitter {
1206     Compile*  C;
1207     JVMState* _pending_jvms;
1208     int       _pending_offset;
1209 

1523           MachNode *nop = new MachNopNode(nops_cnt);
1524           block->insert_node(nop, j++);
1525           last_inst++;
1526           C->cfg()->map_node_to_block(nop, block);
1527           // Ensure enough space.
1528           masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1529           if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1530             C->record_failure("CodeCache is full");
1531             return;
1532           }
1533           nop->emit(masm, C->regalloc());
1534           masm->code()->flush_bundle(true);
1535           current_offset = masm->offset();
1536         }
1537 
1538         bool observe_safepoint = is_sfn;
1539         // Remember the start of the last call in a basic block
1540         if (is_mcall) {
1541           MachCallNode *mcall = mach->as_MachCall();
1542 
1543           // This destination address is NOT PC-relative
1544           mcall->method_set((intptr_t)mcall->entry_point());
1545 
1546           // Save the return address
1547           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1548 
1549           observe_safepoint = mcall->guaranteed_safepoint();
1550         }
1551 
1552         // sfn is valid whenever mcall is valid, because MachCallNode inherits from MachSafePointNode
1553         if (observe_safepoint) {
1554           // Handle special safepoint nodes for synchronization
1555           if (!is_mcall) {
1556             MachSafePointNode *sfn = mach->as_MachSafePoint();
1557             // !!!!! Stubs only need an oopmap right now, so bail out
1558             if (sfn->jvms()->method() == nullptr) {
1559               // Write the oopmap directly to the code blob??!!
1560               continue;
1561             }
1562           } // End synchronization
1563 
1564           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1662       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1663         node_offsets[n->_idx] = masm->offset();
1664       }
1665 #endif
1666       assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1667 
1668       // "Normal" instruction case
1669       DEBUG_ONLY(uint instr_offset = masm->offset());
1670       n->emit(masm, C->regalloc());
1671       current_offset = masm->offset();
1672 
1673       // Above we only verified that there is enough space in the instruction section.
1674       // However, the instruction may emit stubs that cause code buffer expansion.
1675       // Bail out here if expansion failed due to a lack of code cache space.
1676       if (C->failing()) {
1677         return;
1678       }
1679 
1680       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1681              "ret_addr_offset() not within emitted code");
1682 
1683 #ifdef ASSERT
1684       uint n_size = n->size(C->regalloc());
1685       if (n_size < (current_offset-instr_offset)) {
1686         MachNode* mach = n->as_Mach();
1687         n->dump();
1688         mach->dump_format(C->regalloc(), tty);
1689         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1690         Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
1691         tty->print_cr(" ------------------- ");
1692         BufferBlob* blob = this->scratch_buffer_blob();
1693         address blob_begin = blob->content_begin();
1694         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1695         assert(false, "wrong size of mach node");
1696       }
1697 #endif
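      // (The debug block above cross-checks n->size(), which is computed by a
      // scratch emission, against the bytes actually emitted; a mismatch usually
      // means the size and emit rules in the .ad file disagree.)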
1698       non_safepoints.observe_instruction(n, current_offset);
1699 
1700       // mcall is the last "call" that can be a safepoint;
1701       // record it so we can see if a poll will directly follow it,
1702       // in which case we'll need a pad to make the PcDesc sites unique.

2917         anti_do_use( b, n, _regalloc->get_reg_first(def) );
2918         anti_do_use( b, n, _regalloc->get_reg_second(def) );
2919       }
2920     }
2921     // Do not allow defs of new derived values to float above GC
2922     // points unless the base is definitely available at the GC point.
2923 
2924     Node *m = b->get_node(i);
2925 
2926     // Add precedence edge from following safepoint to use of derived pointer
2927     if( last_safept_node != end_node &&
2928         m != last_safept_node) {
2929       for (uint k = 1; k < m->req(); k++) {
2930         const Type *t = m->in(k)->bottom_type();
2931         if( t->isa_oop_ptr() &&
2932             t->is_ptr()->offset() != 0 ) {
2933           last_safept_node->add_prec( m );
2934           break;
2935         }
2936       }
2937     }
2938 
2939     if( n->jvms() ) {           // Precedence edge from derived to safept
2940       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
2941       if( b->get_node(last_safept) != last_safept_node ) {
2942         last_safept = b->find_node(last_safept_node);
2943       }
2944       for( uint j=last_safept; j > i; j-- ) {
2945         Node *mach = b->get_node(j);
2946         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
2947           mach->add_prec( n );
2948       }
2949       last_safept = i;
2950       last_safept_node = m;
2951     }
2952   }
2953 
2954   if (fat_proj_seen) {
2955     // Garbage collect pinch nodes that were not consumed.
2956     // They are usually created by a fat kill MachProj for a call.

3065 }
3066 #endif
3067 
3068 //-----------------------init_scratch_buffer_blob------------------------------
3069 // Construct a temporary BufferBlob and cache it for this compile.
3070 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3071   // If there is already a scratch buffer blob allocated and the
3072   // constant section is big enough, use it.  Otherwise free the
3073   // current and allocate a new one.
3074   BufferBlob* blob = scratch_buffer_blob();
3075   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3076     // Use the current blob.
3077   } else {
3078     if (blob != nullptr) {
3079       BufferBlob::free(blob);
3080     }
3081 
3082     ResourceMark rm;
3083     _scratch_const_size = const_size;
3084     int size = C2Compiler::initial_code_buffer_size(const_size);
3085     blob = BufferBlob::create("Compile::scratch_buffer", size);
3086     // Record the buffer blob for next time.
3087     set_scratch_buffer_blob(blob);
3088     // Have we run out of code space?
3089     if (scratch_buffer_blob() == nullptr) {
3090       // Let CompileBroker disable further compilations.
3091       C->record_failure("Not enough space for scratch buffer in CodeCache");
3092       return;
3093     }
3094   }
3095 
3096   // Initialize the relocation buffers
3097   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3098   set_scratch_locs_memory(locs_buf);
3099 }
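// Note: the relocation buffer is carved from the tail of the same blob
// (content_end() - MAX_locs_size), so a single allocation backs both the
// scratch instructions and their relocation records.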
3100 
3101 
3102 //-----------------------scratch_emit_size-------------------------------------
3103 // Helper function that computes size by emitting code
3104 uint PhaseOutput::scratch_emit_size(const Node* n) {

3135   buf.insts()->set_scratch_emit();
3136   buf.stubs()->set_scratch_emit();
3137 
3138   // Do the emission.
3139 
3140   Label fakeL; // Fake label for branch instructions.
3141   Label*   saveL = nullptr;
3142   uint save_bnum = 0;
3143   bool is_branch = n->is_MachBranch();
3144   C2_MacroAssembler masm(&buf);
3145   masm.bind(fakeL);
3146   if (is_branch) {
3147     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3148     n->as_MachBranch()->label_set(&fakeL, 0);
3149   }
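  // Branches are temporarily pointed at fakeL, which is already bound, so the
  // size run never depends on an unresolved target; the real label is restored
  // once emission is done.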
3150   n->emit(&masm, C->regalloc());
3151 
3152   // Emitting into the scratch buffer should not fail
3153   assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3154 
3155   if (is_branch) // Restore label.
3156     n->as_MachBranch()->label_set(saveL, save_bnum);
3157 
3158   // End scratch_emit_size section.
3159   set_in_scratch_emit_size(false);
3160 
3161   return buf.insts_size();
3162 }
3163 
3164 void PhaseOutput::install() {
3165   if (!C->should_install_code()) {
3166     return;
3167   } else if (C->stub_function() != nullptr) {
3168     install_stub(C->stub_name());
3169   } else {
3170     install_code(C->method(),
3171                  C->entry_bci(),
3172                  CompileBroker::compiler2(),
3173                  C->has_unsafe_access(),
3174                  SharedRuntime::is_wide_vector(C->max_vector_size()));
3175   }
3176 }
3177 
3178 void PhaseOutput::install_code(ciMethod*         target,
3179                                int               entry_bci,
3180                                AbstractCompiler* compiler,
3181                                bool              has_unsafe_access,
3182                                bool              has_wide_vectors) {
3183   // Check if we want to skip execution of all compiled code.
3184   {
3185 #ifndef PRODUCT
3186     if (OptoNoExecute) {
3187       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3188       return;
3189     }
3190 #endif
3191     Compile::TracePhase tp(_t_registerMethod);
3192 
3193     if (C->is_osr_compilation()) {
3194       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3195       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3196     } else {
3197       if (!target->is_static()) {
3198         // The UEP of an nmethod ensures that the VEP is padded. However, the padding of the UEP is placed
3199         // before the inline cache check, so we don't have to execute any nop instructions when dispatching
3200         // through the UEP, yet we can ensure that the VEP is aligned appropriately.
3201         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3202       }
3203       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3204       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3205     }
3206 
3207     C->env()->register_method(target,
3208                                      entry_bci,
3209                                      &_code_offsets,
3210                                      _orig_pc_slot_offset_in_bytes,
3211                                      code_buffer(),
3212                                      frame_size_in_words(),
3213                                      oop_map_set(),
3214                                      &_handler_table,
3215                                      inc_table(),
3216                                      compiler,
3217                                      has_unsafe_access,
3218                                      SharedRuntime::is_wide_vector(C->max_vector_size()),
3219                                      C->has_monitors(),
3220                                      C->has_scoped_access(),
3221                                      0);
3222 
3223     if (C->log() != nullptr) { // Print code cache state into compiler log
3224       C->log()->code_cache_state();
3225     }
3226   }
3227 }
3228 void PhaseOutput::install_stub(const char* stub_name) {
3229   // Entry point will be accessed using stub_entry_point();
3230   if (code_buffer() == nullptr) {
3231     Matcher::soft_match_failure();
3232   } else {
3233     if (PrintAssembly && (WizardMode || Verbose))
3234       tty->print_cr("### Stub::%s", stub_name);
3235 
3236     if (!C->failing()) {
3237       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3238 
3239       // Make the NMethod
3240       // For now we mark the frame as never safe for profile stackwalking
3241       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,

src/hotspot/share/opto/output.cpp (new)

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/assembler.inline.hpp"
  26 #include "code/aotCodeCache.hpp"
  27 #include "code/compiledIC.hpp"
  28 #include "code/debugInfo.hpp"
  29 #include "code/debugInfoRec.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compilerDirectives.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "compiler/oopMap.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/c2/barrierSetC2.hpp"
  36 #include "gc/shared/gc_globals.hpp"
  37 #include "memory/allocation.hpp"
  38 #include "opto/ad.hpp"
  39 #include "opto/block.hpp"
  40 #include "opto/c2_MacroAssembler.hpp"
  41 #include "opto/c2compiler.hpp"
  42 #include "opto/callnode.hpp"
  43 #include "opto/cfgnode.hpp"
  44 #include "opto/locknode.hpp"
  45 #include "opto/machnode.hpp"
  46 #include "opto/node.hpp"
  47 #include "opto/optoreg.hpp"
  48 #include "opto/output.hpp"
  49 #include "opto/regalloc.hpp"
  50 #include "opto/type.hpp"
  51 #include "runtime/sharedRuntime.hpp"
  52 #include "utilities/macros.hpp"
  53 #include "utilities/powerOfTwo.hpp"
  54 #include "utilities/xmlstream.hpp"
  55 
  56 #ifndef PRODUCT

 213     _first_block_size(0),
 214     _handler_table(),
 215     _inc_table(),
 216     _stub_list(),
 217     _oop_map_set(nullptr),
 218     _scratch_buffer_blob(nullptr),
 219     _scratch_locs_memory(nullptr),
 220     _scratch_const_size(-1),
 221     _in_scratch_emit_size(false),
 222     _frame_slots(0),
 223     _code_offsets(),
 224     _node_bundling_limit(0),
 225     _node_bundling_base(nullptr),
 226     _orig_pc_slot(0),
 227     _orig_pc_slot_offset_in_bytes(0),
 228     _buf_sizes(),
 229     _block(nullptr),
 230     _index(0) {
 231   C->set_output(this);
 232   if (C->stub_name() == nullptr) {
 233     int fixed_slots = C->fixed_slots();
 234     if (C->needs_stack_repair()) {
 235       fixed_slots -= 2;
 236     }
 237     // TODO 8284443 Only reserve extra slot if needed
 238     if (InlineTypeReturnedAsFields) {
 239       fixed_slots -= 2;
 240     }
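         // (Each -2 above steps over one reserved pointer-sized word, i.e. two
         // 32-bit stack slots, so the original-PC slot is placed below the
         // reserved area.)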
 241     _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
 242   }
 243 }
 244 
 245 PhaseOutput::~PhaseOutput() {
 246   C->set_output(nullptr);
 247   if (_scratch_buffer_blob != nullptr) {
 248     BufferBlob::free(_scratch_buffer_blob);
 249   }
 250 }
 251 
 252 void PhaseOutput::perform_mach_node_analysis() {
 253   // Late barrier analysis must be done after schedule and bundle.
 254   // Otherwise, liveness-based spilling will fail.
 255   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 256   bs->late_barrier_analysis();
 257 
 258   pd_perform_mach_node_analysis();
 259 
 260   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 261 }
 262 
 263 // Convert Nodes to instruction bits and pass off to the VM
 264 void PhaseOutput::Output() {
 265   // RootNode's block should be empty by now
 266   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 267 
 268   // The number of new nodes (mostly MachNop) is proportional to
 269   // the number of java calls and inner loops which are aligned.
 270   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 271                             C->inner_loops()*(OptoLoopAlignment-1)),
 272                            "out of nodes before code generation" ) ) {
 273     return;
 274   }
 275   // Make sure I can find the Start Node
 276   Block *entry = C->cfg()->get_block(1);
 277   Block *broot = C->cfg()->get_root_block();
 278 
 279   const StartNode *start = entry->head()->as_Start();
 280 
 281   // Replace StartNode with prolog
 282   Label verified_entry;
 283   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
 284   entry->map_node(prolog, 0);
 285   C->cfg()->map_node_to_block(prolog, entry);
 286   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 287 
 288   // Virtual methods need an unverified entry point
 289   if (C->is_osr_compilation()) {
 290     if (PoisonOSREntry) {
 291       // TODO: Should use a ShouldNotReachHereNode...
 292       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 293     }
 294   } else {
 295     if (C->method()) {
 296       if (C->method()->has_scalarized_args()) {
 297         // Add entry point to unpack all inline type arguments
 298         C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
 299         if (!C->method()->is_static()) {
 300           // Add verified/unverified entry points that unpack only the inline type receiver at interface calls
 301           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
 302           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true,  /* receiver_only */ true));
 303           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
 304         }
 305       } else if (!C->method()->is_static()) {
 306         // Insert unverified entry point
 307         C->cfg()->insert(broot, 0, new MachUEPNode());
 308       }
 309     }
 310   }
 311 
 312   // Break before main entry point
 313   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 314       (OptoBreakpoint && C->is_method_compilation())       ||
 315       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 316       (OptoBreakpointC2R && !C->method())                   ) {
 317     // checking for C->method() means that OptoBreakpoint does not apply to
 318     // runtime stubs or frame converters
 319     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 320   }
 321 
 322   // Insert epilogs before every return
 323   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 324     Block* block = C->cfg()->get_block(i);
 325     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 326       Node* m = block->end();
 327       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 328         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 329         block->add_inst(epilog);
 330         C->cfg()->map_node_to_block(epilog, block);
 331       }
 332     }
 333   }
 334 
 335   // Keeper of sizing aspects
 336   _buf_sizes = BufferSizingData();
 337 
 338   // Initialize code buffer
 339   estimate_buffer_size(_buf_sizes._const);
 340   if (C->failing()) return;
 341 
 342   // Pre-compute the length of blocks and replace
 343   // long branches with short ones if the machine supports it.
 344   // Must be done before ScheduleAndBundle due to SPARC delay slots
 345   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 346   blk_starts[0] = 0;
 347   shorten_branches(blk_starts);
 348 
 349   if (!C->is_osr_compilation() && C->has_scalarized_args()) {
 350     // Compute the offsets of the entry points required by the inline type calling convention
 351     if (!C->method()->is_static()) {
 352       // We have entries at the beginning of the method, implemented by the first 4 nodes.
 353       // Entry                     (unverified) @ offset 0
 354       // Verified_Inline_Entry_RO
 355       // Inline_Entry              (unverified)
 356       // Verified_Inline_Entry
 357       uint offset = 0;
 358       _code_offsets.set_value(CodeOffsets::Entry, offset);
 359 
 360       offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
 361       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
 362 
 363       offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
 364       _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
 365 
 366       offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
 367       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
 368     } else {
 369       _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
 370       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
 371     }
 372   }
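  // (Static methods have no receiver to unpack, so only the scalarized verified
  // entry exists; CodeOffsets::Entry stays -1 here and is patched in later.)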
 373 
 374   ScheduleAndBundle();
 375   if (C->failing()) {
 376     return;
 377   }
 378 
 379   perform_mach_node_analysis();
 380 
 381   // Complete sizing of codebuffer
 382   CodeBuffer* cb = init_buffer();
 383   if (cb == nullptr || C->failing()) {
 384     return;
 385   }
 386 
 387   BuildOopMaps();
 388 
 389   if (C->failing())  {
 390     return;
 391   }
 392 
 393   C2_MacroAssembler masm(cb);
 394   fill_buffer(&masm, blk_starts);
 395   if (C->failing()) {
 396     // If we bailed out during matching, not all nodes were visited and the
 397     // label might be in an inconsistent state (used but not bound). Reset it.
 398     verified_entry.reset();
 399   }
 400 }
 401 
 402 bool PhaseOutput::need_stack_bang(int frame_size_in_bytes) const {
 403   // Determine if we need to generate a stack overflow check.
 404   // Do it if the method is not a stub function and
 405   // has java calls or has frame size > vm_page_size/8.
 406   // The debug VM checks that deoptimization doesn't trigger an
 407   // unexpected stack overflow (compiled method stack banging should
 408   // guarantee it doesn't happen) so we always need the stack bang in
 409   // a debug VM.
 410   return (C->stub_function() == nullptr &&
 411           (C->has_java_calls() || frame_size_in_bytes > (int)(os::vm_page_size())>>3
 412            DEBUG_ONLY(|| true)));
 413 }
 414 
 415 bool PhaseOutput::need_register_stack_bang() const {
 416   // Determine if we need to generate a register stack overflow check.
 417   // This is only used on architectures which have split register
 418   // and memory stacks.
 419   // Bang if the method is not a stub function and has java calls

 520     // Sum all instruction sizes to compute block size
 521     uint last_inst = block->number_of_nodes();
 522     uint blk_size = 0;
 523     for (uint j = 0; j < last_inst; j++) {
 524       _index = j;
 525       Node* nj = block->get_node(_index);
 526       // Handle machine instruction nodes
 527       if (nj->is_Mach()) {
 528         MachNode* mach = nj->as_Mach();
 529         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 530         reloc_size += mach->reloc();
 531         if (mach->is_MachCall()) {
 532           // add size information for trampoline stub
 533           // class CallStubImpl is platform-specific and defined in the *.ad files.
 534           stub_size  += CallStubImpl::size_call_trampoline();
 535           reloc_size += CallStubImpl::reloc_call_trampoline();
 536 
 537           MachCallNode *mcall = mach->as_MachCall();
 538           // This destination address is NOT PC-relative
 539 
 540           if (mcall->entry_point() != nullptr) {
 541             mcall->method_set((intptr_t)mcall->entry_point());
 542           }
 543 
 544           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 545             stub_size  += CompiledDirectCall::to_interp_stub_size();
 546             reloc_size += CompiledDirectCall::reloc_to_interp_stub();
 547           }
 548         } else if (mach->is_MachSafePoint()) {
 549           // If call/safepoint are adjacent, account for possible
 550           // nop to disambiguate the two safepoints.
 551           // ScheduleAndBundle() can rearrange nodes in a block,
 552           // check for all offsets inside this block.
 553           if (last_call_adr >= blk_starts[i]) {
 554             blk_size += nop_size;
 555           }
 556         }
 557         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 558           // Nop is inserted between "avoid back to back" instructions.
 559           // ScheduleAndBundle() can rearrange nodes in a block,
 560           // check for all offsets inside this block.
 561           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 562             blk_size += nop_size;

 777     // New functionality:
 778     //   Assert if the local is not top. In product mode let the new node
 779     //   override the old entry.
 780     assert(local == C->top(), "LocArray collision");
 781     if (local == C->top()) {
 782       return;
 783     }
 784     array->pop();
 785   }
 786   const Type *t = local->bottom_type();
 787 
 788   // Is it a safepoint scalar object node?
 789   if (local->is_SafePointScalarObject()) {
 790     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 791 
 792     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 793     if (sv == nullptr) {
 794       ciKlass* cik = t->is_oopptr()->exact_klass();
 795       assert(cik->is_instance_klass() ||
 796              cik->is_array_klass(), "Not supported allocation.");
 797       uint first_ind = spobj->first_index(sfpt->jvms());
 798       // Nullable, scalarized inline types have a null_marker input
 799       // that needs to be checked before using the field values.
 800       ScopeValue* properties = nullptr;
 801       if (cik->is_inlinetype()) {
 802         Node* null_marker_node = sfpt->in(first_ind++);
 803         assert(null_marker_node != nullptr, "null_marker node not found");
 804         if (!null_marker_node->is_top()) {
 805           const TypeInt* null_marker_type = null_marker_node->bottom_type()->is_int();
 806           if (null_marker_node->is_Con()) {
 807             properties = new ConstantIntValue(null_marker_type->get_con());
 808           } else {
 809             OptoReg::Name null_marker_reg = C->regalloc()->get_reg_first(null_marker_node);
 810             properties = new_loc_value(C->regalloc(), null_marker_reg, Location::normal);
 811           }
 812         }
 813       }
 814       if (cik->is_array_klass() && !cik->is_type_array_klass()) {
 815         jint props = ArrayKlass::ArrayProperties::DEFAULT;
 816         if (cik->as_array_klass()->element_klass()->is_inlinetype()) {
 817           if (cik->as_array_klass()->is_elem_null_free()) {
 818             props |= ArrayKlass::ArrayProperties::NULL_RESTRICTED;
 819           }
 820           if (!cik->as_array_klass()->is_elem_atomic()) {
 821             props |= ArrayKlass::ArrayProperties::NON_ATOMIC;
 822           }
 823         }
 824         properties = new ConstantIntValue(props);
 825       }
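          // (The recorded properties presumably let deoptimization reallocate
          // the flat array with matching null-restriction and atomicity; object
          // arrays without inline-type elements just record DEFAULT.)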
 826       sv = new ObjectValue(spobj->_idx,
 827                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, properties);
 828       set_sv_for_object_node(objs, sv);
 829 
 830       for (uint i = 0; i < spobj->n_fields(); i++) {
 831         Node* fld_node = sfpt->in(first_ind+i);
 832         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 833       }
 834     }
 835     array->append(sv);
 836     return;
 837   } else if (local->is_SafePointScalarMerge()) {
 838     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 839     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 840 
 841     if (mv == nullptr) {
 842       GrowableArray<ScopeValue*> deps;
 843 
 844       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 845       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 846       assert(deps.length() == 1, "missing value");
 847 
 848       int selector_idx = smerge->selector_idx(sfpt->jvms());
 849       (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);

1055     if (!n->is_SafePointScalarObject()) {
1056       continue;
1057     }
1058 
1059     ObjectValue* other = sv_for_node_id(objs, n->_idx);
1060     if (ov == other) {
1061       return true;
1062     }
1063   }
1064   return false;
1065 }
1066 
1067 //--------------------------Process_OopMap_Node--------------------------------
1068 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1069   // Handle special safepoint nodes for synchronization
1070   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1071   MachCallNode      *mcall;
1072 
1073   int safepoint_pc_offset = current_offset;
1074   bool return_oop = false;
1075   bool return_scalarized = false;
1076   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1077   bool arg_escape = false;
1078 
1079   // Add the safepoint in the DebugInfoRecorder
1080   if( !mach->is_MachCall() ) {
1081     mcall = nullptr;
1082     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1083   } else {
1084     mcall = mach->as_MachCall();
1085 
1086     if (mcall->is_MachCallJava()) {
1087       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1088     }
1089 
1090     // Check if a call returns an object.
1091     if (mcall->returns_pointer() || mcall->returns_scalarized()) {
1092       return_oop = true;
1093     }
1094     if (mcall->returns_scalarized()) {
1095       return_scalarized = true;
1096     }
1097     safepoint_pc_offset += mcall->ret_addr_offset();
1098     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1099   }
1100 
1101   // Loop over the JVMState list to add scope information
1102   // Do not skip safepoints with a null method; they need monitor info
1103   JVMState* youngest_jvms = sfn->jvms();
1104   int max_depth = youngest_jvms->depth();
1105 
1106   // Allocate the object pool for scalar-replaced objects -- the map from
1107   // small-integer keys (which can be recorded in the local and ostack
1108   // arrays) to descriptions of the object state.
1109   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1110 
1111   // Visit scopes from oldest to youngest.
1112   for (int depth = 1; depth <= max_depth; depth++) {
1113     JVMState* jvms = youngest_jvms->of_depth(depth);
1114     int idx;
1115     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1116     // Safepoints that do not have method() set only provide oop-map and monitor info

1145     // Build the growable array of MonitorValues for the monitors
1146     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
1147 
1148     // Loop over monitors and insert into array
1149     for (idx = 0; idx < num_mon; idx++) {
1150       // Grab the node that defines this monitor
1151       Node* box_node = sfn->monitor_box(jvms, idx);
1152       Node* obj_node = sfn->monitor_obj(jvms, idx);
1153 
1154       // Create ScopeValue for object
1155       ScopeValue *scval = nullptr;
1156 
1157       if (obj_node->is_SafePointScalarObject()) {
1158         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
1159         scval = PhaseOutput::sv_for_node_id(objs, spobj->_idx);
1160         if (scval == nullptr) {
1161           const Type *t = spobj->bottom_type();
1162           ciKlass* cik = t->is_oopptr()->exact_klass();
1163           assert(cik->is_instance_klass() ||
1164                  cik->is_array_klass(), "Not supported allocation.");
1165           assert(!cik->is_inlinetype(), "Synchronization on value object?");
1166           ScopeValue* properties = nullptr;
1167           if (cik->is_array_klass() && !cik->is_type_array_klass()) {
1168             jint props = ArrayKlass::ArrayProperties::DEFAULT;
1169             if (cik->as_array_klass()->element_klass()->is_inlinetype()) {
1170               if (cik->as_array_klass()->is_elem_null_free()) {
1171                 props |= ArrayKlass::ArrayProperties::NULL_RESTRICTED;
1172               }
1173               if (!cik->as_array_klass()->is_elem_atomic()) {
1174                 props |= ArrayKlass::ArrayProperties::NON_ATOMIC;
1175               }
1176             }
1177             properties = new ConstantIntValue(props);
1178           }
1179           ObjectValue* sv = new ObjectValue(spobj->_idx,
1180                                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, properties);
1181           PhaseOutput::set_sv_for_object_node(objs, sv);
1182 
1183           uint first_ind = spobj->first_index(youngest_jvms);
1184           for (uint i = 0; i < spobj->n_fields(); i++) {
1185             Node* fld_node = sfn->in(first_ind+i);
1186             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
1187           }
1188           scval = sv;
1189         }
1190       } else if (obj_node->is_SafePointScalarMerge()) {
1191         SafePointScalarMergeNode* smerge = obj_node->as_SafePointScalarMerge();
1192         ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
1193 
1194         if (mv == nullptr) {
1195           GrowableArray<ScopeValue*> deps;
1196 
1197           int merge_pointer_idx = smerge->merge_pointer_idx(youngest_jvms);
1198           FillLocArray(0, sfn, sfn->in(merge_pointer_idx), &deps, objs);
1199           assert(deps.length() == 1, "missing value");
1200 

1267     DebugToken *locvals = C->debug_info()->create_scope_values(locarray);
1268     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1269     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1270 
1271     // Make method available for all Safepoints
1272     ciMethod* scope_method = method ? method : C->method();
1273     // Describe the scope here
1274     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1275     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1276     // Now we can describe the scope.
1277     methodHandle null_mh;
1278     bool rethrow_exception = false;
1279     C->debug_info()->describe_scope(
1280       safepoint_pc_offset,
1281       null_mh,
1282       scope_method,
1283       jvms->bci(),
1284       jvms->should_reexecute(),
1285       rethrow_exception,
1286       return_oop,
1287       return_scalarized,
1288       has_ea_local_in_scope,
1289       arg_escape,
1290       locvals,
1291       expvals,
1292       monvals
1293     );
1294   } // End jvms loop
1295 
1296   // Mark the end of the scope set.
1297   C->debug_info()->end_safepoint(safepoint_pc_offset);
1298 }
1299 
1300 
1301 
1302 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1303 class NonSafepointEmitter {
1304     Compile*  C;
1305     JVMState* _pending_jvms;
1306     int       _pending_offset;
1307 

1621           MachNode *nop = new MachNopNode(nops_cnt);
1622           block->insert_node(nop, j++);
1623           last_inst++;
1624           C->cfg()->map_node_to_block(nop, block);
1625           // Ensure enough space.
1626           masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1627           if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1628             C->record_failure("CodeCache is full");
1629             return;
1630           }
1631           nop->emit(masm, C->regalloc());
1632           masm->code()->flush_bundle(true);
1633           current_offset = masm->offset();
1634         }
1635 
1636         bool observe_safepoint = is_sfn;
1637         // Remember the start of the last call in a basic block
1638         if (is_mcall) {
1639           MachCallNode *mcall = mach->as_MachCall();
1640 
1641           if (mcall->entry_point() != nullptr) {
1642             // This destination address is NOT PC-relative
1643             mcall->method_set((intptr_t)mcall->entry_point());
1644           }
1645 
1646           // Save the return address
1647           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1648 
1649           observe_safepoint = mcall->guaranteed_safepoint();
1650         }
1651 
1652         // sfn is valid whenever mcall is valid, because MachCallNode inherits from MachSafePointNode
1653         if (observe_safepoint) {
1654           // Handle special safepoint nodes for synchronization
1655           if (!is_mcall) {
1656             MachSafePointNode *sfn = mach->as_MachSafePoint();
1657             // !!!!! Stubs only need an oopmap right now, so bail out
1658             if (sfn->jvms()->method() == nullptr) {
1659               // Write the oopmap directly to the code blob??!!
1660               continue;
1661             }
1662           } // End synchronization
1663 
1664           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1762       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1763         node_offsets[n->_idx] = masm->offset();
1764       }
1765 #endif
1766       assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1767 
1768       // "Normal" instruction case
1769       DEBUG_ONLY(uint instr_offset = masm->offset());
1770       n->emit(masm, C->regalloc());
1771       current_offset = masm->offset();
1772 
1773       // Above we only verified that there is enough space in the instruction section.
1774       // However, the instruction may emit stubs that cause code buffer expansion.
1775       // Bail out here if expansion failed due to a lack of code cache space.
1776       if (C->failing()) {
1777         return;
1778       }
1779 
1780       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1781              "ret_addr_offset() not within emitted code");
1782 #ifdef ASSERT
1783       uint n_size = n->size(C->regalloc());
1784       if (n_size < (current_offset-instr_offset)) {
1785         MachNode* mach = n->as_Mach();
1786         n->dump();
1787         mach->dump_format(C->regalloc(), tty);
1788         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1789         Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
1790         tty->print_cr(" ------------------- ");
1791         BufferBlob* blob = this->scratch_buffer_blob();
1792         address blob_begin = blob->content_begin();
1793         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1794         assert(false, "wrong size of mach node");
1795       }
1796 #endif
1797       non_safepoints.observe_instruction(n, current_offset);
1798 
1799       // mcall is the last "call" that can be a safepoint;
1800       // record it so we can see if a poll will directly follow it,
1801       // in which case we'll need a pad to make the PcDesc sites unique.

3016         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3017         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3018       }
3019     }
3020     // Do not allow defs of new derived values to float above GC
3021     // points unless the base is definitely available at the GC point.
3022 
3023     Node *m = b->get_node(i);
3024 
3025     // Add precedence edge from following safepoint to use of derived pointer
3026     if (last_safept_node != end_node &&
3027         m != last_safept_node) {
3028       for (uint k = 1; k < m->req(); k++) {
3029         const Type *t = m->in(k)->bottom_type();
3030         if (t->isa_oop_ptr() &&
3031             t->is_ptr()->offset() != 0) {
3032           last_safept_node->add_prec(m);
3033           break;
3034         }
3035       }
3036 
3037       // Do not allow a CheckCastPP node whose input is a raw pointer to
3038       // float past a safepoint.  This can occur when a buffered inline
3039       // type is allocated in a loop and the CheckCastPP from that
3040       // allocation is reused outside the loop.  If the use inside the
3041       // loop is scalarized the CheckCastPP will no longer be connected
3042       // to the loop safepoint.  See JDK-8264340.
3043       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
3044         Node *def = m->in(1);
3045         if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
3046           last_safept_node->add_prec(m);
3047         }
3048       }
3049     }
3050 
3051     if (n->jvms()) {            // Precedence edge from derived to safept
3052       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3053       if (b->get_node(last_safept) != last_safept_node) {
3054         last_safept = b->find_node(last_safept_node);
3055       }
3056       for (uint j = last_safept; j > i; j--) {
3057         Node *mach = b->get_node(j);
3058         if (mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP)
3059           mach->add_prec(n);
3060       }
3061       last_safept = i;
3062       last_safept_node = m;
3063     }
3064   }
3065 
3066   if (fat_proj_seen) {
3067     // Garbage collect pinch nodes that were not consumed.
3068     // They are usually created by a fat kill MachProj for a call.

3177 }
3178 #endif
3179 
3180 //-----------------------init_scratch_buffer_blob------------------------------
3181 // Construct a temporary BufferBlob and cache it for this compile.
3182 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3183   // If there is already a scratch buffer blob allocated and the
3184   // constant section is big enough, use it.  Otherwise free the
3185   // current and allocate a new one.
3186   BufferBlob* blob = scratch_buffer_blob();
3187   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3188     // Use the current blob.
3189   } else {
3190     if (blob != nullptr) {
3191       BufferBlob::free(blob);
3192     }
3193 
3194     ResourceMark rm;
3195     _scratch_const_size = const_size;
3196     int size = C2Compiler::initial_code_buffer_size(const_size);
3197     if (C->has_scalarized_args()) {
3198       // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
3199       // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
3200       ciMethod* method = C->method();
3201       int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
3202       int arg_num = 0;
3203       if (!method->is_static()) {
3204         if (method->is_scalarized_arg(arg_num)) {
3205           size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
3206         }
3207         arg_num++;
3208       }
3209       for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
3210         if (method->is_scalarized_arg(arg_num)) {
3211           size += str.type()->as_inline_klass()->oop_count() * barrier_size;
3212         }
3213         arg_num++;
3214       }
3215     }
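    // Editor's example (hedged): for an instance method on an inline klass
    // with three oop fields, the receiver alone adds 3 * barrier_size bytes
    // above; the constants (200 for ZGC, otherwise 7, +37 in debug builds)
    // are per-oop size estimates, not architectural limits.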
3216     blob = BufferBlob::create("Compile::scratch_buffer", size);
3217     // Record the buffer blob for next time.
3218     set_scratch_buffer_blob(blob);
3219     // Have we run out of code space?
3220     if (scratch_buffer_blob() == nullptr) {
3221       // Let CompileBroker disable further compilations.
3222       C->record_failure("Not enough space for scratch buffer in CodeCache");
3223       return;
3224     }
3225   }
3226 
3227   // Initialize the relocation buffers
3228   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3229   set_scratch_locs_memory(locs_buf);
3230 }
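
// Editor's usage sketch (hedged; 'output', 'const_size' and 'n' are
// illustrative names, not part of this file):
//
//   output->init_scratch_buffer_blob(const_size);
//   if (!C->failing()) {
//     uint len = output->scratch_emit_size(n); // trial emission, see below
//   }
//
// The blob is cached across calls and only reallocated when a larger
// constant section is requested.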
3231 
3232 
3233 //-----------------------scratch_emit_size-------------------------------------
3234 // Helper function that computes size by emitting code
3235 uint PhaseOutput::scratch_emit_size(const Node* n) {

3266   buf.insts()->set_scratch_emit();
3267   buf.stubs()->set_scratch_emit();
3268 
3269   // Do the emission.
3270 
3271   Label fakeL; // Fake label for branch instructions.
3272   Label*   saveL = nullptr;
3273   uint save_bnum = 0;
3274   bool is_branch = n->is_MachBranch();
3275   C2_MacroAssembler masm(&buf);
3276   masm.bind(fakeL);
3277   if (is_branch) {
3278     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3279     n->as_MachBranch()->label_set(&fakeL, 0);
3280   }
3281   n->emit(&masm, C->regalloc());
3282 
3283   // Emitting into the scratch buffer should not fail
3284   assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3285 
3286   // Restore label.
3287   if (is_branch) {
3288     n->as_MachBranch()->label_set(saveL, save_bnum);
3289   }
3290 
3291   // End scratch_emit_size section.
3292   set_in_scratch_emit_size(false);
3293 
3294   return buf.insts_size();
3295 }
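
// Editor's note on the fakeL pattern above: a MachBranch cannot be emitted
// without a bound label, so the trial emission temporarily retargets it.
// A minimal sketch (assuming a hypothetical MachBranchNode* b):
//
//   Label fakeL;
//   masm.bind(fakeL);                  // bind at the current scratch pc
//   b->save_label(&saveL, &save_bnum); // remember the real target
//   b->label_set(&fakeL, 0);           // point the branch at the fake label
//   b->emit(&masm, C->regalloc());     // now safe to measure
//   b->label_set(saveL, save_bnum);    // restore the real target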
3296 
3297 void PhaseOutput::install() {
3298   if (!C->should_install_code()) {
3299     return;
3300   } else if (C->stub_function() != nullptr) {
3301     install_stub(C->stub_name());
3302   } else {
3303     install_code(C->method(),
3304                  C->entry_bci(),
3305                  CompileBroker::compiler2(),
3306                  C->has_unsafe_access(),
3307                  SharedRuntime::is_wide_vector(C->max_vector_size()));
3308   }
3309 }
3310 
3311 void PhaseOutput::install_code(ciMethod*         target,
3312                                int               entry_bci,
3313                                AbstractCompiler* compiler,
3314                                bool              has_unsafe_access,
3315                                bool              has_wide_vectors) {
3316   // Check if we want to skip execution of all compiled code.
3317   {
3318 #ifndef PRODUCT
3319     if (OptoNoExecute) {
3320       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3321       return;
3322     }
3323 #endif
3324     Compile::TracePhase tp(_t_registerMethod);
3325 
3326     if (C->is_osr_compilation()) {
3327       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3328       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3329     } else {
3330       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3331       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
3332         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
3333       }
3334       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
3335         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
3336       }
3337       if (_code_offsets.value(CodeOffsets::Entry) == -1) {
3338         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
3339       }
3340       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3341     }
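
    // Editor's summary (hedged) of the entry offsets recorded above:
    //
    //   OSR compile:    Verified_Entry at 0, OSR_Entry at _first_block_size.
    //   normal compile: Verified_Entry at _first_block_size, OSR_Entry at 0;
    //                   Entry and the inline-type entries default to
    //                   _first_block_size when not set explicitly earlier.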
3342 
3343     C->env()->register_method(target,
3344                               entry_bci,
3345                               &_code_offsets,
3346                               _orig_pc_slot_offset_in_bytes,
3347                               code_buffer(),
3348                               frame_size_in_words(),
3349                               _oop_map_set,
3350                               &_handler_table,
3351                               inc_table(),
3352                               compiler,
3353                               has_unsafe_access,
3354                               has_wide_vectors,
3355                               C->has_monitors(),
3356                               C->has_scoped_access(),
3357                               0);
3358 
3359     if (C->log() != nullptr) { // Print code cache state into compiler log
3360       C->log()->code_cache_state();
3361     }
3362   }
3363 }
3364 void PhaseOutput::install_stub(const char* stub_name) {
3365   // Entry point will be accessed using stub_entry_point();
3366   if (code_buffer() == nullptr) {
3367     Matcher::soft_match_failure();
3368   } else {
3369     if (PrintAssembly && (WizardMode || Verbose))
3370       tty->print_cr("### Stub::%s", stub_name);
3371 
3372     if (!C->failing()) {
3373       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3374 
3375       // Make the RuntimeStub
3376       // For now we mark the frame as never safe for profile stackwalking
3377       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,