
src/hotspot/share/opto/output.cpp (old version)

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/assembler.inline.hpp"
  26 #include "code/aotCodeCache.hpp"
  27 #include "code/compiledIC.hpp"
  28 #include "code/debugInfo.hpp"
  29 #include "code/debugInfoRec.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compilerDirectives.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "compiler/oopMap.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/c2/barrierSetC2.hpp"

  36 #include "memory/allocation.hpp"
  37 #include "opto/ad.hpp"
  38 #include "opto/block.hpp"
  39 #include "opto/c2_MacroAssembler.hpp"
  40 #include "opto/c2compiler.hpp"
  41 #include "opto/callnode.hpp"
  42 #include "opto/cfgnode.hpp"
  43 #include "opto/locknode.hpp"
  44 #include "opto/machnode.hpp"
  45 #include "opto/node.hpp"
  46 #include "opto/optoreg.hpp"
  47 #include "opto/output.hpp"
  48 #include "opto/regalloc.hpp"
  49 #include "opto/type.hpp"
  50 #include "runtime/sharedRuntime.hpp"
  51 #include "utilities/macros.hpp"
  52 #include "utilities/powerOfTwo.hpp"
  53 #include "utilities/xmlstream.hpp"
  54 
  55 #ifndef PRODUCT

 224     _first_block_size(0),
 225     _handler_table(),
 226     _inc_table(),
 227     _stub_list(),
 228     _oop_map_set(nullptr),
 229     _scratch_buffer_blob(nullptr),
 230     _scratch_locs_memory(nullptr),
 231     _scratch_const_size(-1),
 232     _in_scratch_emit_size(false),
 233     _frame_slots(0),
 234     _code_offsets(),
 235     _node_bundling_limit(0),
 236     _node_bundling_base(nullptr),
 237     _orig_pc_slot(0),
 238     _orig_pc_slot_offset_in_bytes(0),
 239     _buf_sizes(),
 240     _block(nullptr),
 241     _index(0) {
 242   C->set_output(this);
 243   if (C->stub_name() == nullptr) {
 244     _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
 245   }
 246 }
 247 
 248 PhaseOutput::~PhaseOutput() {
 249   C->set_output(nullptr);
 250   if (_scratch_buffer_blob != nullptr) {
 251     BufferBlob::free(_scratch_buffer_blob);
 252   }
 253 }
 254 
 255 void PhaseOutput::perform_mach_node_analysis() {
 256   // Late barrier analysis must be done after schedule and bundle
 257   // Otherwise liveness based spilling will fail
 258   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 259   bs->late_barrier_analysis();
 260 
 261   pd_perform_mach_node_analysis();
 262 
 263   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 264 }
 265 
 266 // Convert Nodes to instruction bits and pass off to the VM
 267 void PhaseOutput::Output() {
 268   // RootNode goes
 269   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 270 
 271   // The number of new nodes (mostly MachNop) is proportional to
 272   // the number of java calls and inner loops which are aligned.
 273   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 274                             C->inner_loops()*(OptoLoopAlignment-1)),
 275                            "out of nodes before code generation" ) ) {
 276     return;
 277   }
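A worked example of the node budget just checked, assuming the common defaults NodeLimitFudgeFactor = 2000 and OptoLoopAlignment = 16 (both are VM flags, so the actual values may differ):

    // Hypothetical compilation with 50 Java calls and 4 aligned inner loops:
    //   2000 + 50*3 + 4*(16-1) = 2000 + 150 + 60 = 2210
    // check_node_count() bails out early if roughly that many extra nodes
    // (worst-case alignment nops plus the fudge factor) would not fit.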
 278   // Make sure I can find the Start Node
 279   Block *entry = C->cfg()->get_block(1);
 280   Block *broot = C->cfg()->get_root_block();
 281 
 282   const StartNode *start = entry->head()->as_Start();
 283 
 284   // Replace StartNode with prolog
 285   MachPrologNode *prolog = new MachPrologNode();

 286   entry->map_node(prolog, 0);
 287   C->cfg()->map_node_to_block(prolog, entry);
 288   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 289 
 290   // Virtual methods need an unverified entry point
 291 
 292   if( C->is_osr_compilation() ) {
 293     if( PoisonOSREntry ) {
 294       // TODO: Should use a ShouldNotReachHereNode...
 295       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 296     }
 297   } else {
 298     if( C->method() && !C->method()->flags().is_static() ) {
 299       // Insert unvalidated entry point
 300       C->cfg()->insert( broot, 0, new MachUEPNode() );
 301     }
 302 
 303   }
 304 
 305   // Break before main entry point
 306   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 307       (OptoBreakpoint && C->is_method_compilation())       ||
 308       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 309       (OptoBreakpointC2R && !C->method())                   ) {
 310     // checking for C->method() means that OptoBreakpoint does not apply to
 311     // runtime stubs or frame converters
 312     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 313   }
 314 
 315   // Insert epilogs before every return
 316   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 317     Block* block = C->cfg()->get_block(i);
 318     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 319       Node* m = block->end();
 320       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 321         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 322         block->add_inst(epilog);
 323         C->cfg()->map_node_to_block(epilog, block);
 324       }
 325     }
 326   }
 327 
 328   // Keeper of sizing aspects
 329   _buf_sizes = BufferSizingData();
 330 
 331   // Initialize code buffer
 332   estimate_buffer_size(_buf_sizes._const);
 333   if (C->failing()) return;
 334 
 335   // Pre-compute the length of blocks and replace
 336   // long branches with short if machine supports it.
 337   // Must be done before ScheduleAndBundle due to SPARC delay slots
 338   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 339   blk_starts[0] = 0;
 340   shorten_branches(blk_starts);
 341 
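shorten_branches() iterates to a fixpoint because shrinking one branch can pull another branch's target into short-displacement range. A minimal standalone sketch of that idea in plain C++ (illustrative encodings and a signed-byte range; not HotSpot's actual data structures):

    #include <cstddef>
    #include <vector>

    struct Branch {
      std::size_t at;       // index of the branch instruction
      std::size_t target;   // index of the instruction it jumps to
      bool is_short;        // starts false: assume the long encoding
    };

    static void shorten(std::vector<int>& size, std::vector<Branch>& branches) {
      bool progress = true;
      while (progress) {                  // iterate to a fixpoint
        progress = false;
        // Recompute instruction start offsets under the current sizes.
        std::vector<int> start(size.size() + 1, 0);
        for (std::size_t i = 0; i < size.size(); i++)
          start[i + 1] = start[i] + size[i];
        for (Branch& br : branches) {
          if (br.is_short) continue;      // shrinking is monotone
          int disp = start[br.target] - start[br.at + 1];
          if (disp >= -128 && disp <= 127) {
            br.is_short = true;           // displacement fits a signed byte
            size[br.at] = 4;              // e.g. 8-byte long form, 4-byte short
            progress = true;              // offsets moved; others may fit now
          }
        }
      }
    }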
 342   ScheduleAndBundle();
 343   if (C->failing()) {
 344     return;
 345   }
 346 
 347   perform_mach_node_analysis();
 348 
 349   // Complete sizing of codebuffer
 350   CodeBuffer* cb = init_buffer();
 351   if (cb == nullptr || C->failing()) {
 352     return;
 353   }
 354 
 355   BuildOopMaps();
 356 
 357   if (C->failing())  {
 358     return;
 359   }
 360 
 361   C2_MacroAssembler masm(cb);

 483     // Sum all instruction sizes to compute block size
 484     uint last_inst = block->number_of_nodes();
 485     uint blk_size = 0;
 486     for (uint j = 0; j < last_inst; j++) {
 487       _index = j;
 488       Node* nj = block->get_node(_index);
 489       // Handle machine instruction nodes
 490       if (nj->is_Mach()) {
 491         MachNode* mach = nj->as_Mach();
 492         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 493         reloc_size += mach->reloc();
 494         if (mach->is_MachCall()) {
 495           // add size information for trampoline stub
 496           // class CallStubImpl is platform-specific and defined in the *.ad files.
 497           stub_size  += CallStubImpl::size_call_trampoline();
 498           reloc_size += CallStubImpl::reloc_call_trampoline();
 499 
 500           MachCallNode *mcall = mach->as_MachCall();
 501           // This destination address is NOT PC-relative
 502 
 503           mcall->method_set((intptr_t)mcall->entry_point());
 504 
 505           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 506             stub_size  += CompiledDirectCall::to_interp_stub_size();
 507             reloc_size += CompiledDirectCall::reloc_to_interp_stub();
 508           }
 509         } else if (mach->is_MachSafePoint()) {
 510           // If call/safepoint are adjacent, account for possible
 511           // nop to disambiguate the two safepoints.
 512           // ScheduleAndBundle() can rearrange nodes in a block,
 513           // check for all offsets inside this block.
 514           if (last_call_adr >= blk_starts[i]) {
 515             blk_size += nop_size;
 516           }
 517         }
 518         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 519           // Nop is inserted between "avoid back to back" instructions.
 520           // ScheduleAndBundle() can rearrange nodes in a block,
 521           // check for all offsets inside this block.
 522           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 523             blk_size += nop_size;

 738     // New functionality:
 739     //   Assert if the local is not top. In product mode let the new node
 740     //   override the old entry.
 741     assert(local == C->top(), "LocArray collision");
 742     if (local == C->top()) {
 743       return;
 744     }
 745     array->pop();
 746   }
 747   const Type *t = local->bottom_type();
 748 
 749   // Is it a safepoint scalar object node?
 750   if (local->is_SafePointScalarObject()) {
 751     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 752 
 753     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 754     if (sv == nullptr) {
 755       ciKlass* cik = t->is_oopptr()->exact_klass();
 756       assert(cik->is_instance_klass() ||
 757              cik->is_array_klass(), "Not supported allocation.");
 758       sv = new ObjectValue(spobj->_idx,
 759                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
 760       set_sv_for_object_node(objs, sv);
 761 
 762       uint first_ind = spobj->first_index(sfpt->jvms());
 763       for (uint i = 0; i < spobj->n_fields(); i++) {
 764         Node* fld_node = sfpt->in(first_ind+i);
 765         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 766       }
 767     }
 768     array->append(sv);
 769     return;
 770   } else if (local->is_SafePointScalarMerge()) {
 771     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 772     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 773 
 774     if (mv == nullptr) {
 775       GrowableArray<ScopeValue*> deps;
 776 
 777       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 778       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 779       assert(deps.length() == 1, "missing value");
 780 
 781       int selector_idx = smerge->selector_idx(sfpt->jvms());
 782       (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);

 989       continue;
 990     }
 991 
 992     ObjectValue* other = sv_for_node_id(objs, n->_idx);
 993     if (ov == other) {
 994       return true;
 995     }
 996   }
 997   return false;
 998 }
 999 
1000 //--------------------------Process_OopMap_Node--------------------------------
1001 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1002   // Handle special safepoint nodes for synchronization
1003   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1004   MachCallNode      *mcall;
1005 
1006   int safepoint_pc_offset = current_offset;
1007   bool is_method_handle_invoke = false;
1008   bool return_oop = false;

1009   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1010   bool arg_escape = false;
1011 
1012   // Add the safepoint in the DebugInfoRecorder
1013   if( !mach->is_MachCall() ) {
1014     mcall = nullptr;
1015     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1016   } else {
1017     mcall = mach->as_MachCall();
1018 
1019     // Is the call a MethodHandle call?
1020     if (mcall->is_MachCallJava()) {
1021       if (mcall->as_MachCallJava()->_method_handle_invoke) {
1022         assert(C->has_method_handle_invokes(), "must have been set during call generation");
1023         is_method_handle_invoke = true;
1024       }
1025       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1026     }
1027 
1028     // Check if a call returns an object.
1029     if (mcall->returns_pointer()) {
1030       return_oop = true;
1031     }
1032     safepoint_pc_offset += mcall->ret_addr_offset();
1033     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1034   }
1035 
1036   // Loop over the JVMState list to add scope information
1037   // Do not skip safepoints with a null method, they need monitor info
1038   JVMState* youngest_jvms = sfn->jvms();
1039   int max_depth = youngest_jvms->depth();
1040 
1041   // Allocate the object pool for scalar-replaced objects -- the map from
1042   // small-integer keys (which can be recorded in the local and ostack
1043   // arrays) to descriptions of the object state.
1044   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1045 
1046   // Visit scopes from oldest to youngest.
1047   for (int depth = 1; depth <= max_depth; depth++) {
1048     JVMState* jvms = youngest_jvms->of_depth(depth);
1049     int idx;
1050     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1051     // Safepoints that do not have method() set only provide oop-map and monitor info

1080     // Build the growable array of ScopeValues for exp stack
1081     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
1082 
1083     // Loop over monitors and insert into array
1084     for (idx = 0; idx < num_mon; idx++) {
1085       // Grab the node that defines this monitor
1086       Node* box_node = sfn->monitor_box(jvms, idx);
1087       Node* obj_node = sfn->monitor_obj(jvms, idx);
1088 
1089       // Create ScopeValue for object
1090       ScopeValue *scval = nullptr;
1091 
1092       if (obj_node->is_SafePointScalarObject()) {
1093         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
1094         scval = PhaseOutput::sv_for_node_id(objs, spobj->_idx);
1095         if (scval == nullptr) {
1096           const Type *t = spobj->bottom_type();
1097           ciKlass* cik = t->is_oopptr()->exact_klass();
1098           assert(cik->is_instance_klass() ||
1099                  cik->is_array_klass(), "Not supported allocation.");
1100           ObjectValue* sv = new ObjectValue(spobj->_idx,
1101                                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
1102           PhaseOutput::set_sv_for_object_node(objs, sv);
1103 
1104           uint first_ind = spobj->first_index(youngest_jvms);
1105           for (uint i = 0; i < spobj->n_fields(); i++) {
1106             Node* fld_node = sfn->in(first_ind+i);
1107             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
1108           }
1109           scval = sv;
1110         }
1111       } else if (obj_node->is_SafePointScalarMerge()) {
1112         SafePointScalarMergeNode* smerge = obj_node->as_SafePointScalarMerge();
1113         ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
1114 
1115         if (mv == nullptr) {
1116           GrowableArray<ScopeValue*> deps;
1117 
1118           int merge_pointer_idx = smerge->merge_pointer_idx(youngest_jvms);
1119           FillLocArray(0, sfn, sfn->in(merge_pointer_idx), &deps, objs);
1120           assert(deps.length() == 1, "missing value");
1121 

1189     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1190     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1191 
1192     // Make method available for all Safepoints
1193     ciMethod* scope_method = method ? method : C->method();
1194     // Describe the scope here
1195     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1196     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1197     // Now we can describe the scope.
1198     methodHandle null_mh;
1199     bool rethrow_exception = false;
1200     C->debug_info()->describe_scope(
1201       safepoint_pc_offset,
1202       null_mh,
1203       scope_method,
1204       jvms->bci(),
1205       jvms->should_reexecute(),
1206       rethrow_exception,
1207       is_method_handle_invoke,
1208       return_oop,

1209       has_ea_local_in_scope,
1210       arg_escape,
1211       locvals,
1212       expvals,
1213       monvals
1214     );
1215   } // End jvms loop
1216 
1217   // Mark the end of the scope set.
1218   C->debug_info()->end_safepoint(safepoint_pc_offset);
1219 }
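The objs pool used throughout Process_OopMap_Node and FillLocArray is keyed by node index, so one scalar-replaced allocation referenced from several locals, stack slots or monitors is described exactly once. A simplified stand-in for the lookup (ObjVal and node_id are illustrative names, not the VM types):

    #include <vector>

    struct ObjVal {               // stand-in for ObjectValue
      int node_id;                // _idx of the SafePointScalarObject node
      // ... field values, klass mirror, etc.
    };

    static ObjVal* sv_for_node_id(const std::vector<ObjVal*>& objs, int id) {
      for (ObjVal* ov : objs) {
        if (ov->node_id == id) {
          return ov;              // already described: reuse, don't duplicate
        }
      }
      return nullptr;             // caller allocates one and registers it
    }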
1220 
1221 
1222 
1223 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1224 class NonSafepointEmitter {
1225     Compile*  C;
1226     JVMState* _pending_jvms;
1227     int       _pending_offset;
1228 
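The class keeps at most one JVMState pending and only materializes a non-safepoint debug record when that state is about to be replaced or lost. Roughly, the shape is as follows (a loose sketch under assumed semantics; the real class body is elided above):

    class PendingEmitter {        // toy analogue of NonSafepointEmitter
      const void* _pending_jvms = nullptr;   // youngest state seen so far
      int         _pending_offset = -1;      // pc offset where it was seen

     public:
      void observe(const void* jvms, int pc_offset) {
        if (_pending_jvms != nullptr && _pending_jvms != jvms) {
          flush();                // the previous state ends here: record it
        }
        _pending_jvms   = jvms;
        _pending_offset = pc_offset;
      }

      void flush() {
        if (_pending_jvms != nullptr) {
          // debug_info->add_non_safepoint(_pending_offset, _pending_jvms);
          _pending_jvms = nullptr;
        }
      }
    };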

1564           MachNode *nop = new MachNopNode(nops_cnt);
1565           block->insert_node(nop, j++);
1566           last_inst++;
1567           C->cfg()->map_node_to_block(nop, block);
1568           // Ensure enough space.
1569           masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1570           if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1571             C->record_failure("CodeCache is full");
1572             return;
1573           }
1574           nop->emit(masm, C->regalloc());
1575           masm->code()->flush_bundle(true);
1576           current_offset = masm->offset();
1577         }
1578 
1579         bool observe_safepoint = is_sfn;
1580         // Remember the start of the last call in a basic block
1581         if (is_mcall) {
1582           MachCallNode *mcall = mach->as_MachCall();
1583 
1584           // This destination address is NOT PC-relative
1585           mcall->method_set((intptr_t)mcall->entry_point());
1586 
1587           // Save the return address
1588           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1589 
1590           observe_safepoint = mcall->guaranteed_safepoint();
1591         }
1592 
1593         // sfn will be valid whenever mcall is valid now because of inheritance
1594         if (observe_safepoint) {
1595           // Handle special safepoint nodes for synchronization
1596           if (!is_mcall) {
1597             MachSafePointNode *sfn = mach->as_MachSafePoint();
1598             // !!!!! Stubs only need an oopmap right now, so bail out
1599             if (sfn->jvms()->method() == nullptr) {
1600               // Write the oopmap directly to the code blob??!!
1601               continue;
1602             }
1603           } // End synchronization
1604 
1605           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1706       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1707         node_offsets[n->_idx] = masm->offset();
1708       }
1709 #endif
1710       assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1711 
1712       // "Normal" instruction case
1713       DEBUG_ONLY(uint instr_offset = masm->offset());
1714       n->emit(masm, C->regalloc());
1715       current_offset = masm->offset();
1716 
1717       // Above we only verified that there is enough space in the instruction section.
1718       // However, the instruction may emit stubs that cause code buffer expansion.
1719       // Bail out here if expansion failed due to a lack of code cache space.
1720       if (C->failing()) {
1721         return;
1722       }
1723 
1724       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1725              "ret_addr_offset() not within emitted code");
1726 
1727 #ifdef ASSERT
1728       uint n_size = n->size(C->regalloc());
1729       if (n_size < (current_offset-instr_offset)) {
1730         MachNode* mach = n->as_Mach();
1731         n->dump();
1732         mach->dump_format(C->regalloc(), tty);
1733         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1734         Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
1735         tty->print_cr(" ------------------- ");
1736         BufferBlob* blob = this->scratch_buffer_blob();
1737         address blob_begin = blob->content_begin();
1738         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1739         assert(false, "wrong size of mach node");
1740       }
1741 #endif
1742       non_safepoints.observe_instruction(n, current_offset);
1743 
1744       // mcall is last "call" that can be a safepoint
1745       // record it so we can see if a poll will directly follow it
1746       // in which case we'll need a pad to make the PcDesc sites unique

3144         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3145         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3146       }
3147     }
3148     // Do not allow defs of new derived values to float above GC
3149     // points unless the base is definitely available at the GC point.
3150 
3151     Node *m = b->get_node(i);
3152 
3153     // Add precedence edge from following safepoint to use of derived pointer
3154     if( last_safept_node != end_node &&
3155         m != last_safept_node) {
3156       for (uint k = 1; k < m->req(); k++) {
3157         const Type *t = m->in(k)->bottom_type();
3158         if( t->isa_oop_ptr() &&
3159             t->is_ptr()->offset() != 0 ) {
3160           last_safept_node->add_prec( m );
3161           break;
3162         }
3163       }
3164     }
3165 
3166     if( n->jvms() ) {           // Precedence edge from derived to safept
3167       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3168       if( b->get_node(last_safept) != last_safept_node ) {
3169         last_safept = b->find_node(last_safept_node);
3170       }
3171       for( uint j=last_safept; j > i; j-- ) {
3172         Node *mach = b->get_node(j);
3173         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3174           mach->add_prec( n );
3175       }
3176       last_safept = i;
3177       last_safept_node = m;
3178     }
3179   }
3180 
3181   if (fat_proj_seen) {
3182     // Garbage collect pinch nodes that were not consumed.
3183     // They are usually created by a fat kill MachProj for a call.

3302 }
3303 #endif
3304 
3305 //-----------------------init_scratch_buffer_blob------------------------------
3306 // Construct a temporary BufferBlob and cache it for this compile.
3307 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3308   // If there is already a scratch buffer blob allocated and the
3309   // constant section is big enough, use it.  Otherwise free the
3310   // current and allocate a new one.
3311   BufferBlob* blob = scratch_buffer_blob();
3312   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3313     // Use the current blob.
3314   } else {
3315     if (blob != nullptr) {
3316       BufferBlob::free(blob);
3317     }
3318 
3319     ResourceMark rm;
3320     _scratch_const_size = const_size;
3321     int size = C2Compiler::initial_code_buffer_size(const_size);
3322     blob = BufferBlob::create("Compile::scratch_buffer", size);
3323     // Record the buffer blob for next time.
3324     set_scratch_buffer_blob(blob);
3325     // Have we run out of code space?
3326     if (scratch_buffer_blob() == nullptr) {
3327       // Let CompilerBroker disable further compilations.
3328       C->record_failure("Not enough space for scratch buffer in CodeCache");
3329       return;
3330     }
3331   }
3332 
3333   // Initialize the relocation buffers
3334   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3335   set_scratch_locs_memory(locs_buf);
3336 }
3337 
3338 
3339 //-----------------------scratch_emit_size-------------------------------------
3340 // Helper function that computes size by emitting code
3341 uint PhaseOutput::scratch_emit_size(const Node* n) {

3372   buf.insts()->set_scratch_emit();
3373   buf.stubs()->set_scratch_emit();
3374 
3375   // Do the emission.
3376 
3377   Label fakeL; // Fake label for branch instructions.
3378   Label*   saveL = nullptr;
3379   uint save_bnum = 0;
3380   bool is_branch = n->is_MachBranch();
3381   C2_MacroAssembler masm(&buf);
3382   masm.bind(fakeL);
3383   if (is_branch) {
3384     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3385     n->as_MachBranch()->label_set(&fakeL, 0);
3386   }
3387   n->emit(&masm, C->regalloc());
3388 
3389   // Emitting into the scratch buffer should not fail
3390   assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3391 
3392   if (is_branch) // Restore label.

3393     n->as_MachBranch()->label_set(saveL, save_bnum);

3394 
3395   // End scratch_emit_size section.
3396   set_in_scratch_emit_size(false);
3397 
3398   return buf.insts_size();
3399 }
3400 
3401 void PhaseOutput::install() {
3402   if (!C->should_install_code()) {
3403     return;
3404   } else if (C->stub_function() != nullptr) {
3405     install_stub(C->stub_name());
3406   } else {
3407     install_code(C->method(),
3408                  C->entry_bci(),
3409                  CompileBroker::compiler2(),
3410                  C->has_unsafe_access(),
3411                  SharedRuntime::is_wide_vector(C->max_vector_size()));
3412   }
3413 }
3414 
3415 void PhaseOutput::install_code(ciMethod*         target,
3416                                int               entry_bci,
3417                                AbstractCompiler* compiler,
3418                                bool              has_unsafe_access,
3419                                bool              has_wide_vectors) {
3420   // Check if we want to skip execution of all compiled code.
3421   {
3422 #ifndef PRODUCT
3423     if (OptoNoExecute) {
3424       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3425       return;
3426     }
3427 #endif
3428     Compile::TracePhase tp(_t_registerMethod);
3429 
3430     if (C->is_osr_compilation()) {
3431       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3432       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3433     } else {
3434       if (!target->is_static()) {
3435         // The UEP of an nmethod ensures that the VEP is padded. However, the padding of the UEP is placed
3436         // before the inline cache check, so we don't have to execute any nop instructions when dispatching
3437         // through the UEP, yet we can ensure that the VEP is aligned appropriately.
3438         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3439       }
3440       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3441       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3442     }
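To make the offset bookkeeping above concrete (assumed numbers; _first_block_size and MacroAssembler::ic_check_size() are target-dependent):

    // Suppose the first block emits 32 bytes and ic_check_size() is 8:
    //   Entry          (unverified, UEP) = 32 - 8 = 24
    //   Verified_Entry (VEP)             = 32
    // Dispatch through the UEP runs only the inline cache check and falls
    // through into the verified entry; the alignment nops sit before the UEP.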
3443 
3444     C->env()->register_method(target,
3445                                      entry_bci,
3446                                      &_code_offsets,
3447                                      _orig_pc_slot_offset_in_bytes,
3448                                      code_buffer(),
3449                                      frame_size_in_words(),
3450                                      oop_map_set(),
3451                                      &_handler_table,
3452                                      inc_table(),
3453                                      compiler,
3454                                      has_unsafe_access,
3455                                      SharedRuntime::is_wide_vector(C->max_vector_size()),
3456                                      C->has_monitors(),
3457                                      C->has_scoped_access(),
3458                                      0);
3459 
3460     if (C->log() != nullptr) { // Print code cache state into compiler log
3461       C->log()->code_cache_state();
3462     }
3463   }
3464 }
3465 void PhaseOutput::install_stub(const char* stub_name) {
3466   // Entry point will be accessed using stub_entry_point();
3467   if (code_buffer() == nullptr) {
3468     Matcher::soft_match_failure();
3469   } else {
3470     if (PrintAssembly && (WizardMode || Verbose))
3471       tty->print_cr("### Stub::%s", stub_name);
3472 
3473     if (!C->failing()) {
3474       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3475 
3476       // Make the NMethod
3477       // For now we mark the frame as never safe for profile stackwalking
3478       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,

src/hotspot/share/opto/output.cpp (new version)

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/assembler.inline.hpp"
  26 #include "code/aotCodeCache.hpp"
  27 #include "code/compiledIC.hpp"
  28 #include "code/debugInfo.hpp"
  29 #include "code/debugInfoRec.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compilerDirectives.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "compiler/oopMap.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/c2/barrierSetC2.hpp"
  36 #include "gc/shared/gc_globals.hpp"
  37 #include "memory/allocation.hpp"
  38 #include "opto/ad.hpp"
  39 #include "opto/block.hpp"
  40 #include "opto/c2_MacroAssembler.hpp"
  41 #include "opto/c2compiler.hpp"
  42 #include "opto/callnode.hpp"
  43 #include "opto/cfgnode.hpp"
  44 #include "opto/locknode.hpp"
  45 #include "opto/machnode.hpp"
  46 #include "opto/node.hpp"
  47 #include "opto/optoreg.hpp"
  48 #include "opto/output.hpp"
  49 #include "opto/regalloc.hpp"
  50 #include "opto/type.hpp"
  51 #include "runtime/sharedRuntime.hpp"
  52 #include "utilities/macros.hpp"
  53 #include "utilities/powerOfTwo.hpp"
  54 #include "utilities/xmlstream.hpp"
  55 
  56 #ifndef PRODUCT

 225     _first_block_size(0),
 226     _handler_table(),
 227     _inc_table(),
 228     _stub_list(),
 229     _oop_map_set(nullptr),
 230     _scratch_buffer_blob(nullptr),
 231     _scratch_locs_memory(nullptr),
 232     _scratch_const_size(-1),
 233     _in_scratch_emit_size(false),
 234     _frame_slots(0),
 235     _code_offsets(),
 236     _node_bundling_limit(0),
 237     _node_bundling_base(nullptr),
 238     _orig_pc_slot(0),
 239     _orig_pc_slot_offset_in_bytes(0),
 240     _buf_sizes(),
 241     _block(nullptr),
 242     _index(0) {
 243   C->set_output(this);
 244   if (C->stub_name() == nullptr) {
 245     int fixed_slots = C->fixed_slots();
 246     if (C->needs_stack_repair()) {
 247       fixed_slots -= 2;
 248     }
 249     // TODO 8284443 Only reserve extra slot if needed
 250     if (InlineTypeReturnedAsFields) {
 251       fixed_slots -= 2;
 252     }
 253     _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
 254   }
 255 }
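A worked example of the slot arithmetic above, assuming a 64-bit port (sizeof(address) == 8, VMRegImpl::stack_slot_size == 4) and an illustrative fixed_slots() value:

    // fixed_slots() = 10, with needs_stack_repair() and
    // InlineTypeReturnedAsFields both true:
    //   fixed_slots   = 10 - 2 - 2 = 6
    //   _orig_pc_slot = 6 - (8 / 4) = 4
    // The two subtractions keep the saved original PC clear of the slots
    // reserved for stack repair and for inline types returned as fields.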
 256 
 257 PhaseOutput::~PhaseOutput() {
 258   C->set_output(nullptr);
 259   if (_scratch_buffer_blob != nullptr) {
 260     BufferBlob::free(_scratch_buffer_blob);
 261   }
 262 }
 263 
 264 void PhaseOutput::perform_mach_node_analysis() {
 265   // Late barrier analysis must be done after schedule and bundle
 266   // Otherwise liveness based spilling will fail
 267   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 268   bs->late_barrier_analysis();
 269 
 270   pd_perform_mach_node_analysis();
 271 
 272   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 273 }
 274 
 275 // Convert Nodes to instruction bits and pass off to the VM
 276 void PhaseOutput::Output() {
 277   // RootNode goes
 278   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 279 
 280   // The number of new nodes (mostly MachNop) is proportional to
 281   // the number of java calls and inner loops which are aligned.
 282   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 283                             C->inner_loops()*(OptoLoopAlignment-1)),
 284                            "out of nodes before code generation" ) ) {
 285     return;
 286   }
 287   // Make sure I can find the Start Node
 288   Block *entry = C->cfg()->get_block(1);
 289   Block *broot = C->cfg()->get_root_block();
 290 
 291   const StartNode *start = entry->head()->as_Start();
 292 
 293   // Replace StartNode with prolog
 294   Label verified_entry;
 295   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
 296   entry->map_node(prolog, 0);
 297   C->cfg()->map_node_to_block(prolog, entry);
 298   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 299 
 300   // Virtual methods need an unverified entry point
 301   if (C->is_osr_compilation()) {
 302     if (PoisonOSREntry) {

 303       // TODO: Should use a ShouldNotReachHereNode...
 304       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 305     }
 306   } else {
 307     if (C->method()) {
 308       if (C->method()->has_scalarized_args()) {
 309         // Add entry point to unpack all inline type arguments
 310         C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
 311         if (!C->method()->is_static()) {
 312           // Add verified/unverified entry points to only unpack inline type receiver at interface calls
 313           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
 314           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true,  /* receiver_only */ true));
 315           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
 316         }
 317       } else if (!C->method()->is_static()) {
 318         // Insert unvalidated entry point
 319         C->cfg()->insert(broot, 0, new MachUEPNode());
 320       }
 321     }

 322   }
 323 
 324   // Break before main entry point
 325   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 326       (OptoBreakpoint && C->is_method_compilation())       ||
 327       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 328       (OptoBreakpointC2R && !C->method())                   ) {
 329     // checking for C->method() means that OptoBreakpoint does not apply to
 330     // runtime stubs or frame converters
 331     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 332   }
 333 
 334   // Insert epilogs before every return
 335   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 336     Block* block = C->cfg()->get_block(i);
 337     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 338       Node* m = block->end();
 339       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 340         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 341         block->add_inst(epilog);
 342         C->cfg()->map_node_to_block(epilog, block);
 343       }
 344     }
 345   }
 346 
 347   // Keeper of sizing aspects
 348   _buf_sizes = BufferSizingData();
 349 
 350   // Initialize code buffer
 351   estimate_buffer_size(_buf_sizes._const);
 352   if (C->failing()) return;
 353 
 354   // Pre-compute the length of blocks and replace
 355   // long branches with short if machine supports it.
 356   // Must be done before ScheduleAndBundle due to SPARC delay slots
 357   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 358   blk_starts[0] = 0;
 359   shorten_branches(blk_starts);
 360 
 361   if (!C->is_osr_compilation() && C->has_scalarized_args()) {
 362     // Compute the offsets of the entry points required by the inline type calling convention
 363     if (!C->method()->is_static()) {
 364       // We have entries at the beginning of the method, implemented by the first 4 nodes.
 365       // Entry                     (unverified) @ offset 0
 366       // Verified_Inline_Entry_RO
 367       // Inline_Entry              (unverified)
 368       // Verified_Inline_Entry
 369       uint offset = 0;
 370       _code_offsets.set_value(CodeOffsets::Entry, offset);
 371 
 372       offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
 373       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
 374 
 375       offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
 376       _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
 377 
 378       offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
 379       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
 380     } else {
 381       _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
 382       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
 383     }
 384   }
 385 
 386   ScheduleAndBundle();
 387   if (C->failing()) {
 388     return;
 389   }
 390 
 391   perform_mach_node_analysis();
 392 
 393   // Complete sizing of codebuffer
 394   CodeBuffer* cb = init_buffer();
 395   if (cb == nullptr || C->failing()) {
 396     return;
 397   }
 398 
 399   BuildOopMaps();
 400 
 401   if (C->failing())  {
 402     return;
 403   }
 404 
 405   C2_MacroAssembler masm(cb);

 527     // Sum all instruction sizes to compute block size
 528     uint last_inst = block->number_of_nodes();
 529     uint blk_size = 0;
 530     for (uint j = 0; j < last_inst; j++) {
 531       _index = j;
 532       Node* nj = block->get_node(_index);
 533       // Handle machine instruction nodes
 534       if (nj->is_Mach()) {
 535         MachNode* mach = nj->as_Mach();
 536         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 537         reloc_size += mach->reloc();
 538         if (mach->is_MachCall()) {
 539           // add size information for trampoline stub
 540           // class CallStubImpl is platform-specific and defined in the *.ad files.
 541           stub_size  += CallStubImpl::size_call_trampoline();
 542           reloc_size += CallStubImpl::reloc_call_trampoline();
 543 
 544           MachCallNode *mcall = mach->as_MachCall();
 545           // This destination address is NOT PC-relative
 546 
 547           if (mcall->entry_point() != nullptr) {
 548             mcall->method_set((intptr_t)mcall->entry_point());
 549           }
 550 
 551           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 552             stub_size  += CompiledDirectCall::to_interp_stub_size();
 553             reloc_size += CompiledDirectCall::reloc_to_interp_stub();
 554           }
 555         } else if (mach->is_MachSafePoint()) {
 556           // If call/safepoint are adjacent, account for possible
 557           // nop to disambiguate the two safepoints.
 558           // ScheduleAndBundle() can rearrange nodes in a block,
 559           // check for all offsets inside this block.
 560           if (last_call_adr >= blk_starts[i]) {
 561             blk_size += nop_size;
 562           }
 563         }
 564         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 565           // Nop is inserted between "avoid back to back" instructions.
 566           // ScheduleAndBundle() can rearrange nodes in a block,
 567           // check for all offsets inside this block.
 568           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 569             blk_size += nop_size;

 784     // New functionality:
 785     //   Assert if the local is not top. In product mode let the new node
 786     //   override the old entry.
 787     assert(local == C->top(), "LocArray collision");
 788     if (local == C->top()) {
 789       return;
 790     }
 791     array->pop();
 792   }
 793   const Type *t = local->bottom_type();
 794 
 795   // Is it a safepoint scalar object node?
 796   if (local->is_SafePointScalarObject()) {
 797     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 798 
 799     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 800     if (sv == nullptr) {
 801       ciKlass* cik = t->is_oopptr()->exact_klass();
 802       assert(cik->is_instance_klass() ||
 803              cik->is_array_klass(), "Not supported allocation.");
 804       uint first_ind = spobj->first_index(sfpt->jvms());
 805       // Nullable, scalarized inline types have a null_marker input
 806       // that needs to be checked before using the field values.
 807       ScopeValue* properties = nullptr;
 808       if (cik->is_inlinetype()) {
 809         Node* null_marker_node = sfpt->in(first_ind++);
 810         assert(null_marker_node != nullptr, "null_marker node not found");
 811         if (!null_marker_node->is_top()) {
 812           const TypeInt* null_marker_type = null_marker_node->bottom_type()->is_int();
 813           if (null_marker_node->is_Con()) {
 814             properties = new ConstantIntValue(null_marker_type->get_con());
 815           } else {
 816             OptoReg::Name null_marker_reg = C->regalloc()->get_reg_first(null_marker_node);
 817             properties = new_loc_value(C->regalloc(), null_marker_reg, Location::normal);
 818           }
 819         }
 820       }
 821       if (cik->is_array_klass() && !cik->is_type_array_klass()) {
 822         jint props = ArrayKlass::ArrayProperties::DEFAULT;
 823         if (cik->as_array_klass()->element_klass()->is_inlinetype()) {
 824           if (cik->as_array_klass()->is_elem_null_free()) {
 825             props |= ArrayKlass::ArrayProperties::NULL_RESTRICTED;
 826           }
 827           if (!cik->as_array_klass()->is_elem_atomic()) {
 828             props |= ArrayKlass::ArrayProperties::NON_ATOMIC;
 829           }
 830         }
 831         properties = new ConstantIntValue(props);
 832       }
 833       sv = new ObjectValue(spobj->_idx,
 834                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, properties);
 835       set_sv_for_object_node(objs, sv);
 836 

 837       for (uint i = 0; i < spobj->n_fields(); i++) {
 838         Node* fld_node = sfpt->in(first_ind+i);
 839         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 840       }
 841     }
 842     array->append(sv);
 843     return;
 844   } else if (local->is_SafePointScalarMerge()) {
 845     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 846     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 847 
 848     if (mv == nullptr) {
 849       GrowableArray<ScopeValue*> deps;
 850 
 851       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 852       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 853       assert(deps.length() == 1, "missing value");
 854 
 855       int selector_idx = smerge->selector_idx(sfpt->jvms());
 856       (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);

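The array-properties value computed in the hunk above is a plain bit set. A self-contained illustration of how the flags combine (the enum values here are hypothetical stand-ins; the real constants are defined by ArrayKlass::ArrayProperties):

    #include <cstdio>

    enum ArrayProps : int {          // hypothetical stand-in values
      DEFAULT         = 0,
      NULL_RESTRICTED = 1 << 0,      // elements may not be null
      NON_ATOMIC      = 1 << 1,      // element stores need no atomicity
    };

    int main() {
      // A null-free array of a non-atomic inline type sets both bits:
      int props = DEFAULT;
      props |= NULL_RESTRICTED;
      props |= NON_ATOMIC;
      std::printf("props = %d\n", props);   // prints: props = 3
      return 0;
    }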
1063       continue;
1064     }
1065 
1066     ObjectValue* other = sv_for_node_id(objs, n->_idx);
1067     if (ov == other) {
1068       return true;
1069     }
1070   }
1071   return false;
1072 }
1073 
1074 //--------------------------Process_OopMap_Node--------------------------------
1075 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1076   // Handle special safepoint nodes for synchronization
1077   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1078   MachCallNode      *mcall;
1079 
1080   int safepoint_pc_offset = current_offset;
1081   bool is_method_handle_invoke = false;
1082   bool return_oop = false;
1083   bool return_scalarized = false;
1084   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1085   bool arg_escape = false;
1086 
1087   // Add the safepoint in the DebugInfoRecorder
1088   if( !mach->is_MachCall() ) {
1089     mcall = nullptr;
1090     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1091   } else {
1092     mcall = mach->as_MachCall();
1093 
1094     // Is the call a MethodHandle call?
1095     if (mcall->is_MachCallJava()) {
1096       if (mcall->as_MachCallJava()->_method_handle_invoke) {
1097         assert(C->has_method_handle_invokes(), "must have been set during call generation");
1098         is_method_handle_invoke = true;
1099       }
1100       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1101     }
1102 
1103     // Check if a call returns an object.
1104     if (mcall->returns_pointer() || mcall->returns_scalarized()) {
1105       return_oop = true;
1106     }
1107     if (mcall->returns_scalarized()) {
1108       return_scalarized = true;
1109     }
1110     safepoint_pc_offset += mcall->ret_addr_offset();
1111     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1112   }
1113 
1114   // Loop over the JVMState list to add scope information
1115   // Do not skip safepoints with a null method, they need monitor info
1116   JVMState* youngest_jvms = sfn->jvms();
1117   int max_depth = youngest_jvms->depth();
1118 
1119   // Allocate the object pool for scalar-replaced objects -- the map from
1120   // small-integer keys (which can be recorded in the local and ostack
1121   // arrays) to descriptions of the object state.
1122   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1123 
1124   // Visit scopes from oldest to youngest.
1125   for (int depth = 1; depth <= max_depth; depth++) {
1126     JVMState* jvms = youngest_jvms->of_depth(depth);
1127     int idx;
1128     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1129     // Safepoints that do not have method() set only provide oop-map and monitor info

1158     // Build the growable array of ScopeValues for exp stack
1159     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
1160 
1161     // Loop over monitors and insert into array
1162     for (idx = 0; idx < num_mon; idx++) {
1163       // Grab the node that defines this monitor
1164       Node* box_node = sfn->monitor_box(jvms, idx);
1165       Node* obj_node = sfn->monitor_obj(jvms, idx);
1166 
1167       // Create ScopeValue for object
1168       ScopeValue *scval = nullptr;
1169 
1170       if (obj_node->is_SafePointScalarObject()) {
1171         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
1172         scval = PhaseOutput::sv_for_node_id(objs, spobj->_idx);
1173         if (scval == nullptr) {
1174           const Type *t = spobj->bottom_type();
1175           ciKlass* cik = t->is_oopptr()->exact_klass();
1176           assert(cik->is_instance_klass() ||
1177                  cik->is_array_klass(), "Not supported allocation.");
1178           assert(!cik->is_inlinetype(), "Synchronization on value object?");
1179           ScopeValue* properties = nullptr;
1180           if (cik->is_array_klass() && !cik->is_type_array_klass()) {
1181             jint props = ArrayKlass::ArrayProperties::DEFAULT;
1182             if (cik->as_array_klass()->element_klass()->is_inlinetype()) {
1183               if (cik->as_array_klass()->is_elem_null_free()) {
1184                 props |= ArrayKlass::ArrayProperties::NULL_RESTRICTED;
1185               }
1186               if (!cik->as_array_klass()->is_elem_atomic()) {
1187                 props |= ArrayKlass::ArrayProperties::NON_ATOMIC;
1188               }
1189             }
1190             properties = new ConstantIntValue(props);
1191           }
1192           ObjectValue* sv = new ObjectValue(spobj->_idx,
1193                                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, properties);
1194           PhaseOutput::set_sv_for_object_node(objs, sv);
1195 
1196           uint first_ind = spobj->first_index(youngest_jvms);
1197           for (uint i = 0; i < spobj->n_fields(); i++) {
1198             Node* fld_node = sfn->in(first_ind+i);
1199             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
1200           }
1201           scval = sv;
1202         }
1203       } else if (obj_node->is_SafePointScalarMerge()) {
1204         SafePointScalarMergeNode* smerge = obj_node->as_SafePointScalarMerge();
1205         ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
1206 
1207         if (mv == nullptr) {
1208           GrowableArray<ScopeValue*> deps;
1209 
1210           int merge_pointer_idx = smerge->merge_pointer_idx(youngest_jvms);
1211           FillLocArray(0, sfn, sfn->in(merge_pointer_idx), &deps, objs);
1212           assert(deps.length() == 1, "missing value");
1213 

1281     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1282     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1283 
1284     // Make method available for all Safepoints
1285     ciMethod* scope_method = method ? method : C->method();
1286     // Describe the scope here
1287     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1288     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1289     // Now we can describe the scope.
1290     methodHandle null_mh;
1291     bool rethrow_exception = false;
1292     C->debug_info()->describe_scope(
1293       safepoint_pc_offset,
1294       null_mh,
1295       scope_method,
1296       jvms->bci(),
1297       jvms->should_reexecute(),
1298       rethrow_exception,
1299       is_method_handle_invoke,
1300       return_oop,
1301       return_scalarized,
1302       has_ea_local_in_scope,
1303       arg_escape,
1304       locvals,
1305       expvals,
1306       monvals
1307     );
1308   } // End jvms loop
1309 
1310   // Mark the end of the scope set.
1311   C->debug_info()->end_safepoint(safepoint_pc_offset);
1312 }
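The depth loop above visits inlining scopes caller-first even though JVMState chains are linked youngest-first. A standalone stand-in for that of_depth() traversal (VState is an illustrative type, not the VM's JVMState):

    struct VState {          // youngest-first linked chain of inline scopes
      VState* caller;        // next-older scope, nullptr past the root
      int     bci;
    };

    static int depth_of(const VState* s) {
      int d = 0;
      for (; s != nullptr; s = s->caller) d++;
      return d;              // the youngest state has the maximum depth
    }

    static const VState* of_depth(const VState* youngest, int depth) {
      // depth 1 is the oldest (outermost) scope; depth_of(youngest) the newest.
      for (int skip = depth_of(youngest) - depth; skip > 0; skip--) {
        youngest = youngest->caller;
      }
      return youngest;
    }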
1313 
1314 
1315 
1316 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1317 class NonSafepointEmitter {
1318     Compile*  C;
1319     JVMState* _pending_jvms;
1320     int       _pending_offset;
1321 

1657           MachNode *nop = new MachNopNode(nops_cnt);
1658           block->insert_node(nop, j++);
1659           last_inst++;
1660           C->cfg()->map_node_to_block(nop, block);
1661           // Ensure enough space.
1662           masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1663           if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1664             C->record_failure("CodeCache is full");
1665             return;
1666           }
1667           nop->emit(masm, C->regalloc());
1668           masm->code()->flush_bundle(true);
1669           current_offset = masm->offset();
1670         }
1671 
1672         bool observe_safepoint = is_sfn;
1673         // Remember the start of the last call in a basic block
1674         if (is_mcall) {
1675           MachCallNode *mcall = mach->as_MachCall();
1676 
1677           if (mcall->entry_point() != nullptr) {
1678             // This destination address is NOT PC-relative
1679             mcall->method_set((intptr_t)mcall->entry_point());
1680           }
1681 
1682           // Save the return address
1683           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1684 
1685           observe_safepoint = mcall->guaranteed_safepoint();
1686         }
1687 
1688         // sfn will be valid whenever mcall is valid now because of inheritance
1689         if (observe_safepoint) {
1690           // Handle special safepoint nodes for synchronization
1691           if (!is_mcall) {
1692             MachSafePointNode *sfn = mach->as_MachSafePoint();
1693             // !!!!! Stubs only need an oopmap right now, so bail out
1694             if (sfn->jvms()->method() == nullptr) {
1695               // Write the oopmap directly to the code blob??!!
1696               continue;
1697             }
1698           } // End synchronization
1699 
1700           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1801       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1802         node_offsets[n->_idx] = masm->offset();
1803       }
1804 #endif
1805       assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1806 
1807       // "Normal" instruction case
1808       DEBUG_ONLY(uint instr_offset = masm->offset());
1809       n->emit(masm, C->regalloc());
1810       current_offset = masm->offset();
1811 
1812       // Above we only verified that there is enough space in the instruction section.
1813       // However, the instruction may emit stubs that cause code buffer expansion.
1814       // Bail out here if expansion failed due to a lack of code cache space.
1815       if (C->failing()) {
1816         return;
1817       }
1818 
1819       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1820              "ret_addr_offset() not within emitted code");

1821 #ifdef ASSERT
1822       uint n_size = n->size(C->regalloc());
1823       if (n_size < (current_offset-instr_offset)) {
1824         MachNode* mach = n->as_Mach();
1825         n->dump();
1826         mach->dump_format(C->regalloc(), tty);
1827         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1828         Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
1829         tty->print_cr(" ------------------- ");
1830         BufferBlob* blob = this->scratch_buffer_blob();
1831         address blob_begin = blob->content_begin();
1832         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1833         assert(false, "wrong size of mach node");
1834       }
1835 #endif
1836       non_safepoints.observe_instruction(n, current_offset);
1837 
1838       // mcall is last "call" that can be a safepoint
1839       // record it so we can see if a poll will directly follow it
1840       // in which case we'll need a pad to make the PcDesc sites unique

3238         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3239         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3240       }
3241     }
3242     // Do not allow defs of new derived values to float above GC
3243     // points unless the base is definitely available at the GC point.
3244 
3245     Node *m = b->get_node(i);
3246 
3247     // Add precedence edge from following safepoint to use of derived pointer
3248     if( last_safept_node != end_node &&
3249         m != last_safept_node) {
3250       for (uint k = 1; k < m->req(); k++) {
3251         const Type *t = m->in(k)->bottom_type();
3252         if( t->isa_oop_ptr() &&
3253             t->is_ptr()->offset() != 0 ) {
3254           last_safept_node->add_prec( m );
3255           break;
3256         }
3257       }
3258 
3259       // Do not allow a CheckCastPP node whose input is a raw pointer to
3260       // float past a safepoint.  This can occur when a buffered inline
3261       // type is allocated in a loop and the CheckCastPP from that
3262       // allocation is reused outside the loop.  If the use inside the
3263       // loop is scalarized the CheckCastPP will no longer be connected
3264       // to the loop safepoint.  See JDK-8264340.
3265       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
3266         Node *def = m->in(1);
3267         if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
3268           last_safept_node->add_prec(m);
3269         }
3270       }
3271     }
3272 
3273     if( n->jvms() ) {           // Precedence edge from derived to safept
3274       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3275       if( b->get_node(last_safept) != last_safept_node ) {
3276         last_safept = b->find_node(last_safept_node);
3277       }
3278       for( uint j=last_safept; j > i; j-- ) {
3279         Node *mach = b->get_node(j);
3280         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3281           mach->add_prec( n );
3282       }
3283       last_safept = i;
3284       last_safept_node = m;
3285     }
3286   }
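The add_prec() calls in the loop above are the whole mechanism: a precedence edge is an extra non-data input, and the scheduler must place every input before the node that lists it. In toy form (simplified; the VM's Node is far richer):

    #include <vector>

    struct N {
      std::vector<N*> in;   // data inputs and precedence inputs alike
      void add_prec(N* must_come_first) { in.push_back(must_come_first); }
    };
    // last_safept_node->add_prec(m) pins the derived-pointer use m (and the
    // raw-pointer CheckCastPP case) above the following safepoint, while
    // mach->add_prec(n) keeps a later AddP from floating above safepoint n.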
3287 
3288   if (fat_proj_seen) {
3289     // Garbage collect pinch nodes that were not consumed.
3290     // They are usually created by a fat kill MachProj for a call.

3409 }
3410 #endif
3411 
3412 //-----------------------init_scratch_buffer_blob------------------------------
3413 // Construct a temporary BufferBlob and cache it for this compile.
3414 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3415   // If there is already a scratch buffer blob allocated and the
3416   // constant section is big enough, use it.  Otherwise free the
3417   // current and allocate a new one.
3418   BufferBlob* blob = scratch_buffer_blob();
3419   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3420     // Use the current blob.
3421   } else {
3422     if (blob != nullptr) {
3423       BufferBlob::free(blob);
3424     }
3425 
3426     ResourceMark rm;
3427     _scratch_const_size = const_size;
3428     int size = C2Compiler::initial_code_buffer_size(const_size);
3429     if (C->has_scalarized_args()) {
3430       // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
3431       // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
3432       ciMethod* method = C->method();
3433       int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
3434       int arg_num = 0;
3435       if (!method->is_static()) {
3436         if (method->is_scalarized_arg(arg_num)) {
3437           size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
3438         }
3439         arg_num++;
3440       }
3441       for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
3442         if (method->is_scalarized_arg(arg_num)) {
3443           size += str.type()->as_inline_klass()->oop_count() * barrier_size;
3444         }
3445         arg_num++;
3446       }
3447     }
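         // Worked example (hypothetical method; constants from above): with
         // UseZGC, barrier_size == 200, so a non-static method whose
         // scalarized receiver holds 2 oop fields and whose single
         // scalarized argument holds 1 oop field grows the buffer by
         //   (2 + 1) * 200 = 600 bytes
         // beyond C2Compiler::initial_code_buffer_size(const_size).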
3448     blob = BufferBlob::create("Compile::scratch_buffer", size);
3449     // Record the buffer blob for next time.
3450     set_scratch_buffer_blob(blob);
3451     // Have we run out of code space?
3452     if (scratch_buffer_blob() == nullptr) {
3453       // Let CompileBroker disable further compilations.
3454       C->record_failure("Not enough space for scratch buffer in CodeCache");
3455       return;
3456     }
3457   }
3458 
3459   // Initialize the relocation buffers
3460   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3461   set_scratch_locs_memory(locs_buf);
3462 }
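     // Layout note (follows from the pointer arithmetic above): the
     // relocation scratch area occupies the last MAX_locs_size relocInfo
     // slots of the blob, so trial emissions effectively get
     //   content_size - MAX_locs_size * sizeof(relocInfo)
     // bytes of instruction space.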
3463 
3464 
3465 //-----------------------scratch_emit_size-------------------------------------
3466 // Helper function that computes a node's size by trial-emitting its code
3467 uint PhaseOutput::scratch_emit_size(const Node* n) {

3498   buf.insts()->set_scratch_emit();
3499   buf.stubs()->set_scratch_emit();
3500 
3501   // Do the emission.
3502 
3503   Label fakeL; // Fake label for branch instructions.
3504   Label*   saveL = nullptr;
3505   uint save_bnum = 0;
3506   bool is_branch = n->is_MachBranch();
3507   C2_MacroAssembler masm(&buf);
3508   masm.bind(fakeL);
3509   if (is_branch) {
3510     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3511     n->as_MachBranch()->label_set(&fakeL, 0);
3512   }
3513   n->emit(&masm, C->regalloc());
3514 
3515   // Emitting into the scratch buffer should not fail
3516   assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3517 
3518   // Restore label.
3519   if (is_branch) {
3520     n->as_MachBranch()->label_set(saveL, save_bnum);
3521   }
3522 
3523   // End scratch_emit_size section.
3524   set_in_scratch_emit_size(false);
3525 
3526   return buf.insts_size();
3527 }
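     // Hedged usage sketch (the exact caller is an assumption, not shown in
     // this file): MachNodes without a hand-written size() can fall back to
     // a trial emission, roughly
     //
     //   uint MachNode::emit_size(PhaseRegAlloc* ra_) const {
     //     return Compile::current()->output()->scratch_emit_size(this);
     //   }
     //
     // The fake label bound above keeps branch nodes from dereferencing a
     // real, not-yet-bound label during this measurement pass.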
3528 
3529 void PhaseOutput::install() {
3530   if (!C->should_install_code()) {
3531     return;
3532   } else if (C->stub_function() != nullptr) {
3533     install_stub(C->stub_name());
3534   } else {
3535     install_code(C->method(),
3536                  C->entry_bci(),
3537                  CompileBroker::compiler2(),
3538                  C->has_unsafe_access(),
3539                  SharedRuntime::is_wide_vector(C->max_vector_size()));
3540   }
3541 }
3542 
3543 void PhaseOutput::install_code(ciMethod*         target,
3544                                int               entry_bci,
3545                                AbstractCompiler* compiler,
3546                                bool              has_unsafe_access,
3547                                bool              has_wide_vectors) {
3548   // Check if we want to skip execution of all compiled code.
3549   {
3550 #ifndef PRODUCT
3551     if (OptoNoExecute) {
3552       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3553       return;
3554     }
3555 #endif
3556     Compile::TracePhase tp(_t_registerMethod);
3557 
3558     if (C->is_osr_compilation()) {
3559       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3560       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3561     } else {
3562       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3563       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
3564         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
3565       }
3566       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
3567         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
3568       }
3569       if (_code_offsets.value(CodeOffsets::Entry) == -1) {
3570         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
3571       }
3572       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3573     }
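         // Summary of the defaulting above (normal, non-OSR case): any of
         // Verified_Inline_Entry, Verified_Inline_Entry_RO, and Entry that
         // emission did not set explicitly (value still -1) collapses onto
         // Verified_Entry at _first_block_size, while OSR_Entry is a 0
         // placeholder; the OSR case mirrors this with the roles swapped.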
3574 
3575     C->env()->register_method(target,
3576                               entry_bci,
3577                               &_code_offsets,
3578                               _orig_pc_slot_offset_in_bytes,
3579                               code_buffer(),
3580                               frame_size_in_words(),
3581                               _oop_map_set,
3582                               &_handler_table,
3583                               inc_table(),
3584                               compiler,
3585                               has_unsafe_access,
3586                               SharedRuntime::is_wide_vector(C->max_vector_size()),
3587                               C->has_monitors(),
3588                               C->has_scoped_access(),
3589                               0);
3590 
3591     if (C->log() != nullptr) { // Print code cache state into compiler log
3592       C->log()->code_cache_state();
3593     }
3594   }
3595 }
3596 void PhaseOutput::install_stub(const char* stub_name) {
3597   // Entry point will be accessed using stub_entry_point();
3598   if (code_buffer() == nullptr) {
3599     Matcher::soft_match_failure();
3600   } else {
3601     if (PrintAssembly && (WizardMode || Verbose))
3602       tty->print_cr("### Stub::%s", stub_name);
3603 
3604     if (!C->failing()) {
3605       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3606 
3607       // Make the RuntimeStub.
3608       // For now we mark the frame as never safe for profile stack-walking.
3609       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,