src/hotspot/share/opto/output.cpp

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/assembler.inline.hpp"
  26 #include "code/aotCodeCache.hpp"
  27 #include "code/compiledIC.hpp"
  28 #include "code/debugInfo.hpp"
  29 #include "code/debugInfoRec.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compilerDirectives.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "compiler/oopMap.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/c2/barrierSetC2.hpp"
  36 #include "memory/allocation.hpp"
  37 #include "opto/ad.hpp"
  38 #include "opto/block.hpp"
  39 #include "opto/c2_MacroAssembler.hpp"
  40 #include "opto/c2compiler.hpp"
  41 #include "opto/callnode.hpp"
  42 #include "opto/cfgnode.hpp"
  43 #include "opto/locknode.hpp"
  44 #include "opto/machnode.hpp"
  45 #include "opto/node.hpp"
  46 #include "opto/optoreg.hpp"
  47 #include "opto/output.hpp"
  48 #include "opto/regalloc.hpp"
  49 #include "opto/type.hpp"
  50 #include "runtime/sharedRuntime.hpp"
  51 #include "utilities/macros.hpp"
  52 #include "utilities/powerOfTwo.hpp"
  53 #include "utilities/xmlstream.hpp"
  54 
  55 #ifndef PRODUCT

 224     _first_block_size(0),
 225     _handler_table(),
 226     _inc_table(),
 227     _stub_list(),
 228     _oop_map_set(nullptr),
 229     _scratch_buffer_blob(nullptr),
 230     _scratch_locs_memory(nullptr),
 231     _scratch_const_size(-1),
 232     _in_scratch_emit_size(false),
 233     _frame_slots(0),
 234     _code_offsets(),
 235     _node_bundling_limit(0),
 236     _node_bundling_base(nullptr),
 237     _orig_pc_slot(0),
 238     _orig_pc_slot_offset_in_bytes(0),
 239     _buf_sizes(),
 240     _block(nullptr),
 241     _index(0) {
 242   C->set_output(this);
 243   if (C->stub_name() == nullptr) {
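         // Reserve the tail of the fixed slots for the saved original PC (sizeof(address) bytes, counted in stack slots).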
 244     _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
 245   }
 246 }
 247 
 248 PhaseOutput::~PhaseOutput() {
 249   C->set_output(nullptr);
 250   if (_scratch_buffer_blob != nullptr) {
 251     BufferBlob::free(_scratch_buffer_blob);
 252   }
 253 }
 254 
 255 void PhaseOutput::perform_mach_node_analysis() {
 256   // Late barrier analysis must be done after schedule and bundle;
 257   // otherwise, liveness-based spilling will fail.
 258   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 259   bs->late_barrier_analysis();
 260 
 261   pd_perform_mach_node_analysis();
 262 
 263   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 264 }
 265 
 266 // Convert Nodes to instruction bits and pass off to the VM
 267 void PhaseOutput::Output() {
 268   // The RootNode emits nothing, so the root block must be empty by now
 269   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 270 
 271   // The number of new nodes (mostly MachNop) is proportional to
 272   // the number of java calls and inner loops which are aligned.
 273   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 274                             C->inner_loops()*(OptoLoopAlignment-1)),
 275                            "out of nodes before code generation" ) ) {
 276     return;
 277   }
 278   // Make sure I can find the Start Node
 279   Block *entry = C->cfg()->get_block(1);
 280   Block *broot = C->cfg()->get_root_block();
 281 
 282   const StartNode *start = entry->head()->as_Start();
 283 
 284   // Replace StartNode with prolog
 285   MachPrologNode *prolog = new MachPrologNode();
 286   entry->map_node(prolog, 0);
 287   C->cfg()->map_node_to_block(prolog, entry);
 288   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 289 
 290   // Virtual methods need an unverified entry point
 291 
 292   if( C->is_osr_compilation() ) {
 293     if( PoisonOSREntry ) {
 294       // TODO: Should use a ShouldNotReachHereNode...
 295       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 296     }
 297   } else {
 298     if( C->method() && !C->method()->flags().is_static() ) {
 299       // Insert unverified entry point
 300       C->cfg()->insert( broot, 0, new MachUEPNode() );
 301     }
 302 
 303   }
 304 
 305   // Break before main entry point
 306   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 307       (OptoBreakpoint && C->is_method_compilation())       ||
 308       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 309       (OptoBreakpointC2R && !C->method())                   ) {
 310     // checking for C->method() means that OptoBreakpoint does not apply to
 311     // runtime stubs or frame converters
 312     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 313   }
 314 
 315   // Insert epilogs before every return
 316   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 317     Block* block = C->cfg()->get_block(i);
 318     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 319       Node* m = block->end();
 320       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 321         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 322         block->add_inst(epilog);
 323         C->cfg()->map_node_to_block(epilog, block);
 324       }
 325     }
 326   }
 327 
 328   // Keeper of sizing aspects
 329   _buf_sizes = BufferSizingData();
 330 
 331   // Initialize code buffer
 332   estimate_buffer_size(_buf_sizes._const);
 333   if (C->failing()) return;
 334 
 335   // Pre-compute the length of blocks and replace
 336   // long branches with short if machine supports it.
 337   // Must be done before ScheduleAndBundle due to SPARC delay slots
 338   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
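       // blk_starts[i] will hold the code offset of block i; the extra entry leaves room for the offset past the last block.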
 339   blk_starts[0] = 0;
 340   shorten_branches(blk_starts);
 341 
 342   ScheduleAndBundle();
 343   if (C->failing()) {
 344     return;
 345   }
 346 
 347   perform_mach_node_analysis();
 348 
 349   // Complete sizing of codebuffer
 350   CodeBuffer* cb = init_buffer();
 351   if (cb == nullptr || C->failing()) {
 352     return;
 353   }
 354 
 355   BuildOopMaps();
 356 
 357   if (C->failing())  {
 358     return;
 359   }
 360 
 361   C2_MacroAssembler masm(cb);

 483     // Sum all instruction sizes to compute block size
 484     uint last_inst = block->number_of_nodes();
 485     uint blk_size = 0;
 486     for (uint j = 0; j < last_inst; j++) {
 487       _index = j;
 488       Node* nj = block->get_node(_index);
 489       // Handle machine instruction nodes
 490       if (nj->is_Mach()) {
 491         MachNode* mach = nj->as_Mach();
 492         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 493         reloc_size += mach->reloc();
 494         if (mach->is_MachCall()) {
 495           // add size information for trampoline stub
 496           // class CallStubImpl is platform-specific and defined in the *.ad files.
 497           stub_size  += CallStubImpl::size_call_trampoline();
 498           reloc_size += CallStubImpl::reloc_call_trampoline();
 499 
 500           MachCallNode *mcall = mach->as_MachCall();
 501           // This destination address is NOT PC-relative
 502 
 503           mcall->method_set((intptr_t)mcall->entry_point());
 504 
 505           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 506             stub_size  += CompiledDirectCall::to_interp_stub_size();
 507             reloc_size += CompiledDirectCall::reloc_to_interp_stub();
 508           }
 509         } else if (mach->is_MachSafePoint()) {
 510           // If call/safepoint are adjacent, account for possible
 511           // nop to disambiguate the two safepoints.
 512           // ScheduleAndBundle() can rearrange nodes in a block,
 513           // check for all offsets inside this block.
 514           if (last_call_adr >= blk_starts[i]) {
 515             blk_size += nop_size;
 516           }
 517         }
 518         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 519           // Nop is inserted between "avoid back to back" instructions.
 520           // ScheduleAndBundle() can rearrange nodes in a block,
 521           // check for all offsets inside this block.
 522           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 523             blk_size += nop_size;

 738     // New functionality:
 739     //   Assert if the local is not top. In product mode let the new node
 740     //   override the old entry.
 741     assert(local == C->top(), "LocArray collision");
 742     if (local == C->top()) {
 743       return;
 744     }
 745     array->pop();
 746   }
 747   const Type *t = local->bottom_type();
 748 
 749   // Is it a safepoint scalar object node?
 750   if (local->is_SafePointScalarObject()) {
 751     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 752 
 753     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 754     if (sv == nullptr) {
 755       ciKlass* cik = t->is_oopptr()->exact_klass();
 756       assert(cik->is_instance_klass() ||
 757              cik->is_array_klass(), "Not supported allocation.");
 758       sv = new ObjectValue(spobj->_idx,
 759                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
 760       set_sv_for_object_node(objs, sv);
 761 
 762       uint first_ind = spobj->first_index(sfpt->jvms());
 763       for (uint i = 0; i < spobj->n_fields(); i++) {
 764         Node* fld_node = sfpt->in(first_ind+i);
 765         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 766       }
 767     }
 768     array->append(sv);
 769     return;
 770   } else if (local->is_SafePointScalarMerge()) {
 771     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 772     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 773 
 774     if (mv == nullptr) {
 775       GrowableArray<ScopeValue*> deps;
 776 
 777       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 778       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 779       assert(deps.length() == 1, "missing value");
 780 
 781       int selector_idx = smerge->selector_idx(sfpt->jvms());
 782       (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);

 989       continue;
 990     }
 991 
 992     ObjectValue* other = sv_for_node_id(objs, n->_idx);
 993     if (ov == other) {
 994       return true;
 995     }
 996   }
 997   return false;
 998 }
 999 
1000 //--------------------------Process_OopMap_Node--------------------------------
1001 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1002   // Handle special safepoint nodes for synchronization
1003   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1004   MachCallNode      *mcall;
1005 
1006   int safepoint_pc_offset = current_offset;
1007   bool is_method_handle_invoke = false;
1008   bool return_oop = false;
1009   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1010   bool arg_escape = false;
1011 
1012   // Add the safepoint in the DebugInfoRecorder
1013   if( !mach->is_MachCall() ) {
1014     mcall = nullptr;
1015     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1016   } else {
1017     mcall = mach->as_MachCall();
1018 
1019     // Is the call a MethodHandle call?
1020     if (mcall->is_MachCallJava()) {
1021       if (mcall->as_MachCallJava()->_method_handle_invoke) {
1022         assert(C->has_method_handle_invokes(), "must have been set during call generation");
1023         is_method_handle_invoke = true;
1024       }
1025       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1026     }
1027 
1028     // Check if a call returns an object.
1029     if (mcall->returns_pointer()) {
1030       return_oop = true;
1031     }
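         // The debug info for a call is recorded at its return address, not at the call instruction itself.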
1032     safepoint_pc_offset += mcall->ret_addr_offset();
1033     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1034   }
1035 
1036   // Loop over the JVMState list to add scope information
1037   // Do not skip safepoints with a null method, they need monitor info
1038   JVMState* youngest_jvms = sfn->jvms();
1039   int max_depth = youngest_jvms->depth();
1040 
1041   // Allocate the object pool for scalar-replaced objects -- the map from
1042   // small-integer keys (which can be recorded in the local and ostack
1043   // arrays) to descriptions of the object state.
1044   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1045 
1046   // Visit scopes from oldest to youngest.
1047   for (int depth = 1; depth <= max_depth; depth++) {
1048     JVMState* jvms = youngest_jvms->of_depth(depth);
1049     int idx;
1050     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1051     // Safepoints that do not have method() set only provide oop-map and monitor info

 1081     // Build the growable array of MonitorValues for the monitors
1082     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
1083 
1084     // Loop over monitors and insert into array
1085     for (idx = 0; idx < num_mon; idx++) {
1086       // Grab the node that defines this monitor
1087       Node* box_node = sfn->monitor_box(jvms, idx);
1088       Node* obj_node = sfn->monitor_obj(jvms, idx);
1089 
1090       // Create ScopeValue for object
1091       ScopeValue *scval = nullptr;
1092 
1093       if (obj_node->is_SafePointScalarObject()) {
1094         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
1095         scval = PhaseOutput::sv_for_node_id(objs, spobj->_idx);
1096         if (scval == nullptr) {
1097           const Type *t = spobj->bottom_type();
1098           ciKlass* cik = t->is_oopptr()->exact_klass();
1099           assert(cik->is_instance_klass() ||
1100                  cik->is_array_klass(), "Not supported allocation.");
1101           ObjectValue* sv = new ObjectValue(spobj->_idx,
1102                                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
1103           PhaseOutput::set_sv_for_object_node(objs, sv);
1104 
1105           uint first_ind = spobj->first_index(youngest_jvms);
1106           for (uint i = 0; i < spobj->n_fields(); i++) {
1107             Node* fld_node = sfn->in(first_ind+i);
1108             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
1109           }
1110           scval = sv;
1111         }
1112       } else if (obj_node->is_SafePointScalarMerge()) {
1113         SafePointScalarMergeNode* smerge = obj_node->as_SafePointScalarMerge();
1114         ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
1115 
1116         if (mv == nullptr) {
1117           GrowableArray<ScopeValue*> deps;
1118 
1119           int merge_pointer_idx = smerge->merge_pointer_idx(youngest_jvms);
1120           FillLocArray(0, sfn, sfn->in(merge_pointer_idx), &deps, objs);
1121           assert(deps.length() == 1, "missing value");
1122 

1190     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1191     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1192 
1193     // Make method available for all Safepoints
1194     ciMethod* scope_method = method ? method : C->method();
1195     // Describe the scope here
1196     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1197     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1198     // Now we can describe the scope.
1199     methodHandle null_mh;
1200     bool rethrow_exception = false;
1201     C->debug_info()->describe_scope(
1202       safepoint_pc_offset,
1203       null_mh,
1204       scope_method,
1205       jvms->bci(),
1206       jvms->should_reexecute(),
1207       rethrow_exception,
1208       is_method_handle_invoke,
1209       return_oop,
1210       has_ea_local_in_scope,
1211       arg_escape,
1212       locvals,
1213       expvals,
1214       monvals
1215     );
1216   } // End jvms loop
1217 
1218   // Mark the end of the scope set.
1219   C->debug_info()->end_safepoint(safepoint_pc_offset);
1220 }
1221 
1222 
1223 
1224 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1225 class NonSafepointEmitter {
1226     Compile*  C;
1227     JVMState* _pending_jvms;
1228     int       _pending_offset;
1229 

1565           MachNode *nop = new MachNopNode(nops_cnt);
1566           block->insert_node(nop, j++);
1567           last_inst++;
1568           C->cfg()->map_node_to_block(nop, block);
1569           // Ensure enough space.
1570           masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1571           if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1572             C->record_failure("CodeCache is full");
1573             return;
1574           }
1575           nop->emit(masm, C->regalloc());
1576           masm->code()->flush_bundle(true);
1577           current_offset = masm->offset();
1578         }
1579 
1580         bool observe_safepoint = is_sfn;
1581         // Remember the start of the last call in a basic block
1582         if (is_mcall) {
1583           MachCallNode *mcall = mach->as_MachCall();
1584 
1585           // This destination address is NOT PC-relative
1586           mcall->method_set((intptr_t)mcall->entry_point());
1587 
1588           // Save the return address
1589           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1590 
1591           observe_safepoint = mcall->guaranteed_safepoint();
1592         }
1593 
 1594         // sfn is valid whenever mcall is valid, since MachCallNode inherits from MachSafePointNode
1595         if (observe_safepoint) {
1596           // Handle special safepoint nodes for synchronization
1597           if (!is_mcall) {
1598             MachSafePointNode *sfn = mach->as_MachSafePoint();
1599             // !!!!! Stubs only need an oopmap right now, so bail out
1600             if (sfn->jvms()->method() == nullptr) {
1601               // Write the oopmap directly to the code blob??!!
1602               continue;
1603             }
1604           } // End synchronization
1605 
1606           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1707       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1708         node_offsets[n->_idx] = masm->offset();
1709       }
1710 #endif
1711       assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1712 
1713       // "Normal" instruction case
1714       DEBUG_ONLY(uint instr_offset = masm->offset());
1715       n->emit(masm, C->regalloc());
1716       current_offset = masm->offset();
1717 
1718       // Above we only verified that there is enough space in the instruction section.
1719       // However, the instruction may emit stubs that cause code buffer expansion.
1720       // Bail out here if expansion failed due to a lack of code cache space.
1721       if (C->failing()) {
1722         return;
1723       }
1724 
1725       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1726              "ret_addr_offset() not within emitted code");
1727 
1728 #ifdef ASSERT
1729       uint n_size = n->size(C->regalloc());
1730       if (n_size < (current_offset-instr_offset)) {
1731         MachNode* mach = n->as_Mach();
1732         n->dump();
1733         mach->dump_format(C->regalloc(), tty);
1734         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1735         Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
1736         tty->print_cr(" ------------------- ");
1737         BufferBlob* blob = this->scratch_buffer_blob();
1738         address blob_begin = blob->content_begin();
1739         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1740         assert(false, "wrong size of mach node");
1741       }
1742 #endif
1743       non_safepoints.observe_instruction(n, current_offset);
1744 
 1745       // mcall is the last "call" that can be a safepoint.
 1746       // Record it so we can see if a poll will directly follow it,
 1747       // in which case we'll need a pad to make the PcDesc sites unique.

3145         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3146         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3147       }
3148     }
3149     // Do not allow defs of new derived values to float above GC
3150     // points unless the base is definitely available at the GC point.
3151 
3152     Node *m = b->get_node(i);
3153 
3154     // Add precedence edge from following safepoint to use of derived pointer
3155     if( last_safept_node != end_node &&
3156         m != last_safept_node) {
3157       for (uint k = 1; k < m->req(); k++) {
3158         const Type *t = m->in(k)->bottom_type();
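             // An oop type at a non-zero offset denotes a derived pointer into an object.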
3159         if( t->isa_oop_ptr() &&
3160             t->is_ptr()->offset() != 0 ) {
3161           last_safept_node->add_prec( m );
3162           break;
3163         }
3164       }
3165     }
3166 
3167     if( n->jvms() ) {           // Precedence edge from derived to safept
3168       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3169       if( b->get_node(last_safept) != last_safept_node ) {
3170         last_safept = b->find_node(last_safept_node);
3171       }
3172       for( uint j=last_safept; j > i; j-- ) {
3173         Node *mach = b->get_node(j);
3174         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3175           mach->add_prec( n );
3176       }
3177       last_safept = i;
3178       last_safept_node = m;
3179     }
3180   }
3181 
3182   if (fat_proj_seen) {
3183     // Garbage collect pinch nodes that were not consumed.
3184     // They are usually created by a fat kill MachProj for a call.

3303 }
3304 #endif
3305 
3306 //-----------------------init_scratch_buffer_blob------------------------------
3307 // Construct a temporary BufferBlob and cache it for this compile.
3308 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3309   // If there is already a scratch buffer blob allocated and the
3310   // constant section is big enough, use it.  Otherwise free the
3311   // current and allocate a new one.
3312   BufferBlob* blob = scratch_buffer_blob();
3313   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3314     // Use the current blob.
3315   } else {
3316     if (blob != nullptr) {
3317       BufferBlob::free(blob);
3318     }
3319 
3320     ResourceMark rm;
3321     _scratch_const_size = const_size;
3322     int size = C2Compiler::initial_code_buffer_size(const_size);
3323     blob = BufferBlob::create("Compile::scratch_buffer", size);
3324     // Record the buffer blob for next time.
3325     set_scratch_buffer_blob(blob);
3326     // Have we run out of code space?
3327     if (scratch_buffer_blob() == nullptr) {
 3328       // Let CompileBroker disable further compilations.
3329       C->record_failure("Not enough space for scratch buffer in CodeCache");
3330       return;
3331     }
3332   }
3333 
3334   // Initialize the relocation buffers
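       // They are carved out of the top of the blob: MAX_locs_size relocInfo entries below content_end().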
3335   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3336   set_scratch_locs_memory(locs_buf);
3337 }
3338 
3339 
3340 //-----------------------scratch_emit_size-------------------------------------
3341 // Helper function that computes size by emitting code
3342 uint PhaseOutput::scratch_emit_size(const Node* n) {

3373   buf.insts()->set_scratch_emit();
3374   buf.stubs()->set_scratch_emit();
3375 
3376   // Do the emission.
3377 
3378   Label fakeL; // Fake label for branch instructions.
3379   Label*   saveL = nullptr;
3380   uint save_bnum = 0;
3381   bool is_branch = n->is_MachBranch();
3382   C2_MacroAssembler masm(&buf);
3383   masm.bind(fakeL);
3384   if (is_branch) {
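         // Redirect the branch to the bound fake label so emission succeeds; the real label is restored below.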
3385     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3386     n->as_MachBranch()->label_set(&fakeL, 0);
3387   }
3388   n->emit(&masm, C->regalloc());
3389 
3390   // Emitting into the scratch buffer should not fail
3391   assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3392 
3393   if (is_branch) // Restore label.
3394     n->as_MachBranch()->label_set(saveL, save_bnum);
3395 
3396   // End scratch_emit_size section.
3397   set_in_scratch_emit_size(false);
3398 
3399   return buf.insts_size();
3400 }
3401 
3402 void PhaseOutput::install() {
3403   if (!C->should_install_code()) {
3404     return;
3405   } else if (C->stub_function() != nullptr) {
3406     install_stub(C->stub_name());
3407   } else {
3408     install_code(C->method(),
3409                  C->entry_bci(),
3410                  CompileBroker::compiler2(),
3411                  C->has_unsafe_access(),
3412                  SharedRuntime::is_wide_vector(C->max_vector_size()));
3413   }
3414 }
3415 
3416 void PhaseOutput::install_code(ciMethod*         target,
3417                                int               entry_bci,
3418                                AbstractCompiler* compiler,
3419                                bool              has_unsafe_access,
3420                                bool              has_wide_vectors) {
3421   // Check if we want to skip execution of all compiled code.
3422   {
3423 #ifndef PRODUCT
3424     if (OptoNoExecute) {
3425       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3426       return;
3427     }
3428 #endif
3429     Compile::TracePhase tp(_t_registerMethod);
3430 
3431     if (C->is_osr_compilation()) {
3432       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3433       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3434     } else {
3435       if (!target->is_static()) {
3436         // The UEP of an nmethod ensures that the VEP is padded. However, the padding of the UEP is placed
3437         // before the inline cache check, so we don't have to execute any nop instructions when dispatching
3438         // through the UEP, yet we can ensure that the VEP is aligned appropriately.
3439         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3440       }
3441       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3442       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3443     }
3444 
3445     C->env()->register_method(target,
3446                                      entry_bci,
3447                                      &_code_offsets,
3448                                      _orig_pc_slot_offset_in_bytes,
3449                                      code_buffer(),
3450                                      frame_size_in_words(),
3451                                      oop_map_set(),
3452                                      &_handler_table,
3453                                      inc_table(),
3454                                      compiler,
3455                                      has_unsafe_access,
3456                                      SharedRuntime::is_wide_vector(C->max_vector_size()),
3457                                      C->has_monitors(),
3458                                      C->has_scoped_access(),
3459                                      0);
3460 
3461     if (C->log() != nullptr) { // Print code cache state into compiler log
3462       C->log()->code_cache_state();
3463     }
3464   }
3465 }
3466 void PhaseOutput::install_stub(const char* stub_name) {
3467   // Entry point will be accessed using stub_entry_point();
3468   if (code_buffer() == nullptr) {
3469     Matcher::soft_match_failure();
3470   } else {
3471     if (PrintAssembly && (WizardMode || Verbose))
3472       tty->print_cr("### Stub::%s", stub_name);
3473 
3474     if (!C->failing()) {
3475       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3476 
3477       // Make the NMethod
3478       // For now we mark the frame as never safe for profile stackwalking
3479       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/assembler.inline.hpp"
  26 #include "code/aotCodeCache.hpp"
  27 #include "code/compiledIC.hpp"
  28 #include "code/debugInfo.hpp"
  29 #include "code/debugInfoRec.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compilerDirectives.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "compiler/oopMap.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/c2/barrierSetC2.hpp"
  36 #include "gc/shared/gc_globals.hpp"
  37 #include "memory/allocation.hpp"
  38 #include "opto/ad.hpp"
  39 #include "opto/block.hpp"
  40 #include "opto/c2_MacroAssembler.hpp"
  41 #include "opto/c2compiler.hpp"
  42 #include "opto/callnode.hpp"
  43 #include "opto/cfgnode.hpp"
  44 #include "opto/locknode.hpp"
  45 #include "opto/machnode.hpp"
  46 #include "opto/node.hpp"
  47 #include "opto/optoreg.hpp"
  48 #include "opto/output.hpp"
  49 #include "opto/regalloc.hpp"
  50 #include "opto/type.hpp"
  51 #include "runtime/sharedRuntime.hpp"
  52 #include "utilities/macros.hpp"
  53 #include "utilities/powerOfTwo.hpp"
  54 #include "utilities/xmlstream.hpp"
  55 
  56 #ifndef PRODUCT

 225     _first_block_size(0),
 226     _handler_table(),
 227     _inc_table(),
 228     _stub_list(),
 229     _oop_map_set(nullptr),
 230     _scratch_buffer_blob(nullptr),
 231     _scratch_locs_memory(nullptr),
 232     _scratch_const_size(-1),
 233     _in_scratch_emit_size(false),
 234     _frame_slots(0),
 235     _code_offsets(),
 236     _node_bundling_limit(0),
 237     _node_bundling_base(nullptr),
 238     _orig_pc_slot(0),
 239     _orig_pc_slot_offset_in_bytes(0),
 240     _buf_sizes(),
 241     _block(nullptr),
 242     _index(0) {
 243   C->set_output(this);
 244   if (C->stub_name() == nullptr) {
 245     int fixed_slots = C->fixed_slots();
 246     if (C->needs_stack_repair()) {
 247       fixed_slots -= 2;
 248     }
 249     // TODO 8284443 Only reserve extra slot if needed
 250     if (InlineTypeReturnedAsFields) {
 251       fixed_slots -= 2;
 252     }
 253     _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
 254   }
 255 }
 256 
 257 PhaseOutput::~PhaseOutput() {
 258   C->set_output(nullptr);
 259   if (_scratch_buffer_blob != nullptr) {
 260     BufferBlob::free(_scratch_buffer_blob);
 261   }
 262 }
 263 
 264 void PhaseOutput::perform_mach_node_analysis() {
 265   // Late barrier analysis must be done after schedule and bundle;
 266   // otherwise, liveness-based spilling will fail.
 267   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 268   bs->late_barrier_analysis();
 269 
 270   pd_perform_mach_node_analysis();
 271 
 272   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 273 }
 274 
 275 // Convert Nodes to instruction bits and pass off to the VM
 276 void PhaseOutput::Output() {
 277   // The RootNode emits nothing, so the root block must be empty by now
 278   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 279 
 280   // The number of new nodes (mostly MachNop) is proportional to
 281   // the number of java calls and inner loops which are aligned.
 282   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 283                             C->inner_loops()*(OptoLoopAlignment-1)),
 284                            "out of nodes before code generation" ) ) {
 285     return;
 286   }
 287   // Make sure I can find the Start Node
 288   Block *entry = C->cfg()->get_block(1);
 289   Block *broot = C->cfg()->get_root_block();
 290 
 291   const StartNode *start = entry->head()->as_Start();
 292 
 293   // Replace StartNode with prolog
 294   Label verified_entry;
 295   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
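       // verified_entry is bound inside the prolog and shared with the MachVEPNodes created below.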
 296   entry->map_node(prolog, 0);
 297   C->cfg()->map_node_to_block(prolog, entry);
 298   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 299 
 300   // Virtual methods need an unverified entry point
 301   if (C->is_osr_compilation()) {
 302     if (PoisonOSREntry) {
 303       // TODO: Should use a ShouldNotReachHereNode...
 304       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 305     }
 306   } else {
 307     if (C->method()) {
 308       if (C->method()->has_scalarized_args()) {
 309         // Add entry point to unpack all inline type arguments
 310         C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
 311         if (!C->method()->is_static()) {
 312           // Add verified/unverified entry points to only unpack inline type receiver at interface calls
 313           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
 314           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true,  /* receiver_only */ true));
 315           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
 316         }
 317       } else if (!C->method()->is_static()) {
 318         // Insert unverified entry point
 319         C->cfg()->insert(broot, 0, new MachUEPNode());
 320       }
 321     }
 322   }
 323 
 324   // Break before main entry point
 325   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 326       (OptoBreakpoint && C->is_method_compilation())       ||
 327       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 328       (OptoBreakpointC2R && !C->method())                   ) {
 329     // checking for C->method() means that OptoBreakpoint does not apply to
 330     // runtime stubs or frame converters
 331     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 332   }
 333 
 334   // Insert epilogs before every return
 335   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 336     Block* block = C->cfg()->get_block(i);
 337     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 338       Node* m = block->end();
 339       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 340         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 341         block->add_inst(epilog);
 342         C->cfg()->map_node_to_block(epilog, block);
 343       }
 344     }
 345   }
 346 
 347   // Keeper of sizing aspects
 348   _buf_sizes = BufferSizingData();
 349 
 350   // Initialize code buffer
 351   estimate_buffer_size(_buf_sizes._const);
 352   if (C->failing()) return;
 353 
 354   // Pre-compute the length of blocks and replace
 355   // long branches with short if machine supports it.
 356   // Must be done before ScheduleAndBundle due to SPARC delay slots
 357   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 358   blk_starts[0] = 0;
 359   shorten_branches(blk_starts);
 360 
 361   if (!C->is_osr_compilation() && C->has_scalarized_args()) {
 362     // Compute the offsets of the entry points required by the inline type calling convention
 363     if (!C->method()->is_static()) {
 364       // We have entries at the beginning of the method, implemented by the first 4 nodes.
 365       // Entry                     (unverified) @ offset 0
 366       // Verified_Inline_Entry_RO
 367       // Inline_Entry              (unverified)
 368       // Verified_Inline_Entry
 369       uint offset = 0;
 370       _code_offsets.set_value(CodeOffsets::Entry, offset);
 371 
 372       offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
 373       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
 374 
 375       offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
 376       _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
 377 
 378       offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
 379       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
 380     } else {
 381       _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
 382       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
 383     }
 384   }
 385 
 386   ScheduleAndBundle();
 387   if (C->failing()) {
 388     return;
 389   }
 390 
 391   perform_mach_node_analysis();
 392 
 393   // Complete sizing of codebuffer
 394   CodeBuffer* cb = init_buffer();
 395   if (cb == nullptr || C->failing()) {
 396     return;
 397   }
 398 
 399   BuildOopMaps();
 400 
 401   if (C->failing())  {
 402     return;
 403   }
 404 
 405   C2_MacroAssembler masm(cb);

 527     // Sum all instruction sizes to compute block size
 528     uint last_inst = block->number_of_nodes();
 529     uint blk_size = 0;
 530     for (uint j = 0; j < last_inst; j++) {
 531       _index = j;
 532       Node* nj = block->get_node(_index);
 533       // Handle machine instruction nodes
 534       if (nj->is_Mach()) {
 535         MachNode* mach = nj->as_Mach();
 536         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 537         reloc_size += mach->reloc();
 538         if (mach->is_MachCall()) {
 539           // add size information for trampoline stub
 540           // class CallStubImpl is platform-specific and defined in the *.ad files.
 541           stub_size  += CallStubImpl::size_call_trampoline();
 542           reloc_size += CallStubImpl::reloc_call_trampoline();
 543 
 544           MachCallNode *mcall = mach->as_MachCall();
 545           // This destination address is NOT PC-relative
 546 
 547           if (mcall->entry_point() != nullptr) {
 548             mcall->method_set((intptr_t)mcall->entry_point());
 549           }
 550 
 551           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 552             stub_size  += CompiledDirectCall::to_interp_stub_size();
 553             reloc_size += CompiledDirectCall::reloc_to_interp_stub();
 554           }
 555         } else if (mach->is_MachSafePoint()) {
 556           // If call/safepoint are adjacent, account for possible
 557           // nop to disambiguate the two safepoints.
 558           // ScheduleAndBundle() can rearrange nodes in a block,
 559           // check for all offsets inside this block.
 560           if (last_call_adr >= blk_starts[i]) {
 561             blk_size += nop_size;
 562           }
 563         }
 564         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 565           // Nop is inserted between "avoid back to back" instructions.
 566           // ScheduleAndBundle() can rearrange nodes in a block,
 567           // check for all offsets inside this block.
 568           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 569             blk_size += nop_size;

 784     // New functionality:
 785     //   Assert if the local is not top. In product mode let the new node
 786     //   override the old entry.
 787     assert(local == C->top(), "LocArray collision");
 788     if (local == C->top()) {
 789       return;
 790     }
 791     array->pop();
 792   }
 793   const Type *t = local->bottom_type();
 794 
 795   // Is it a safepoint scalar object node?
 796   if (local->is_SafePointScalarObject()) {
 797     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 798 
 799     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 800     if (sv == nullptr) {
 801       ciKlass* cik = t->is_oopptr()->exact_klass();
 802       assert(cik->is_instance_klass() ||
 803              cik->is_array_klass(), "Not supported allocation.");
 804       uint first_ind = spobj->first_index(sfpt->jvms());
 805       // Nullable, scalarized inline types have a null_marker input
 806       // that needs to be checked before using the field values.
 807       ScopeValue* properties = nullptr;
 808       if (cik->is_inlinetype()) {
 809         Node* null_marker_node = sfpt->in(first_ind++);
 810         assert(null_marker_node != nullptr, "null_marker node not found");
 811         if (!null_marker_node->is_top()) {
 812           const TypeInt* null_marker_type = null_marker_node->bottom_type()->is_int();
 813           if (null_marker_node->is_Con()) {
 814             properties = new ConstantIntValue(null_marker_type->get_con());
 815           } else {
 816             OptoReg::Name null_marker_reg = C->regalloc()->get_reg_first(null_marker_node);
 817             properties = new_loc_value(C->regalloc(), null_marker_reg, Location::normal);
 818           }
 819         }
 820       }
 821       if (cik->is_array_klass() && !cik->is_type_array_klass()) {
 822         jint props = ArrayKlass::ArrayProperties::DEFAULT;
 823         if (cik->as_array_klass()->element_klass()->is_inlinetype()) {
 824           if (cik->as_array_klass()->is_elem_null_free()) {
 825             props |= ArrayKlass::ArrayProperties::NULL_RESTRICTED;
 826           }
 827           if (!cik->as_array_klass()->is_elem_atomic()) {
 828             props |= ArrayKlass::ArrayProperties::NON_ATOMIC;
 829           }
 830         }
 831         properties = new ConstantIntValue(props);
 832       }
 833       sv = new ObjectValue(spobj->_idx,
 834                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, properties);
 835       set_sv_for_object_node(objs, sv);
 836 
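           // first_ind has already been advanced past the null_marker input, if one was consumed above.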
 837       for (uint i = 0; i < spobj->n_fields(); i++) {
 838         Node* fld_node = sfpt->in(first_ind+i);
 839         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 840       }
 841     }
 842     array->append(sv);
 843     return;
 844   } else if (local->is_SafePointScalarMerge()) {
 845     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 846     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 847 
 848     if (mv == nullptr) {
 849       GrowableArray<ScopeValue*> deps;
 850 
 851       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 852       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 853       assert(deps.length() == 1, "missing value");
 854 
 855       int selector_idx = smerge->selector_idx(sfpt->jvms());
 856       (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);

1063       continue;
1064     }
1065 
1066     ObjectValue* other = sv_for_node_id(objs, n->_idx);
1067     if (ov == other) {
1068       return true;
1069     }
1070   }
1071   return false;
1072 }
1073 
1074 //--------------------------Process_OopMap_Node--------------------------------
1075 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1076   // Handle special safepoint nodes for synchronization
1077   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1078   MachCallNode      *mcall;
1079 
1080   int safepoint_pc_offset = current_offset;
1081   bool is_method_handle_invoke = false;
1082   bool return_oop = false;
1083   bool return_scalarized = false;
1084   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1085   bool arg_escape = false;
1086 
1087   // Add the safepoint in the DebugInfoRecorder
1088   if( !mach->is_MachCall() ) {
1089     mcall = nullptr;
1090     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1091   } else {
1092     mcall = mach->as_MachCall();
1093 
1094     // Is the call a MethodHandle call?
1095     if (mcall->is_MachCallJava()) {
1096       if (mcall->as_MachCallJava()->_method_handle_invoke) {
1097         assert(C->has_method_handle_invokes(), "must have been set during call generation");
1098         is_method_handle_invoke = true;
1099       }
1100       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1101     }
1102 
1103     // Check if a call returns an object.
1104     if (mcall->returns_pointer() || mcall->returns_scalarized()) {
1105       return_oop = true;
1106     }
1107     if (mcall->returns_scalarized()) {
1108       return_scalarized = true;
1109     }
1110     safepoint_pc_offset += mcall->ret_addr_offset();
1111     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1112   }
1113 
1114   // Loop over the JVMState list to add scope information
1115   // Do not skip safepoints with a null method, they need monitor info
1116   JVMState* youngest_jvms = sfn->jvms();
1117   int max_depth = youngest_jvms->depth();
1118 
1119   // Allocate the object pool for scalar-replaced objects -- the map from
1120   // small-integer keys (which can be recorded in the local and ostack
1121   // arrays) to descriptions of the object state.
1122   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1123 
1124   // Visit scopes from oldest to youngest.
1125   for (int depth = 1; depth <= max_depth; depth++) {
1126     JVMState* jvms = youngest_jvms->of_depth(depth);
1127     int idx;
1128     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1129     // Safepoints that do not have method() set only provide oop-map and monitor info

 1159     // Build the growable array of MonitorValues for the monitors
1160     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
1161 
1162     // Loop over monitors and insert into array
1163     for (idx = 0; idx < num_mon; idx++) {
1164       // Grab the node that defines this monitor
1165       Node* box_node = sfn->monitor_box(jvms, idx);
1166       Node* obj_node = sfn->monitor_obj(jvms, idx);
1167 
1168       // Create ScopeValue for object
1169       ScopeValue *scval = nullptr;
1170 
1171       if (obj_node->is_SafePointScalarObject()) {
1172         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
1173         scval = PhaseOutput::sv_for_node_id(objs, spobj->_idx);
1174         if (scval == nullptr) {
1175           const Type *t = spobj->bottom_type();
1176           ciKlass* cik = t->is_oopptr()->exact_klass();
1177           assert(cik->is_instance_klass() ||
1178                  cik->is_array_klass(), "Not supported allocation.");
1179           assert(!cik->is_inlinetype(), "Synchronization on value object?");
1180           ScopeValue* properties = nullptr;
1181           if (cik->is_array_klass() && !cik->is_type_array_klass()) {
1182             jint props = ArrayKlass::ArrayProperties::DEFAULT;
1183             if (cik->as_array_klass()->element_klass()->is_inlinetype()) {
1184               if (cik->as_array_klass()->is_elem_null_free()) {
1185                 props |= ArrayKlass::ArrayProperties::NULL_RESTRICTED;
1186               }
1187               if (!cik->as_array_klass()->is_elem_atomic()) {
1188                 props |= ArrayKlass::ArrayProperties::NON_ATOMIC;
1189               }
1190             }
1191             properties = new ConstantIntValue(props);
1192           }
1193           ObjectValue* sv = new ObjectValue(spobj->_idx,
1194                                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, properties);
1195           PhaseOutput::set_sv_for_object_node(objs, sv);
1196 
1197           uint first_ind = spobj->first_index(youngest_jvms);
1198           for (uint i = 0; i < spobj->n_fields(); i++) {
1199             Node* fld_node = sfn->in(first_ind+i);
1200             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
1201           }
1202           scval = sv;
1203         }
1204       } else if (obj_node->is_SafePointScalarMerge()) {
1205         SafePointScalarMergeNode* smerge = obj_node->as_SafePointScalarMerge();
1206         ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
1207 
1208         if (mv == nullptr) {
1209           GrowableArray<ScopeValue*> deps;
1210 
1211           int merge_pointer_idx = smerge->merge_pointer_idx(youngest_jvms);
1212           FillLocArray(0, sfn, sfn->in(merge_pointer_idx), &deps, objs);
1213           assert(deps.length() == 1, "missing value");
1214 

1282     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1283     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1284 
1285     // Make method available for all Safepoints
1286     ciMethod* scope_method = method ? method : C->method();
1287     // Describe the scope here
1288     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1289     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1290     // Now we can describe the scope.
1291     methodHandle null_mh;
1292     bool rethrow_exception = false;
1293     C->debug_info()->describe_scope(
1294       safepoint_pc_offset,
1295       null_mh,
1296       scope_method,
1297       jvms->bci(),
1298       jvms->should_reexecute(),
1299       rethrow_exception,
1300       is_method_handle_invoke,
1301       return_oop,
1302       return_scalarized,
1303       has_ea_local_in_scope,
1304       arg_escape,
1305       locvals,
1306       expvals,
1307       monvals
1308     );
1309   } // End jvms loop
1310 
1311   // Mark the end of the scope set.
1312   C->debug_info()->end_safepoint(safepoint_pc_offset);
1313 }
1314 
1315 
1316 
1317 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1318 class NonSafepointEmitter {
1319     Compile*  C;
1320     JVMState* _pending_jvms;
1321     int       _pending_offset;
1322 

1658           MachNode *nop = new MachNopNode(nops_cnt);
1659           block->insert_node(nop, j++);
1660           last_inst++;
1661           C->cfg()->map_node_to_block(nop, block);
1662           // Ensure enough space.
1663           masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1664           if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1665             C->record_failure("CodeCache is full");
1666             return;
1667           }
1668           nop->emit(masm, C->regalloc());
1669           masm->code()->flush_bundle(true);
1670           current_offset = masm->offset();
1671         }
1672 
1673         bool observe_safepoint = is_sfn;
1674         // Remember the start of the last call in a basic block
1675         if (is_mcall) {
1676           MachCallNode *mcall = mach->as_MachCall();
1677 
1678           if (mcall->entry_point() != nullptr) {
1679             // This destination address is NOT PC-relative
1680             mcall->method_set((intptr_t)mcall->entry_point());
1681           }
1682 
1683           // Save the return address
1684           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1685 
1686           observe_safepoint = mcall->guaranteed_safepoint();
1687         }
1688 
 1689         // sfn is valid whenever mcall is valid, since MachCallNode inherits from MachSafePointNode
1690         if (observe_safepoint) {
1691           // Handle special safepoint nodes for synchronization
1692           if (!is_mcall) {
1693             MachSafePointNode *sfn = mach->as_MachSafePoint();
1694             // !!!!! Stubs only need an oopmap right now, so bail out
1695             if (sfn->jvms()->method() == nullptr) {
1696               // Write the oopmap directly to the code blob??!!
1697               continue;
1698             }
1699           } // End synchronization
1700 
1701           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1802       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1803         node_offsets[n->_idx] = masm->offset();
1804       }
1805 #endif
1806       assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1807 
1808       // "Normal" instruction case
1809       DEBUG_ONLY(uint instr_offset = masm->offset());
1810       n->emit(masm, C->regalloc());
1811       current_offset = masm->offset();
1812 
1813       // Above we only verified that there is enough space in the instruction section.
1814       // However, the instruction may emit stubs that cause code buffer expansion.
1815       // Bail out here if expansion failed due to a lack of code cache space.
1816       if (C->failing()) {
1817         return;
1818       }
1819 
1820       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1821              "ret_addr_offset() not within emitted code");
1822 #ifdef ASSERT
1823       uint n_size = n->size(C->regalloc());
1824       if (n_size < (current_offset-instr_offset)) {
1825         MachNode* mach = n->as_Mach();
1826         n->dump();
1827         mach->dump_format(C->regalloc(), tty);
1828         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1829         Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
1830         tty->print_cr(" ------------------- ");
1831         BufferBlob* blob = this->scratch_buffer_blob();
1832         address blob_begin = blob->content_begin();
1833         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1834         assert(false, "wrong size of mach node");
1835       }
1836 #endif
1837       non_safepoints.observe_instruction(n, current_offset);
1838 
 1839       // mcall is the last "call" that can be a safepoint.
 1840       // Record it so we can see if a poll will directly follow it,
 1841       // in which case we'll need a pad to make the PcDesc sites unique.

3239         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3240         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3241       }
3242     }
3243     // Do not allow defs of new derived values to float above GC
3244     // points unless the base is definitely available at the GC point.
3245 
3246     Node *m = b->get_node(i);
3247 
3248     // Add precedence edge from following safepoint to use of derived pointer
3249     if( last_safept_node != end_node &&
3250         m != last_safept_node) {
3251       for (uint k = 1; k < m->req(); k++) {
3252         const Type *t = m->in(k)->bottom_type();
3253         if( t->isa_oop_ptr() &&
3254             t->is_ptr()->offset() != 0 ) {
3255           last_safept_node->add_prec( m );
3256           break;
3257         }
3258       }
3259 
3260       // Do not allow a CheckCastPP node whose input is a raw pointer to
3261       // float past a safepoint.  This can occur when a buffered inline
3262       // type is allocated in a loop and the CheckCastPP from that
3263       // allocation is reused outside the loop.  If the use inside the
3264       // loop is scalarized the CheckCastPP will no longer be connected
3265       // to the loop safepoint.  See JDK-8264340.
3266       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
3267         Node *def = m->in(1);
3268         if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
3269           last_safept_node->add_prec(m);
3270         }
3271       }
3272     }
3273 
3274     if( n->jvms() ) {           // Precedence edge from derived to safept
3275       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3276       if( b->get_node(last_safept) != last_safept_node ) {
3277         last_safept = b->find_node(last_safept_node);
3278       }
3279       for( uint j=last_safept; j > i; j-- ) {
3280         Node *mach = b->get_node(j);
3281         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3282           mach->add_prec( n );
3283       }
3284       last_safept = i;
3285       last_safept_node = m;
3286     }
3287   }
3288 
3289   if (fat_proj_seen) {
3290     // Garbage collect pinch nodes that were not consumed.
3291     // They are usually created by a fat kill MachProj for a call.

3410 }
3411 #endif
3412 
3413 //-----------------------init_scratch_buffer_blob------------------------------
3414 // Construct a temporary BufferBlob and cache it for this compile.
3415 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3416   // If there is already a scratch buffer blob allocated and the
3417 // constant section is big enough, use it.  Otherwise free the
3418 // current blob and allocate a new one.
3419   BufferBlob* blob = scratch_buffer_blob();
3420   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3421     // Use the current blob.
3422   } else {
3423     if (blob != nullptr) {
3424       BufferBlob::free(blob);
3425     }
3426 
3427     ResourceMark rm;
3428     _scratch_const_size = const_size;
3429     int size = C2Compiler::initial_code_buffer_size(const_size);
3430     if (C->has_scalarized_args()) {
3431       // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
3432       // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
3433       ciMethod* method = C->method();
3434       int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
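           // (Assumed rationale) rough per-oop code-size estimates: ZGC load
           // barriers emit considerably more code, and debug builds add
           // verification instructions.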
3435       int arg_num = 0;
3436       if (!method->is_static()) {
3437         if (method->is_scalarized_arg(arg_num)) {
3438           size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
3439         }
3440         arg_num++;
3441       }
3442       for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
3443         if (method->is_scalarized_arg(arg_num)) {
3444           size += str.type()->as_inline_klass()->oop_count() * barrier_size;
3445         }
3446         arg_num++;
3447       }
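           // (Illustrative arithmetic) one scalarized argument whose inline
           // klass holds 3 oop fields adds 3 * 200 = 600 bytes under UseZGC.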
3448     }
3449     blob = BufferBlob::create("Compile::scratch_buffer", size);
3450     // Record the buffer blob for next time.
3451     set_scratch_buffer_blob(blob);
3452     // Have we run out of code space?
3453     if (scratch_buffer_blob() == nullptr) {
3454       // Let CompileBroker disable further compilations.
3455       C->record_failure("Not enough space for scratch buffer in CodeCache");
3456       return;
3457     }
3458   }
3459 
3460   // Initialize the relocation buffers
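       // (Illustrative) this reserves the last MAX_locs_size relocInfo slots
       // of the blob as scratch relocation storage, separate from the code
       // bytes emitted at the front.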
3461   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3462   set_scratch_locs_memory(locs_buf);
3463 }
3464 
3465 
3466 //-----------------------scratch_emit_size-------------------------------------
3467 // Helper function that computes size by emitting code
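     // The node is emitted once into the scratch buffer purely to measure its
     // encoding; the bits are discarded and only the resulting size is kept.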
3468 uint PhaseOutput::scratch_emit_size(const Node* n) {

3499   buf.insts()->set_scratch_emit();
3500   buf.stubs()->set_scratch_emit();
3501 
3502   // Do the emission.
3503 
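       // Branch encodings require a bound label, so a locally bound fake label
       // is substituted for the duration of the scratch emission and the real
       // label is restored afterwards.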
3504   Label fakeL; // Fake label for branch instructions.
3505   Label*   saveL = nullptr;
3506   uint save_bnum = 0;
3507   bool is_branch = n->is_MachBranch();
3508   C2_MacroAssembler masm(&buf);
3509   masm.bind(fakeL);
3510   if (is_branch) {
3511     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3512     n->as_MachBranch()->label_set(&fakeL, 0);
3513   }
3514   n->emit(&masm, C->regalloc());
3515 
3516   // Emitting into the scratch buffer should not fail
3517   assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3518 
3519   // Restore label.
3520   if (is_branch) {
3521     n->as_MachBranch()->label_set(saveL, save_bnum);
3522   }
3523 
3524   // End scratch_emit_size section.
3525   set_in_scratch_emit_size(false);
3526 
3527   return buf.insts_size();
3528 }
3529 
3530 void PhaseOutput::install() {
3531   if (!C->should_install_code()) {
3532     return;
3533   } else if (C->stub_function() != nullptr) {
3534     install_stub(C->stub_name());
3535   } else {
3536     install_code(C->method(),
3537                  C->entry_bci(),
3538                  CompileBroker::compiler2(),
3539                  C->has_unsafe_access(),
3540                  SharedRuntime::is_wide_vector(C->max_vector_size()));
3541   }
3542 }
3543 
3544 void PhaseOutput::install_code(ciMethod*         target,
3545                                int               entry_bci,
3546                                AbstractCompiler* compiler,
3547                                bool              has_unsafe_access,
3548                                bool              has_wide_vectors) {
3549   // Check if we want to skip execution of all compiled code.
3550   {
3551 #ifndef PRODUCT
3552     if (OptoNoExecute) {
3553       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3554       return;
3555     }
3556 #endif
3557     Compile::TracePhase tp(_t_registerMethod);
3558 
3559     if (C->is_osr_compilation()) {
3560       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3561       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3562     } else {






3563       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3564       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
3565         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
3566       }
3567       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
3568         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
3569       }
3570       if (_code_offsets.value(CodeOffsets::Entry) == -1) {
3571         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
3572       }
3573       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3574     }
3575 
3576     C->env()->register_method(target,
3577                               entry_bci,
3578                               &_code_offsets,
3579                               _orig_pc_slot_offset_in_bytes,
3580                               code_buffer(),
3581                               frame_size_in_words(),
3582                               _oop_map_set,
3583                               &_handler_table,
3584                               inc_table(),
3585                               compiler,
3586                               has_unsafe_access,
3587                               SharedRuntime::is_wide_vector(C->max_vector_size()),
3588                               C->has_monitors(),
3589                               C->has_scoped_access(),
3590                               0);
3591 
3592     if (C->log() != nullptr) { // Print code cache state into compiler log
3593       C->log()->code_cache_state();
3594     }
3595   }
3596 }

3597 void PhaseOutput::install_stub(const char* stub_name) {
3598   // Entry point will be accessed using stub_entry_point();
3599   if (code_buffer() == nullptr) {
3600     Matcher::soft_match_failure();
3601   } else {
3602     if (PrintAssembly && (WizardMode || Verbose))
3603       tty->print_cr("### Stub::%s", stub_name);
3604 
3605     if (!C->failing()) {
3606       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3607 
3608       // Make the RuntimeStub.
3609       // For now we mark the frame as never safe for profile stackwalking.
3610       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,