src/hotspot/share/opto/output.cpp

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/assembler.inline.hpp"
  26 #include "code/aotCodeCache.hpp"
  27 #include "code/compiledIC.hpp"
  28 #include "code/debugInfo.hpp"
  29 #include "code/debugInfoRec.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compilerDirectives.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "compiler/oopMap.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/c2/barrierSetC2.hpp"
  36 #include "memory/allocation.hpp"
  37 #include "opto/ad.hpp"
  38 #include "opto/block.hpp"
  39 #include "opto/c2_MacroAssembler.hpp"
  40 #include "opto/c2compiler.hpp"
  41 #include "opto/callnode.hpp"
  42 #include "opto/cfgnode.hpp"
  43 #include "opto/locknode.hpp"
  44 #include "opto/machnode.hpp"
  45 #include "opto/node.hpp"
  46 #include "opto/optoreg.hpp"
  47 #include "opto/output.hpp"
  48 #include "opto/regalloc.hpp"
  49 #include "opto/type.hpp"
  50 #include "runtime/sharedRuntime.hpp"
  51 #include "utilities/macros.hpp"
  52 #include "utilities/powerOfTwo.hpp"
  53 #include "utilities/xmlstream.hpp"
  54 
  55 #ifndef PRODUCT

 212     _first_block_size(0),
 213     _handler_table(),
 214     _inc_table(),
 215     _stub_list(),
 216     _oop_map_set(nullptr),
 217     _scratch_buffer_blob(nullptr),
 218     _scratch_locs_memory(nullptr),
 219     _scratch_const_size(-1),
 220     _in_scratch_emit_size(false),
 221     _frame_slots(0),
 222     _code_offsets(),
 223     _node_bundling_limit(0),
 224     _node_bundling_base(nullptr),
 225     _orig_pc_slot(0),
 226     _orig_pc_slot_offset_in_bytes(0),
 227     _buf_sizes(),
 228     _block(nullptr),
 229     _index(0) {
 230   C->set_output(this);
 231   if (C->stub_name() == nullptr) {
 232     _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size);
 233   }
 234 }
 235 
 236 PhaseOutput::~PhaseOutput() {
 237   C->set_output(nullptr);
 238   if (_scratch_buffer_blob != nullptr) {
 239     BufferBlob::free(_scratch_buffer_blob);
 240   }
 241 }
 242 
 243 void PhaseOutput::perform_mach_node_analysis() {
 244   // Late barrier analysis must be done after schedule and bundle
 245   // Otherwise liveness based spilling will fail
 246   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 247   bs->late_barrier_analysis();
 248 
 249   pd_perform_mach_node_analysis();
 250 
 251   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 252 }
 253 
 254 // Convert Nodes to instruction bits and pass off to the VM
 255 void PhaseOutput::Output() {
 256   // RootNode goes
 257   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 258 
 259   // The number of new nodes (mostly MachNop) is proportional to
 260   // the number of java calls and inner loops which are aligned.
 261   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 262                             C->inner_loops()*(OptoLoopAlignment-1)),
 263                            "out of nodes before code generation" ) ) {
 264     return;
 265   }
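Note: a standalone sketch of the worst-case node budget computed at lines 261-263 above. NodeLimitFudgeFactor and OptoLoopAlignment are VM flags (assumed default values below); the call and loop counts are hypothetical:

    #include <cstdio>
    int main() {
      const int NodeLimitFudgeFactor = 2000; // assumed default value
      const int OptoLoopAlignment    = 16;   // assumed default value
      int java_calls = 10, inner_loops = 3;  // hypothetical method
      int budget = NodeLimitFudgeFactor + java_calls * 3 +
                   inner_loops * (OptoLoopAlignment - 1);
      printf("budget for new nodes: %d\n", budget); // 2075
      return 0;
    }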
 266   // Make sure I can find the Start Node
 267   Block *entry = C->cfg()->get_block(1);
 268   Block *broot = C->cfg()->get_root_block();
 269 
 270   const StartNode *start = entry->head()->as_Start();
 271 
 272   // Replace StartNode with prolog
 273   MachPrologNode *prolog = new MachPrologNode();
 274   entry->map_node(prolog, 0);
 275   C->cfg()->map_node_to_block(prolog, entry);
 276   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 277 
 278   // Virtual methods need an unverified entry point
 279 
 280   if( C->is_osr_compilation() ) {
 281     if( PoisonOSREntry ) {
 282       // TODO: Should use a ShouldNotReachHereNode...
 283       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 284     }
 285   } else {
 286     if( C->method() && !C->method()->flags().is_static() ) {
 287       // Insert unvalidated entry point
 288       C->cfg()->insert( broot, 0, new MachUEPNode() );
 289     }
 290 
 291   }
 292 
 293   // Break before main entry point
 294   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 295       (OptoBreakpoint && C->is_method_compilation())       ||
 296       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 297       (OptoBreakpointC2R && !C->method())                   ) {
 298     // checking for C->method() means that OptoBreakpoint does not apply to
 299     // runtime stubs or frame converters
 300     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 301   }
 302 
 303   // Insert epilogs before every return
 304   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 305     Block* block = C->cfg()->get_block(i);
 306     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 307       Node* m = block->end();
 308       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 309         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 310         block->add_inst(epilog);
 311         C->cfg()->map_node_to_block(epilog, block);
 312       }
 313     }
 314   }
 315 
 316   // Keeper of sizing aspects
 317   _buf_sizes = BufferSizingData();
 318 
 319   // Initialize code buffer
 320   estimate_buffer_size(_buf_sizes._const);
 321   if (C->failing()) return;
 322 
 323   // Pre-compute the length of blocks and replace
 324   // long branches with short if machine supports it.
 325   // Must be done before ScheduleAndBundle due to SPARC delay slots
 326   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 327   blk_starts[0] = 0;
 328   shorten_branches(blk_starts);
 329 
 330   ScheduleAndBundle();
 331   if (C->failing()) {
 332     return;
 333   }
 334 
 335   perform_mach_node_analysis();
 336 
 337   // Complete sizing of codebuffer
 338   CodeBuffer* cb = init_buffer();
 339   if (cb == nullptr || C->failing()) {
 340     return;
 341   }
 342 
 343   BuildOopMaps();
 344 
 345   if (C->failing())  {
 346     return;
 347   }
 348 
 349   C2_MacroAssembler masm(cb);

 471     // Sum all instruction sizes to compute block size
 472     uint last_inst = block->number_of_nodes();
 473     uint blk_size = 0;
 474     for (uint j = 0; j < last_inst; j++) {
 475       _index = j;
 476       Node* nj = block->get_node(_index);
 477       // Handle machine instruction nodes
 478       if (nj->is_Mach()) {
 479         MachNode* mach = nj->as_Mach();
 480         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 481         reloc_size += mach->reloc();
 482         if (mach->is_MachCall()) {
 483           // add size information for trampoline stub
 484           // class CallStubImpl is platform-specific and defined in the *.ad files.
 485           stub_size  += CallStubImpl::size_call_trampoline();
 486           reloc_size += CallStubImpl::reloc_call_trampoline();
 487 
 488           MachCallNode *mcall = mach->as_MachCall();
 489           // This destination address is NOT PC-relative
 490 
 491           mcall->method_set((intptr_t)mcall->entry_point());
 492 
 493           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 494             stub_size  += CompiledDirectCall::to_interp_stub_size();
 495             reloc_size += CompiledDirectCall::reloc_to_interp_stub();
 496           }
 497         } else if (mach->is_MachSafePoint()) {
 498           // If call/safepoint are adjacent, account for possible
 499           // nop to disambiguate the two safepoints.
 500           // ScheduleAndBundle() can rearrange nodes in a block,
 501           // check for all offsets inside this block.
 502           if (last_call_adr >= blk_starts[i]) {
 503             blk_size += nop_size;
 504           }
 505         }
 506         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 507           // Nop is inserted between "avoid back to back" instructions.
 508           // ScheduleAndBundle() can rearrange nodes in a block,
 509           // check for all offsets inside this block.
 510           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 511             blk_size += nop_size;

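Note: the (alignment_required() - 1) * relocInfo::addr_unit() term at line 480 above reserves worst-case padding; the nops actually emitted depend on the offset reached at emission time. A sketch with assumed x86-like values (1-byte addr_unit, 16-byte alignment):

    #include <cstdio>
    int main() {
      const int A = 16, addr_unit = 1;      // hypothetical alignment_required(), addr_unit()
      int worst_case = (A - 1) * addr_unit; // what the size estimate reserves: 15
      int offset = 37;                      // hypothetical current code offset
      int actual = (A - offset % A) % A;    // nops really needed here: 11
      printf("worst case %d, actual %d\n", worst_case, actual);
      return 0;
    }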
 726     // New functionality:
 727     //   Assert if the local is not top. In product mode let the new node
 728     //   override the old entry.
 729     assert(local == C->top(), "LocArray collision");
 730     if (local == C->top()) {
 731       return;
 732     }
 733     array->pop();
 734   }
 735   const Type *t = local->bottom_type();
 736 
 737   // Is it a safepoint scalar object node?
 738   if (local->is_SafePointScalarObject()) {
 739     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 740 
 741     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 742     if (sv == nullptr) {
 743       ciKlass* cik = t->is_oopptr()->exact_klass();
 744       assert(cik->is_instance_klass() ||
 745              cik->is_array_klass(), "Not supported allocation.");
 746       sv = new ObjectValue(spobj->_idx,
 747                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
 748       set_sv_for_object_node(objs, sv);
 749 
 750       uint first_ind = spobj->first_index(sfpt->jvms());
 751       for (uint i = 0; i < spobj->n_fields(); i++) {
 752         Node* fld_node = sfpt->in(first_ind+i);
 753         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 754       }
 755     }
 756     array->append(sv);
 757     return;
 758   } else if (local->is_SafePointScalarMerge()) {
 759     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 760     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 761 
 762     if (mv == nullptr) {
 763       GrowableArray<ScopeValue*> deps;
 764 
 765       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 766       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 767       assert(deps.length() == 1, "missing value");
 768 
 769       int selector_idx = smerge->selector_idx(sfpt->jvms());
 770       (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);

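Note: sv_for_node_id and set_sv_for_object_node (lines 741-748 above) form a lookup-before-create cache keyed by node _idx, so every reference to the same scalar-replaced object within a scope shares one ObjectValue. A minimal standalone analogue (all names hypothetical):

    #include <cstdio>
    #include <vector>
    struct Value { unsigned id; };
    Value* find(std::vector<Value*>& objs, unsigned id) {
      for (Value* v : objs) if (v->id == id) return v;  // sv_for_node_id
      return nullptr;
    }
    Value* find_or_create(std::vector<Value*>& objs, unsigned id) {
      if (Value* v = find(objs, id)) return v;          // reuse on hit
      Value* v = new Value{id};                         // new ObjectValue(...)
      objs.push_back(v);                                // set_sv_for_object_node
      return v;
    }
    int main() {
      std::vector<Value*> objs;
      printf("%d\n", find_or_create(objs, 7) == find_or_create(objs, 7)); // prints 1
      return 0;
    }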
 976     if (!n->is_SafePointScalarObject()) {
 977       continue;
 978     }
 979 
 980     ObjectValue* other = sv_for_node_id(objs, n->_idx);
 981     if (ov == other) {
 982       return true;
 983     }
 984   }
 985   return false;
 986 }
 987 
 988 //--------------------------Process_OopMap_Node--------------------------------
 989 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
 990   // Handle special safepoint nodes for synchronization
 991   MachSafePointNode *sfn   = mach->as_MachSafePoint();
 992   MachCallNode      *mcall;
 993 
 994   int safepoint_pc_offset = current_offset;
 995   bool return_oop = false;
 996   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
 997   bool arg_escape = false;
 998 
 999   // Add the safepoint in the DebugInfoRecorder
1000   if( !mach->is_MachCall() ) {
1001     mcall = nullptr;
1002     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1003   } else {
1004     mcall = mach->as_MachCall();
1005 
1006     if (mcall->is_MachCallJava()) {
1007       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1008     }
1009 
1010     // Check if a call returns an object.
1011     if (mcall->returns_pointer()) {
1012       return_oop = true;
1013     }
1014     safepoint_pc_offset += mcall->ret_addr_offset();
1015     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1016   }
1017 
1018   // Loop over the JVMState list to add scope information
1019   // Do not skip safepoints with a null method, they need monitor info
1020   JVMState* youngest_jvms = sfn->jvms();
1021   int max_depth = youngest_jvms->depth();
1022 
1023   // Allocate the object pool for scalar-replaced objects -- the map from
1024   // small-integer keys (which can be recorded in the local and ostack
1025   // arrays) to descriptions of the object state.
1026   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1027 
1028   // Visit scopes from oldest to youngest.
1029   for (int depth = 1; depth <= max_depth; depth++) {
1030     JVMState* jvms = youngest_jvms->of_depth(depth);
1031     int idx;
1032     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1033     // Safepoints that do not have method() set only provide oop-map and monitor info

1062     // Build the growable array of ScopeValues for exp stack
1063     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
1064 
1065     // Loop over monitors and insert into array
1066     for (idx = 0; idx < num_mon; idx++) {
1067       // Grab the node that defines this monitor
1068       Node* box_node = sfn->monitor_box(jvms, idx);
1069       Node* obj_node = sfn->monitor_obj(jvms, idx);
1070 
1071       // Create ScopeValue for object
1072       ScopeValue *scval = nullptr;
1073 
1074       if (obj_node->is_SafePointScalarObject()) {
1075         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
1076         scval = PhaseOutput::sv_for_node_id(objs, spobj->_idx);
1077         if (scval == nullptr) {
1078           const Type *t = spobj->bottom_type();
1079           ciKlass* cik = t->is_oopptr()->exact_klass();
1080           assert(cik->is_instance_klass() ||
1081                  cik->is_array_klass(), "Not supported allocation.");
1082           ObjectValue* sv = new ObjectValue(spobj->_idx,
1083                                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
1084           PhaseOutput::set_sv_for_object_node(objs, sv);
1085 
1086           uint first_ind = spobj->first_index(youngest_jvms);
1087           for (uint i = 0; i < spobj->n_fields(); i++) {
1088             Node* fld_node = sfn->in(first_ind+i);
1089             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
1090           }
1091           scval = sv;
1092         }
1093       } else if (obj_node->is_SafePointScalarMerge()) {
1094         SafePointScalarMergeNode* smerge = obj_node->as_SafePointScalarMerge();
1095         ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
1096 
1097         if (mv == nullptr) {
1098           GrowableArray<ScopeValue*> deps;
1099 
1100           int merge_pointer_idx = smerge->merge_pointer_idx(youngest_jvms);
1101           FillLocArray(0, sfn, sfn->in(merge_pointer_idx), &deps, objs);
1102           assert(deps.length() == 1, "missing value");
1103 

1170     DebugToken *locvals = C->debug_info()->create_scope_values(locarray);
1171     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1172     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1173 
1174     // Make method available for all Safepoints
1175     ciMethod* scope_method = method ? method : C->method();
1176     // Describe the scope here
1177     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1178     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1179     // Now we can describe the scope.
1180     methodHandle null_mh;
1181     bool rethrow_exception = false;
1182     C->debug_info()->describe_scope(
1183       safepoint_pc_offset,
1184       null_mh,
1185       scope_method,
1186       jvms->bci(),
1187       jvms->should_reexecute(),
1188       rethrow_exception,
1189       return_oop,
1190       has_ea_local_in_scope,
1191       arg_escape,
1192       locvals,
1193       expvals,
1194       monvals
1195     );
1196   } // End jvms loop
1197 
1198   // Mark the end of the scope set.
1199   C->debug_info()->end_safepoint(safepoint_pc_offset);
1200 }
1201 
1202 
1203 
1204 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1205 class NonSafepointEmitter {
1206     Compile*  C;
1207     JVMState* _pending_jvms;
1208     int       _pending_offset;
1209 

1525           MachNode *nop = new MachNopNode(nops_cnt);
1526           block->insert_node(nop, j++);
1527           last_inst++;
1528           C->cfg()->map_node_to_block(nop, block);
1529           // Ensure enough space.
1530           masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1531           if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1532             C->record_failure("CodeCache is full");
1533             return;
1534           }
1535           nop->emit(masm, C->regalloc());
1536           masm->code()->flush_bundle(true);
1537           current_offset = masm->offset();
1538         }
1539 
1540         bool observe_safepoint = is_sfn;
1541         // Remember the start of the last call in a basic block
1542         if (is_mcall) {
1543           MachCallNode *mcall = mach->as_MachCall();
1544 
1545           // This destination address is NOT PC-relative
1546           mcall->method_set((intptr_t)mcall->entry_point());
1547 
1548           // Save the return address
1549           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1550 
1551           observe_safepoint = mcall->guaranteed_safepoint();
1552         }
1553 
1554         // sfn will be valid whenever mcall is valid now because of inheritance
1555         if (observe_safepoint) {
1556           // Handle special safepoint nodes for synchronization
1557           if (!is_mcall) {
1558             MachSafePointNode *sfn = mach->as_MachSafePoint();
1559             // !!!!! Stubs only need an oopmap right now, so bail out
1560             if (sfn->jvms()->method() == nullptr) {
1561               // Write the oopmap directly to the code blob??!!
1562               continue;
1563             }
1564           } // End synchronization
1565 
1566           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1664       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1665         node_offsets[n->_idx] = masm->offset();
1666       }
1667 #endif
1668       assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1669 
1670       // "Normal" instruction case
1671       DEBUG_ONLY(uint instr_offset = masm->offset());
1672       n->emit(masm, C->regalloc());
1673       current_offset = masm->offset();
1674 
1675       // Above we only verified that there is enough space in the instruction section.
1676       // However, the instruction may emit stubs that cause code buffer expansion.
1677       // Bail out here if expansion failed due to a lack of code cache space.
1678       if (C->failing()) {
1679         return;
1680       }
1681 
1682       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1683              "ret_addr_offset() not within emitted code");
1684 
1685 #ifdef ASSERT
1686       uint n_size = n->size(C->regalloc());
1687       if (n_size < (current_offset-instr_offset)) {
1688         MachNode* mach = n->as_Mach();
1689         n->dump();
1690         mach->dump_format(C->regalloc(), tty);
1691         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1692         Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
1693         tty->print_cr(" ------------------- ");
1694         BufferBlob* blob = this->scratch_buffer_blob();
1695         address blob_begin = blob->content_begin();
1696         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1697         assert(false, "wrong size of mach node");
1698       }
1699 #endif
1700       non_safepoints.observe_instruction(n, current_offset);
1701 
1702       // mcall is last "call" that can be a safepoint
1703       // record it so we can see if a poll will directly follow it
1704       // in which case we'll need a pad to make the PcDesc sites unique

2921         anti_do_use( b, n, _regalloc->get_reg_first(def) );
2922         anti_do_use( b, n, _regalloc->get_reg_second(def) );
2923       }
2924     }
2925     // Do not allow defs of new derived values to float above GC
2926     // points unless the base is definitely available at the GC point.
2927 
2928     Node *m = b->get_node(i);
2929 
2930     // Add precedence edge from following safepoint to use of derived pointer
2931     if( last_safept_node != end_node &&
2932         m != last_safept_node) {
2933       for (uint k = 1; k < m->req(); k++) {
2934         const Type *t = m->in(k)->bottom_type();
2935         if( t->isa_oop_ptr() &&
2936             t->is_ptr()->offset() != 0 ) {
2937           last_safept_node->add_prec( m );
2938           break;
2939         }
2940       }
2941     }
2942 
2943     if( n->jvms() ) {           // Precedence edge from derived to safept
2944       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
2945       if( b->get_node(last_safept) != last_safept_node ) {
2946         last_safept = b->find_node(last_safept_node);
2947       }
2948       for( uint j=last_safept; j > i; j-- ) {
2949         Node *mach = b->get_node(j);
2950         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
2951           mach->add_prec( n );
2952       }
2953       last_safept = i;
2954       last_safept_node = m;
2955     }
2956   }
2957 
2958   if (fat_proj_seen) {
2959     // Garbage collect pinch nodes that were not consumed.
2960     // They are usually created by a fat kill MachProj for a call.

3069 }
3070 #endif
3071 
3072 //-----------------------init_scratch_buffer_blob------------------------------
3073 // Construct a temporary BufferBlob and cache it for this compile.
3074 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3075   // If there is already a scratch buffer blob allocated and the
3076   // constant section is big enough, use it.  Otherwise free the
3077   // current and allocate a new one.
3078   BufferBlob* blob = scratch_buffer_blob();
3079   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3080     // Use the current blob.
3081   } else {
3082     if (blob != nullptr) {
3083       BufferBlob::free(blob);
3084     }
3085 
3086     ResourceMark rm;
3087     _scratch_const_size = const_size;
3088     int size = C2Compiler::initial_code_buffer_size(const_size);
3089     blob = BufferBlob::create("Compile::scratch_buffer", size);
3090     // Record the buffer blob for next time.
3091     set_scratch_buffer_blob(blob);
3092     // Have we run out of code space?
3093     if (scratch_buffer_blob() == nullptr) {
3094       // Let CompilerBroker disable further compilations.
3095       C->record_failure("Not enough space for scratch buffer in CodeCache");
3096       return;
3097     }
3098   }
3099 
3100   // Initialize the relocation buffers
3101   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3102   set_scratch_locs_memory(locs_buf);
3103 }
3104 
3105 
3106 //-----------------------scratch_emit_size-------------------------------------
3107 // Helper function that computes size by emitting code
3108 uint PhaseOutput::scratch_emit_size(const Node* n) {

3139   buf.insts()->set_scratch_emit();
3140   buf.stubs()->set_scratch_emit();
3141 
3142   // Do the emission.
3143 
3144   Label fakeL; // Fake label for branch instructions.
3145   Label*   saveL = nullptr;
3146   uint save_bnum = 0;
3147   bool is_branch = n->is_MachBranch();
3148   C2_MacroAssembler masm(&buf);
3149   masm.bind(fakeL);
3150   if (is_branch) {
3151     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3152     n->as_MachBranch()->label_set(&fakeL, 0);
3153   }
3154   n->emit(&masm, C->regalloc());
3155 
3156   // Emitting into the scratch buffer should not fail
3157   assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3158 
3159   if (is_branch) // Restore label.
3160     n->as_MachBranch()->label_set(saveL, save_bnum);
3161 
3162   // End scratch_emit_size section.
3163   set_in_scratch_emit_size(false);
3164 
3165   return buf.insts_size();
3166 }
3167 
3168 void PhaseOutput::install() {
3169   if (!C->should_install_code()) {
3170     return;
3171   } else if (C->stub_function() != nullptr) {
3172     install_stub(C->stub_name());
3173   } else {
3174     install_code(C->method(),
3175                  C->entry_bci(),
3176                  CompileBroker::compiler2(),
3177                  C->has_unsafe_access(),
3178                  SharedRuntime::is_wide_vector(C->max_vector_size()));
3179   }
3180 }
3181 
3182 void PhaseOutput::install_code(ciMethod*         target,
3183                                int               entry_bci,
3184                                AbstractCompiler* compiler,
3185                                bool              has_unsafe_access,
3186                                bool              has_wide_vectors) {
3187   // Check if we want to skip execution of all compiled code.
3188   {
3189 #ifndef PRODUCT
3190     if (OptoNoExecute) {
3191       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3192       return;
3193     }
3194 #endif
3195     Compile::TracePhase tp(_t_registerMethod);
3196 
3197     if (C->is_osr_compilation()) {
3198       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3199       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3200     } else {
3201       if (!target->is_static()) {
3202         // The UEP of an nmethod ensures that the VEP is padded. However, the padding of the UEP is placed
3203         // before the inline cache check, so we don't have to execute any nop instructions when dispatching
3204         // through the UEP, yet we can ensure that the VEP is aligned appropriately.
3205         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size - MacroAssembler::ic_check_size());
3206       }
3207       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3208       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3209     }
3210 
3211     C->env()->register_method(target,
3212                                      entry_bci,
3213                                      &_code_offsets,
3214                                      _orig_pc_slot_offset_in_bytes,
3215                                      code_buffer(),
3216                                      frame_size_in_words(),
3217                                      oop_map_set(),
3218                                      &_handler_table,
3219                                      inc_table(),
3220                                      compiler,
3221                                      has_unsafe_access,
3222                                      SharedRuntime::is_wide_vector(C->max_vector_size()),
3223                                      C->has_monitors(),
3224                                      C->has_scoped_access(),
3225                                      0);
3226 
3227     if (C->log() != nullptr) { // Print code cache state into compiler log
3228       C->log()->code_cache_state();
3229     }
3230   }
3231 }
3232 void PhaseOutput::install_stub(const char* stub_name) {
3233   // Entry point will be accessed using stub_entry_point();
3234   if (code_buffer() == nullptr) {
3235     Matcher::soft_match_failure();
3236   } else {
3237     if (PrintAssembly && (WizardMode || Verbose))
3238       tty->print_cr("### Stub::%s", stub_name);
3239 
3240     if (!C->failing()) {
3241       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3242 
3243       // Make the NMethod
3244       // For now we mark the frame as never safe for profile stackwalking
3245       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,

------ end of old version; the revised version of the file follows ------

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/assembler.inline.hpp"
  26 #include "code/aotCodeCache.hpp"
  27 #include "code/compiledIC.hpp"
  28 #include "code/debugInfo.hpp"
  29 #include "code/debugInfoRec.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "compiler/compilerDirectives.hpp"
  32 #include "compiler/disassembler.hpp"
  33 #include "compiler/oopMap.hpp"
  34 #include "gc/shared/barrierSet.hpp"
  35 #include "gc/shared/c2/barrierSetC2.hpp"
  36 #include "gc/shared/gc_globals.hpp"
  37 #include "memory/allocation.hpp"
  38 #include "opto/ad.hpp"
  39 #include "opto/block.hpp"
  40 #include "opto/c2_MacroAssembler.hpp"
  41 #include "opto/c2compiler.hpp"
  42 #include "opto/callnode.hpp"
  43 #include "opto/cfgnode.hpp"
  44 #include "opto/locknode.hpp"
  45 #include "opto/machnode.hpp"
  46 #include "opto/node.hpp"
  47 #include "opto/optoreg.hpp"
  48 #include "opto/output.hpp"
  49 #include "opto/regalloc.hpp"
  50 #include "opto/type.hpp"
  51 #include "runtime/sharedRuntime.hpp"
  52 #include "utilities/macros.hpp"
  53 #include "utilities/powerOfTwo.hpp"
  54 #include "utilities/xmlstream.hpp"
  55 
  56 #ifndef PRODUCT

 213     _first_block_size(0),
 214     _handler_table(),
 215     _inc_table(),
 216     _stub_list(),
 217     _oop_map_set(nullptr),
 218     _scratch_buffer_blob(nullptr),
 219     _scratch_locs_memory(nullptr),
 220     _scratch_const_size(-1),
 221     _in_scratch_emit_size(false),
 222     _frame_slots(0),
 223     _code_offsets(),
 224     _node_bundling_limit(0),
 225     _node_bundling_base(nullptr),
 226     _orig_pc_slot(0),
 227     _orig_pc_slot_offset_in_bytes(0),
 228     _buf_sizes(),
 229     _block(nullptr),
 230     _index(0) {
 231   C->set_output(this);
 232   if (C->stub_name() == nullptr) {
 233     int fixed_slots = C->fixed_slots();
 234     if (C->needs_stack_repair()) {
 235       fixed_slots -= 2;
 236     }
 237     // TODO 8284443 Only reserve extra slot if needed
 238     if (InlineTypeReturnedAsFields) {
 239       fixed_slots -= 2;
 240     }
 241     _orig_pc_slot = fixed_slots - (sizeof(address) / VMRegImpl::stack_slot_size);
 242   }
 243 }
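Note: the slot arithmetic above in a standalone sketch. One address-sized word costs sizeof(address) / VMRegImpl::stack_slot_size stack slots (2 on a 64-bit target, where a slot is 4 bytes), and each of the two fixed_slots -= 2 adjustments skips one such reserved word. Input values below are hypothetical:

    #include <cstdio>
    int main() {
      const int stack_slot_size = 4;                        // VMRegImpl::stack_slot_size
      const int pc_slots = sizeof(void*) / stack_slot_size; // 2 on a 64-bit target
      int fixed_slots = 12;                                 // hypothetical C->fixed_slots()
      fixed_slots -= 2;                                     // stack-repair word
      fixed_slots -= 2;                                     // InlineTypeReturnedAsFields word
      int orig_pc_slot = fixed_slots - pc_slots;
      printf("orig_pc_slot = %d\n", orig_pc_slot);          // 6
      return 0;
    }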
 244 
 245 PhaseOutput::~PhaseOutput() {
 246   C->set_output(nullptr);
 247   if (_scratch_buffer_blob != nullptr) {
 248     BufferBlob::free(_scratch_buffer_blob);
 249   }
 250 }
 251 
 252 void PhaseOutput::perform_mach_node_analysis() {
 253   // Late barrier analysis must be done after schedule and bundle
 254   // Otherwise liveness based spilling will fail
 255   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 256   bs->late_barrier_analysis();
 257 
 258   pd_perform_mach_node_analysis();
 259 
 260   C->print_method(CompilerPhaseType::PHASE_MACH_ANALYSIS, 3);
 261 }
 262 
 263 // Convert Nodes to instruction bits and pass off to the VM
 264 void PhaseOutput::Output() {
 265   // RootNode goes
 266   assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
 267 
 268   // The number of new nodes (mostly MachNop) is proportional to
 269   // the number of java calls and inner loops which are aligned.
 270   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
 271                             C->inner_loops()*(OptoLoopAlignment-1)),
 272                            "out of nodes before code generation" ) ) {
 273     return;
 274   }
 275   // Make sure I can find the Start Node
 276   Block *entry = C->cfg()->get_block(1);
 277   Block *broot = C->cfg()->get_root_block();
 278 
 279   const StartNode *start = entry->head()->as_Start();
 280 
 281   // Replace StartNode with prolog
 282   Label verified_entry;
 283   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
 284   entry->map_node(prolog, 0);
 285   C->cfg()->map_node_to_block(prolog, entry);
 286   C->cfg()->unmap_node_from_block(start); // start is no longer in any block
 287 
 288   // Virtual methods need an unverified entry point
 289   if (C->is_osr_compilation()) {
 290     if (PoisonOSREntry) {
 291       // TODO: Should use a ShouldNotReachHereNode...
 292       C->cfg()->insert( broot, 0, new MachBreakpointNode() );
 293     }
 294   } else {
 295     if (C->method()) {
 296       if (C->method()->has_scalarized_args()) {
 297         // Add entry point to unpack all inline type arguments
 298         C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
 299         if (!C->method()->is_static()) {
 300           // Add verified/unverified entry points to only unpack inline type receiver at interface calls
 301           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
 302           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true,  /* receiver_only */ true));
 303           C->cfg()->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
 304         }
 305       } else if (!C->method()->is_static()) {
 306         // Insert unvalidated entry point
 307         C->cfg()->insert(broot, 0, new MachUEPNode());
 308       }
 309     }
 310   }
 311 
 312   // Break before main entry point
 313   if ((C->method() && C->directive()->BreakAtExecuteOption) ||
 314       (OptoBreakpoint && C->is_method_compilation())       ||
 315       (OptoBreakpointOSR && C->is_osr_compilation())       ||
 316       (OptoBreakpointC2R && !C->method())                   ) {
 317     // checking for C->method() means that OptoBreakpoint does not apply to
 318     // runtime stubs or frame converters
 319     C->cfg()->insert( entry, 1, new MachBreakpointNode() );
 320   }
 321 
 322   // Insert epilogs before every return
 323   for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
 324     Block* block = C->cfg()->get_block(i);
 325     if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) { // Found a program exit point?
 326       Node* m = block->end();
 327       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 328         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 329         block->add_inst(epilog);
 330         C->cfg()->map_node_to_block(epilog, block);
 331       }
 332     }
 333   }
 334 
 335   // Keeper of sizing aspects
 336   _buf_sizes = BufferSizingData();
 337 
 338   // Initialize code buffer
 339   estimate_buffer_size(_buf_sizes._const);
 340   if (C->failing()) return;
 341 
 342   // Pre-compute the length of blocks and replace
 343   // long branches with short if machine supports it.
 344   // Must be done before ScheduleAndBundle due to SPARC delay slots
 345   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
 346   blk_starts[0] = 0;
 347   shorten_branches(blk_starts);
 348 
 349   if (!C->is_osr_compilation() && C->has_scalarized_args()) {
 350     // Compute the offsets of the entry points required by the inline type calling convention
 351     if (!C->method()->is_static()) {
 352       // We have entries at the beginning of the method, implemented by the first 4 nodes.
 353       // Entry                     (unverified) @ offset 0
 354       // Verified_Inline_Entry_RO
 355       // Inline_Entry              (unverified)
 356       // Verified_Inline_Entry
 357       uint offset = 0;
 358       _code_offsets.set_value(CodeOffsets::Entry, offset);
 359 
 360       offset += ((MachVEPNode*)broot->get_node(0))->size(C->regalloc());
 361       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, offset);
 362 
 363       offset += ((MachVEPNode*)broot->get_node(1))->size(C->regalloc());
 364       _code_offsets.set_value(CodeOffsets::Inline_Entry, offset);
 365 
 366       offset += ((MachVEPNode*)broot->get_node(2))->size(C->regalloc());
 367       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, offset);
 368     } else {
 369       _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
 370       _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, 0);
 371     }
 372   }
 373 
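Note: the four entry points computed above (lines 349-372) sit back to back at the start of the method; each recorded offset is the accumulated machine size of the preceding MachVEPNodes. A layout sketch (order taken from the comment above, sizes symbolic):

    offset 0          CodeOffsets::Entry                     (unverified)
    + size(node 0)    CodeOffsets::Verified_Inline_Entry_RO
    + size(node 1)    CodeOffsets::Inline_Entry              (unverified)
    + size(node 2)    CodeOffsets::Verified_Inline_Entry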
 374   ScheduleAndBundle();
 375   if (C->failing()) {
 376     return;
 377   }
 378 
 379   perform_mach_node_analysis();
 380 
 381   // Complete sizing of codebuffer
 382   CodeBuffer* cb = init_buffer();
 383   if (cb == nullptr || C->failing()) {
 384     return;
 385   }
 386 
 387   BuildOopMaps();
 388 
 389   if (C->failing())  {
 390     return;
 391   }
 392 
 393   C2_MacroAssembler masm(cb);

 515     // Sum all instruction sizes to compute block size
 516     uint last_inst = block->number_of_nodes();
 517     uint blk_size = 0;
 518     for (uint j = 0; j < last_inst; j++) {
 519       _index = j;
 520       Node* nj = block->get_node(_index);
 521       // Handle machine instruction nodes
 522       if (nj->is_Mach()) {
 523         MachNode* mach = nj->as_Mach();
 524         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 525         reloc_size += mach->reloc();
 526         if (mach->is_MachCall()) {
 527           // add size information for trampoline stub
 528           // class CallStubImpl is platform-specific and defined in the *.ad files.
 529           stub_size  += CallStubImpl::size_call_trampoline();
 530           reloc_size += CallStubImpl::reloc_call_trampoline();
 531 
 532           MachCallNode *mcall = mach->as_MachCall();
 533           // This destination address is NOT PC-relative
 534 
 535           if (mcall->entry_point() != nullptr) {
 536             mcall->method_set((intptr_t)mcall->entry_point());
 537           }
 538 
 539           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 540             stub_size  += CompiledDirectCall::to_interp_stub_size();
 541             reloc_size += CompiledDirectCall::reloc_to_interp_stub();
 542           }
 543         } else if (mach->is_MachSafePoint()) {
 544           // If call/safepoint are adjacent, account for possible
 545           // nop to disambiguate the two safepoints.
 546           // ScheduleAndBundle() can rearrange nodes in a block,
 547           // check for all offsets inside this block.
 548           if (last_call_adr >= blk_starts[i]) {
 549             blk_size += nop_size;
 550           }
 551         }
 552         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 553           // Nop is inserted between "avoid back to back" instructions.
 554           // ScheduleAndBundle() can rearrange nodes in a block,
 555           // check for all offsets inside this block.
 556           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
 557             blk_size += nop_size;

 772     // New functionality:
 773     //   Assert if the local is not top. In product mode let the new node
 774     //   override the old entry.
 775     assert(local == C->top(), "LocArray collision");
 776     if (local == C->top()) {
 777       return;
 778     }
 779     array->pop();
 780   }
 781   const Type *t = local->bottom_type();
 782 
 783   // Is it a safepoint scalar object node?
 784   if (local->is_SafePointScalarObject()) {
 785     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
 786 
 787     ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
 788     if (sv == nullptr) {
 789       ciKlass* cik = t->is_oopptr()->exact_klass();
 790       assert(cik->is_instance_klass() ||
 791              cik->is_array_klass(), "Not supported allocation.");
 792       uint first_ind = spobj->first_index(sfpt->jvms());
 793       // Nullable, scalarized inline types have a null_marker input
 794       // that needs to be checked before using the field values.
 795       ScopeValue* properties = nullptr;
 796       if (cik->is_inlinetype()) {
 797         Node* null_marker_node = sfpt->in(first_ind++);
 798         assert(null_marker_node != nullptr, "null_marker node not found");
 799         if (!null_marker_node->is_top()) {
 800           const TypeInt* null_marker_type = null_marker_node->bottom_type()->is_int();
 801           if (null_marker_node->is_Con()) {
 802             properties = new ConstantIntValue(null_marker_type->get_con());
 803           } else {
 804             OptoReg::Name null_marker_reg = C->regalloc()->get_reg_first(null_marker_node);
 805             properties = new_loc_value(C->regalloc(), null_marker_reg, Location::normal);
 806           }
 807         }
 808       }
 809       if (cik->is_array_klass() && !cik->is_type_array_klass()) {
 810         jint props = ArrayKlass::ArrayProperties::DEFAULT;
 811         if (cik->as_array_klass()->element_klass()->is_inlinetype()) {
 812           if (cik->as_array_klass()->is_elem_null_free()) {
 813             props |= ArrayKlass::ArrayProperties::NULL_RESTRICTED;
 814           }
 815           if (!cik->as_array_klass()->is_elem_atomic()) {
 816             props |= ArrayKlass::ArrayProperties::NON_ATOMIC;
 817           }
 818         }
 819         properties = new ConstantIntValue(props);
 820       }
 821       sv = new ObjectValue(spobj->_idx,
 822                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, properties);
 823       set_sv_for_object_node(objs, sv);
 824 
 825       for (uint i = 0; i < spobj->n_fields(); i++) {
 826         Node* fld_node = sfpt->in(first_ind+i);
 827         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 828       }
 829     }
 830     array->append(sv);
 831     return;
 832   } else if (local->is_SafePointScalarMerge()) {
 833     SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
 834     ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
 835 
 836     if (mv == nullptr) {
 837       GrowableArray<ScopeValue*> deps;
 838 
 839       int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
 840       (void)FillLocArray(0, sfpt, sfpt->in(merge_pointer_idx), &deps, objs);
 841       assert(deps.length() == 1, "missing value");
 842 
 843       int selector_idx = smerge->selector_idx(sfpt->jvms());
 844       (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);

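Note: the property word built at lines 809-820 above is a small bit set ORed together from array traits. A standalone sketch of the same pattern (the flag encodings here are hypothetical; only the OR logic mirrors the code):

    #include <cstdio>
    enum ArrayProps { DEFAULT = 0, NULL_RESTRICTED = 1, NON_ATOMIC = 2 }; // hypothetical values
    int main() {
      int props = DEFAULT;
      bool elem_is_inlinetype = true, null_free = true, atomic = false;   // hypothetical array
      if (elem_is_inlinetype) {
        if (null_free) props |= NULL_RESTRICTED;
        if (!atomic)   props |= NON_ATOMIC;
      }
      printf("props = %d\n", props); // 3
      return 0;
    }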
1050     if (!n->is_SafePointScalarObject()) {
1051       continue;
1052     }
1053 
1054     ObjectValue* other = sv_for_node_id(objs, n->_idx);
1055     if (ov == other) {
1056       return true;
1057     }
1058   }
1059   return false;
1060 }
1061 
1062 //--------------------------Process_OopMap_Node--------------------------------
1063 void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
1064   // Handle special safepoint nodes for synchronization
1065   MachSafePointNode *sfn   = mach->as_MachSafePoint();
1066   MachCallNode      *mcall;
1067 
1068   int safepoint_pc_offset = current_offset;
1069   bool return_oop = false;
1070   bool return_scalarized = false;
1071   bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
1072   bool arg_escape = false;
1073 
1074   // Add the safepoint in the DebugInfoRecorder
1075   if( !mach->is_MachCall() ) {
1076     mcall = nullptr;
1077     C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
1078   } else {
1079     mcall = mach->as_MachCall();
1080 
1081     if (mcall->is_MachCallJava()) {
1082       arg_escape = mcall->as_MachCallJava()->_arg_escape;
1083     }
1084 
1085     // Check if a call returns an object.
1086     if (mcall->returns_pointer() || mcall->returns_scalarized()) {
1087       return_oop = true;
1088     }
1089     if (mcall->returns_scalarized()) {
1090       return_scalarized = true;
1091     }
1092     safepoint_pc_offset += mcall->ret_addr_offset();
1093     C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
1094   }
1095 
1096   // Loop over the JVMState list to add scope information
1097   // Do not skip safepoints with a null method, they need monitor info
1098   JVMState* youngest_jvms = sfn->jvms();
1099   int max_depth = youngest_jvms->depth();
1100 
1101   // Allocate the object pool for scalar-replaced objects -- the map from
1102   // small-integer keys (which can be recorded in the local and ostack
1103   // arrays) to descriptions of the object state.
1104   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
1105 
1106   // Visit scopes from oldest to youngest.
1107   for (int depth = 1; depth <= max_depth; depth++) {
1108     JVMState* jvms = youngest_jvms->of_depth(depth);
1109     int idx;
1110     ciMethod* method = jvms->has_method() ? jvms->method() : nullptr;
1111     // Safepoints that do not have method() set only provide oop-map and monitor info

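Note: for a call, the recorded safepoint PC is the return address, i.e. the call-site offset plus the machine-dependent call instruction length (line 1092 above). A worked example with hypothetical numbers:

    #include <cstdio>
    int main() {
      int current_offset  = 100; // hypothetical offset of the call instruction
      int ret_addr_offset = 5;   // hypothetical ret_addr_offset(), e.g. a 5-byte x86 call
      printf("safepoint pc = %d\n", current_offset + ret_addr_offset); // 105
      return 0;
    }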
1140     // Build the growable array of ScopeValues for exp stack
1141     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
1142 
1143     // Loop over monitors and insert into array
1144     for (idx = 0; idx < num_mon; idx++) {
1145       // Grab the node that defines this monitor
1146       Node* box_node = sfn->monitor_box(jvms, idx);
1147       Node* obj_node = sfn->monitor_obj(jvms, idx);
1148 
1149       // Create ScopeValue for object
1150       ScopeValue *scval = nullptr;
1151 
1152       if (obj_node->is_SafePointScalarObject()) {
1153         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
1154         scval = PhaseOutput::sv_for_node_id(objs, spobj->_idx);
1155         if (scval == nullptr) {
1156           const Type *t = spobj->bottom_type();
1157           ciKlass* cik = t->is_oopptr()->exact_klass();
1158           assert(cik->is_instance_klass() ||
1159                  cik->is_array_klass(), "Not supported allocation.");
1160           assert(!cik->is_inlinetype(), "Synchronization on value object?");
1161           ScopeValue* properties = nullptr;
1162           if (cik->is_array_klass() && !cik->is_type_array_klass()) {
1163             jint props = ArrayKlass::ArrayProperties::DEFAULT;
1164             if (cik->as_array_klass()->element_klass()->is_inlinetype()) {
1165               if (cik->as_array_klass()->is_elem_null_free()) {
1166                 props |= ArrayKlass::ArrayProperties::NULL_RESTRICTED;
1167               }
1168               if (!cik->as_array_klass()->is_elem_atomic()) {
1169                 props |= ArrayKlass::ArrayProperties::NON_ATOMIC;
1170               }
1171             }
1172             properties = new ConstantIntValue(props);
1173           }
1174           ObjectValue* sv = new ObjectValue(spobj->_idx,
1175                                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()), true, properties);
1176           PhaseOutput::set_sv_for_object_node(objs, sv);
1177 
1178           uint first_ind = spobj->first_index(youngest_jvms);
1179           for (uint i = 0; i < spobj->n_fields(); i++) {
1180             Node* fld_node = sfn->in(first_ind+i);
1181             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
1182           }
1183           scval = sv;
1184         }
1185       } else if (obj_node->is_SafePointScalarMerge()) {
1186         SafePointScalarMergeNode* smerge = obj_node->as_SafePointScalarMerge();
1187         ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);
1188 
1189         if (mv == nullptr) {
1190           GrowableArray<ScopeValue*> deps;
1191 
1192           int merge_pointer_idx = smerge->merge_pointer_idx(youngest_jvms);
1193           FillLocArray(0, sfn, sfn->in(merge_pointer_idx), &deps, objs);
1194           assert(deps.length() == 1, "missing value");
1195 

1262     DebugToken *locvals = C->debug_info()->create_scope_values(locarray);
1263     DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
1264     DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
1265 
1266     // Make method available for all Safepoints
1267     ciMethod* scope_method = method ? method : C->method();
1268     // Describe the scope here
1269     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
1270     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
1271     // Now we can describe the scope.
1272     methodHandle null_mh;
1273     bool rethrow_exception = false;
1274     C->debug_info()->describe_scope(
1275       safepoint_pc_offset,
1276       null_mh,
1277       scope_method,
1278       jvms->bci(),
1279       jvms->should_reexecute(),
1280       rethrow_exception,
1281       return_oop,
1282       return_scalarized,
1283       has_ea_local_in_scope,
1284       arg_escape,
1285       locvals,
1286       expvals,
1287       monvals
1288     );
1289   } // End jvms loop
1290 
1291   // Mark the end of the scope set.
1292   C->debug_info()->end_safepoint(safepoint_pc_offset);
1293 }
1294 
1295 
1296 
1297 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
1298 class NonSafepointEmitter {
1299     Compile*  C;
1300     JVMState* _pending_jvms;
1301     int       _pending_offset;
1302 

1618           MachNode *nop = new MachNopNode(nops_cnt);
1619           block->insert_node(nop, j++);
1620           last_inst++;
1621           C->cfg()->map_node_to_block(nop, block);
1622           // Ensure enough space.
1623           masm->code()->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1624           if ((masm->code()->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) {
1625             C->record_failure("CodeCache is full");
1626             return;
1627           }
1628           nop->emit(masm, C->regalloc());
1629           masm->code()->flush_bundle(true);
1630           current_offset = masm->offset();
1631         }
1632 
1633         bool observe_safepoint = is_sfn;
1634         // Remember the start of the last call in a basic block
1635         if (is_mcall) {
1636           MachCallNode *mcall = mach->as_MachCall();
1637 
1638           if (mcall->entry_point() != nullptr) {
1639             // This destination address is NOT PC-relative
1640             mcall->method_set((intptr_t)mcall->entry_point());
1641           }
1642 
1643           // Save the return address
1644           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1645 
1646           observe_safepoint = mcall->guaranteed_safepoint();
1647         }
1648 
1649         // sfn will be valid whenever mcall is valid now because of inheritance
1650         if (observe_safepoint) {
1651           // Handle special safepoint nodes for synchronization
1652           if (!is_mcall) {
1653             MachSafePointNode *sfn = mach->as_MachSafePoint();
1654             // !!!!! Stubs only need an oopmap right now, so bail out
1655             if (sfn->jvms()->method() == nullptr) {
1656               // Write the oopmap directly to the code blob??!!
1657               continue;
1658             }
1659           } // End synchronization
1660 
1661           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),

1759       if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) {
1760         node_offsets[n->_idx] = masm->offset();
1761       }
1762 #endif
1763       assert(!C->failing_internal() || C->failure_is_artificial(), "Should not reach here if failing.");
1764 
1765       // "Normal" instruction case
1766       DEBUG_ONLY(uint instr_offset = masm->offset());
1767       n->emit(masm, C->regalloc());
1768       current_offset = masm->offset();
1769 
1770       // Above we only verified that there is enough space in the instruction section.
1771       // However, the instruction may emit stubs that cause code buffer expansion.
1772       // Bail out here if expansion failed due to a lack of code cache space.
1773       if (C->failing()) {
1774         return;
1775       }
1776 
1777       assert(!is_mcall || (call_returns[block->_pre_order] <= (uint)current_offset),
1778              "ret_addr_offset() not within emitted code");
1779 #ifdef ASSERT
1780       uint n_size = n->size(C->regalloc());
1781       if (n_size < (current_offset-instr_offset)) {
1782         MachNode* mach = n->as_Mach();
1783         n->dump();
1784         mach->dump_format(C->regalloc(), tty);
1785         tty->print_cr(" n_size (%d), current_offset (%d), instr_offset (%d)", n_size, current_offset, instr_offset);
1786         Disassembler::decode(masm->code()->insts_begin() + instr_offset, masm->code()->insts_begin() + current_offset + 1, tty);
1787         tty->print_cr(" ------------------- ");
1788         BufferBlob* blob = this->scratch_buffer_blob();
1789         address blob_begin = blob->content_begin();
1790         Disassembler::decode(blob_begin, blob_begin + n_size + 1, tty);
1791         assert(false, "wrong size of mach node");
1792       }
1793 #endif
1794       non_safepoints.observe_instruction(n, current_offset);
1795 
1796       // mcall is last "call" that can be a safepoint
1797       // record it so we can see if a poll will directly follow it
1798       // in which case we'll need a pad to make the PcDesc sites unique

3015         anti_do_use( b, n, _regalloc->get_reg_first(def) );
3016         anti_do_use( b, n, _regalloc->get_reg_second(def) );
3017       }
3018     }
3019     // Do not allow defs of new derived values to float above GC
3020     // points unless the base is definitely available at the GC point.
3021 
3022     Node *m = b->get_node(i);
3023 
3024     // Add precedence edge from following safepoint to use of derived pointer
3025     if( last_safept_node != end_node &&
3026         m != last_safept_node) {
3027       for (uint k = 1; k < m->req(); k++) {
3028         const Type *t = m->in(k)->bottom_type();
3029         if( t->isa_oop_ptr() &&
3030             t->is_ptr()->offset() != 0 ) {
3031           last_safept_node->add_prec( m );
3032           break;
3033         }
3034       }
3035 
3036       // Do not allow a CheckCastPP node whose input is a raw pointer to
3037       // float past a safepoint.  This can occur when a buffered inline
3038       // type is allocated in a loop and the CheckCastPP from that
3039       // allocation is reused outside the loop.  If the use inside the
3040       // loop is scalarized the CheckCastPP will no longer be connected
3041       // to the loop safepoint.  See JDK-8264340.
3042       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CheckCastPP) {
3043         Node *def = m->in(1);
3044         if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
3045           last_safept_node->add_prec(m);
3046         }
3047       }
3048     }
3049 
3050     if( n->jvms() ) {           // Precedence edge from derived to safept
3051       // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
3052       if( b->get_node(last_safept) != last_safept_node ) {
3053         last_safept = b->find_node(last_safept_node);
3054       }
3055       for( uint j=last_safept; j > i; j-- ) {
3056         Node *mach = b->get_node(j);
3057         if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
3058           mach->add_prec( n );
3059       }
3060       last_safept = i;
3061       last_safept_node = m;
3062     }
3063   }
3064 
3065   if (fat_proj_seen) {
3066     // Garbage collect pinch nodes that were not consumed.
3067     // They are usually created by a fat kill MachProj for a call.

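Note: the precedence edges added at lines 3024-3047 above keep defs and uses of derived (interior) pointers ordered with respect to safepoints, since a moving GC must rebase a derived pointer from its recorded oop base. An illustration of the base/derived relationship only, not VM code:

    #include <cstdio>
    int main() {
      char obj[16] = {0};       // stand-in for a heap object (the oop base)
      char* base = obj;
      char* derived = base + 8; // AddP-style interior pointer, offset != 0
      // If GC relocated base, derived would have to be recomputed as
      // new_base + 8; hence the ordering constraints around safepoints.
      printf("offset = %td\n", derived - base); // 8
      return 0;
    }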
3176 }
3177 #endif
3178 
3179 //-----------------------init_scratch_buffer_blob------------------------------
3180 // Construct a temporary BufferBlob and cache it for this compile.
3181 void PhaseOutput::init_scratch_buffer_blob(int const_size) {
3182   // If there is already a scratch buffer blob allocated and the
3183   // constant section is big enough, use it.  Otherwise free the
3184   // current and allocate a new one.
3185   BufferBlob* blob = scratch_buffer_blob();
3186   if ((blob != nullptr) && (const_size <= _scratch_const_size)) {
3187     // Use the current blob.
3188   } else {
3189     if (blob != nullptr) {
3190       BufferBlob::free(blob);
3191     }
3192 
3193     ResourceMark rm;
3194     _scratch_const_size = const_size;
3195     int size = C2Compiler::initial_code_buffer_size(const_size);
3196     if (C->has_scalarized_args()) {
3197       // Inline type entry points (MachVEPNodes) require lots of space for GC barriers and oop verification
3198       // when loading object fields from the buffered argument. Increase scratch buffer size accordingly.
3199       ciMethod* method = C->method();
3200       int barrier_size = UseZGC ? 200 : (7 DEBUG_ONLY(+ 37));
3201       int arg_num = 0;
3202       if (!method->is_static()) {
3203         if (method->is_scalarized_arg(arg_num)) {
3204           size += method->holder()->as_inline_klass()->oop_count() * barrier_size;
3205         }
3206         arg_num++;
3207       }
3208       for (ciSignatureStream str(method->signature()); !str.at_return_type(); str.next()) {
3209         if (method->is_scalarized_arg(arg_num)) {
3210           size += str.type()->as_inline_klass()->oop_count() * barrier_size;
3211         }
3212         arg_num++;
3213       }
3214     }
3215     blob = BufferBlob::create("Compile::scratch_buffer", size);
3216     // Record the buffer blob for next time.
3217     set_scratch_buffer_blob(blob);
3218     // Have we run out of code space?
3219     if (scratch_buffer_blob() == nullptr) {
3220       // Let CompilerBroker disable further compilations.
3221       C->record_failure("Not enough space for scratch buffer in CodeCache");
3222       return;
3223     }
3224   }
3225 
3226   // Initialize the relocation buffers
3227   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
3228   set_scratch_locs_memory(locs_buf);
3229 }
3230 
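Note: the sizing loop above (lines 3196-3214) adds one barrier-sized chunk per oop field of every scalarized argument on top of the base buffer size. A worked example; only the ZGC barrier_size of 200 comes from the code, the other numbers are hypothetical:

    #include <cstdio>
    int main() {
      int size = 4096;                 // hypothetical initial_code_buffer_size(...)
      const int barrier_size = 200;    // the UseZGC estimate above
      const int oop_counts[] = {1, 3}; // hypothetical oop_count() per scalarized arg
      for (int oops : oop_counts) size += oops * barrier_size;
      printf("scratch buffer size = %d\n", size); // 4896
      return 0;
    }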
3231 
3232 //-----------------------scratch_emit_size-------------------------------------
3233 // Helper function that computes size by emitting code
3234 uint PhaseOutput::scratch_emit_size(const Node* n) {

3265   buf.insts()->set_scratch_emit();
3266   buf.stubs()->set_scratch_emit();
3267 
3268   // Do the emission.
3269 
3270   Label fakeL; // Fake label for branch instructions.
3271   Label*   saveL = nullptr;
3272   uint save_bnum = 0;
3273   bool is_branch = n->is_MachBranch();
3274   C2_MacroAssembler masm(&buf);
3275   masm.bind(fakeL);
3276   if (is_branch) {
3277     n->as_MachBranch()->save_label(&saveL, &save_bnum);
3278     n->as_MachBranch()->label_set(&fakeL, 0);
3279   }
3280   n->emit(&masm, C->regalloc());
3281 
3282   // Emitting into the scratch buffer should not fail
3283   assert(!C->failing_internal() || C->failure_is_artificial(), "Must not have pending failure. Reason is: %s", C->failure_reason());
3284 
3285   // Restore label.
3286   if (is_branch) {
3287     n->as_MachBranch()->label_set(saveL, save_bnum);
3288   }
3289 
3290   // End scratch_emit_size section.
3291   set_in_scratch_emit_size(false);
3292 
3293   return buf.insts_size();
3294 }
3295 
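Note: scratch_emit_size measures a node by actually emitting it into the scratch buffer; a branch is temporarily retargeted to the locally bound fakeL so emission cannot fault on an unbound label, and the real label is restored afterwards. The same save/emit/restore shape as a standalone sketch:

    #include <cstdio>
    struct SaveRestore {             // analogue of save_label(...) / label_set(saved, ...)
      int& slot; int saved;
      SaveRestore(int& s, int fake) : slot(s), saved(s) { s = fake; }
      ~SaveRestore() { slot = saved; }
    };
    int main() {
      int target = 42;               // the branch's real label
      {
        SaveRestore sr(target, 0);   // bind to the fake target while measuring
        printf("during emit: %d\n", target);  // 0
      }
      printf("restored: %d\n", target);       // 42
      return 0;
    }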
3296 void PhaseOutput::install() {
3297   if (!C->should_install_code()) {
3298     return;
3299   } else if (C->stub_function() != nullptr) {
3300     install_stub(C->stub_name());
3301   } else {
3302     install_code(C->method(),
3303                  C->entry_bci(),
3304                  CompileBroker::compiler2(),
3305                  C->has_unsafe_access(),
3306                  SharedRuntime::is_wide_vector(C->max_vector_size()));
3307   }
3308 }
3309 
3310 void PhaseOutput::install_code(ciMethod*         target,
3311                                int               entry_bci,
3312                                AbstractCompiler* compiler,
3313                                bool              has_unsafe_access,
3314                                bool              has_wide_vectors) {
3315   // Check if we want to skip execution of all compiled code.
3316   {
3317 #ifndef PRODUCT
3318     if (OptoNoExecute) {
3319       C->record_method_not_compilable("+OptoNoExecute");  // Flag as failed
3320       return;
3321     }
3322 #endif
3323     Compile::TracePhase tp(_t_registerMethod);
3324 
3325     if (C->is_osr_compilation()) {
3326       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
3327       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
3328     } else {
3329       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
3330       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry) == -1) {
3331         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry, _first_block_size);
3332       }
3333       if (_code_offsets.value(CodeOffsets::Verified_Inline_Entry_RO) == -1) {
3334         _code_offsets.set_value(CodeOffsets::Verified_Inline_Entry_RO, _first_block_size);
3335       }
3336       if (_code_offsets.value(CodeOffsets::Entry) == -1) {
3337         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
3338       }
3339       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
3340     }
3341 
3342     C->env()->register_method(target,
3343                               entry_bci,
3344                               &_code_offsets,
3345                               _orig_pc_slot_offset_in_bytes,
3346                               code_buffer(),
3347                               frame_size_in_words(),
3348                               _oop_map_set,
3349                               &_handler_table,
3350                               inc_table(),
3351                               compiler,
3352                               has_unsafe_access,
3353                               SharedRuntime::is_wide_vector(C->max_vector_size()),
3354                               C->has_monitors(),
3355                               C->has_scoped_access(),
3356                               0);
3357 
3358     if (C->log() != nullptr) { // Print code cache state into compiler log
3359       C->log()->code_cache_state();
3360     }
3361   }
3362 }
3363 void PhaseOutput::install_stub(const char* stub_name) {
3364   // Entry point will be accessed using stub_entry_point();
3365   if (code_buffer() == nullptr) {
3366     Matcher::soft_match_failure();
3367   } else {
3368     if (PrintAssembly && (WizardMode || Verbose))
3369       tty->print_cr("### Stub::%s", stub_name);
3370 
3371     if (!C->failing()) {
3372       assert(C->fixed_slots() == 0, "no fixed slots used for runtime stubs");
3373 
3374       // Make the NMethod
3375       // For now we mark the frame as never safe for profile stackwalking
3376       RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,