src/hotspot/share/opto/output.cpp

  54 
  55 // Convert Nodes to instruction bits and pass off to the VM
  56 void Compile::Output() {
  57   // RootNode goes
  58   assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
  59 
  60   // The number of new nodes (mostly MachNop) is proportional to
  61   // the number of java calls and inner loops which are aligned.
  62   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
  63                             C->inner_loops()*(OptoLoopAlignment-1)),
  64                            "out of nodes before code generation" ) ) {
  65     return;
  66   }
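A quick sanity check of the node budget above, assuming the default flag values (NodeLimitFudgeFactor = 2000, OptoLoopAlignment = 16; both are tunable, so the numbers are only illustrative): a method with 10 Java calls and 4 aligned inner loops reserves

    2000 + 10*3 + 4*(16 - 1) = 2090

spare nodes for the MachNops and alignment padding that code generation will insert.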
  67   // Make sure I can find the Start Node
  68   Block *entry = _cfg->get_block(1);
  69   Block *broot = _cfg->get_root_block();
  70 
  71   const StartNode *start = entry->head()->as_Start();
  72 
  73   // Replace StartNode with prolog
  74   MachPrologNode *prolog = new MachPrologNode();
  75   entry->map_node(prolog, 0);
  76   _cfg->map_node_to_block(prolog, entry);
  77   _cfg->unmap_node_from_block(start); // start is no longer in any block
  78 
  79   // Virtual methods need an unverified entry point
  80 
  81   if( is_osr_compilation() ) {
  82     if( PoisonOSREntry ) {
  83       // TODO: Should use a ShouldNotReachHereNode...
  84       _cfg->insert( broot, 0, new MachBreakpointNode() );
  85     }
  86   } else {
  87     if( _method && !_method->flags().is_static() ) {
  88       // Insert unvalidated entry point
  89       _cfg->insert( broot, 0, new MachUEPNode() );
  90     }
  91 
  92   }
  93 
  94   // Break before main entry point
  95   if ((_method && C->directive()->BreakAtExecuteOption) ||
  96       (OptoBreakpoint && is_method_compilation())       ||
  97       (OptoBreakpointOSR && is_osr_compilation())       ||
  98       (OptoBreakpointC2R && !_method)                   ) {
  99     // checking for _method means that OptoBreakpoint does not apply to
 100     // runtime stubs or frame converters
 101     _cfg->insert( entry, 1, new MachBreakpointNode() );
 102   }
 103 
 104   // Insert epilogs before every return
 105   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 106     Block* block = _cfg->get_block(i);
 107     if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point?
 108       Node* m = block->end();
 109       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 110         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 111         block->add_inst(epilog);
 112         _cfg->map_node_to_block(epilog, block);
 113       }
 114     }
 115   }
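Restating the epilog insertion above: a block is an exit if its non-connector successor is the root block, and unless it already ends in a Halt a MachEpilogNode is appended. The boolean constructor argument is the polling flag, assuming the usual MachEpilogNode(bool do_poll) constructor from machnode.hpp:

    // equivalent, with the parameter named
    MachEpilogNode* epilog =
        new MachEpilogNode(/* do_polling = */ m->as_Mach()->ideal_Opcode() == Op_Return);

so a normal return emits the safepoint-poll sequence in its epilog, while other exits such as rethrow or tail call skip it.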
 116 
 117   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
 118   blk_starts[0] = 0;
 119 
 120   // Initialize code buffer and process short branches.
 121   CodeBuffer* cb = init_buffer(blk_starts);
 122 
 123   if (cb == NULL || failing()) {
 124     return;
 125   }
 126 
 127   ScheduleAndBundle();
 128 
 129 #ifndef PRODUCT
 130   if (trace_opto_output()) {
 131     tty->print("\n---- After ScheduleAndBundle ----\n");
 132     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 133       tty->print("\nBB#%03d:\n", i);
 134       Block* block = _cfg->get_block(i);
 135       for (uint j = 0; j < block->number_of_nodes(); j++) {
 136         Node* n = block->get_node(j);
 137         OptoReg::Name reg = _regalloc->get_reg_first(n);
 138         tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
 139         n->dump();
 140       }
 141     }
 142   }
 143 #endif
 144 
 145   if (failing()) {
 146     return;


 271 
 272     // Sum all instruction sizes to compute block size
 273     uint last_inst = block->number_of_nodes();
 274     uint blk_size = 0;
 275     for (uint j = 0; j < last_inst; j++) {
 276       Node* nj = block->get_node(j);
 277       // Handle machine instruction nodes
 278       if (nj->is_Mach()) {
 279         MachNode *mach = nj->as_Mach();
 280         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 281         reloc_size += mach->reloc();
 282         if (mach->is_MachCall()) {
 283           // add size information for trampoline stub
 284           // class CallStubImpl is platform-specific and defined in the *.ad files.
 285           stub_size  += CallStubImpl::size_call_trampoline();
 286           reloc_size += CallStubImpl::reloc_call_trampoline();
 287 
 288           MachCallNode *mcall = mach->as_MachCall();
 289           // This destination address is NOT PC-relative
 290 
 291           mcall->method_set((intptr_t)mcall->entry_point());
 292 
 293           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 294             stub_size  += CompiledStaticCall::to_interp_stub_size();
 295             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 296 #if INCLUDE_AOT
 297             stub_size  += CompiledStaticCall::to_aot_stub_size();
 298             reloc_size += CompiledStaticCall::reloc_to_aot_stub();
 299 #endif
 300           }
 301         } else if (mach->is_MachSafePoint()) {
 302           // If call/safepoint are adjacent, account for possible
 303           // nop to disambiguate the two safepoints.
 304           // ScheduleAndBundle() can rearrange nodes in a block,
 305           // check for all offsets inside this block.
 306           if (last_call_adr >= blk_starts[i]) {
 307             blk_size += nop_size;
 308           }
 309         }
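Why an adjacent call/safepoint pair needs the extra nop budgeted above: debug info is keyed by PC offset, and a call's safepoint is recorded at its return address, so a poll landing at that same offset would collide with the call's record. Schematically (offsets made up):

    0x40  call Foo.bar      ; safepoint recorded at return address 0x45
    0x45  nop               ; keeps the next record at a distinct PC
    0x46  safepoint poll    ; second safepoint recorded at 0x46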
 310         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 311           // Nop is inserted between "avoid back to back" instructions.


 709     break;
 710   }
 711 }
 712 
 713 // Determine if this node starts a bundle
 714 bool Compile::starts_bundle(const Node *n) const {
 715   return (_node_bundling_limit > n->_idx &&
 716           _node_bundling_base[n->_idx].starts_bundle());
 717 }
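A sketch of how starts_bundle() is typically consulted during emission (hypothetical caller inside a Compile method, shown only for illustration; the real emission loop also flushes around calls and nops, as seen elsewhere in this file):

    if (starts_bundle(n)) {
      cb->flush_bundle(false);   // close the open bundle before emitting n
    }
    n->as_Mach()->emit(*cb, _regalloc);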
 718 
 719 //--------------------------Process_OopMap_Node--------------------------------
 720 void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
 721 
 722   // Handle special safepoint nodes for synchronization
 723   MachSafePointNode *sfn   = mach->as_MachSafePoint();
 724   MachCallNode      *mcall;
 725 
 726   int safepoint_pc_offset = current_offset;
 727   bool is_method_handle_invoke = false;
 728   bool return_oop = false;
 729 
 730   // Add the safepoint in the DebugInfoRecorder
 731   if( !mach->is_MachCall() ) {
 732     mcall = NULL;
 733     debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
 734   } else {
 735     mcall = mach->as_MachCall();
 736 
 737     // Is the call a MethodHandle call?
 738     if (mcall->is_MachCallJava()) {
 739       if (mcall->as_MachCallJava()->_method_handle_invoke) {
 740         assert(has_method_handle_invokes(), "must have been set during call generation");
 741         is_method_handle_invoke = true;
 742       }
 743     }
 744 
 745     // Check if a call returns an object.
 746     if (mcall->returns_pointer()) {
 747       return_oop = true;
 748     }
 749     safepoint_pc_offset += mcall->ret_addr_offset();
 750     debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
 751   }
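A worked example of the offset adjustment above (sizes illustrative): for a non-call safepoint the record lands at the instruction itself, but for a call it lands at the return address. A 5-byte call emitted at offset 0x40 has ret_addr_offset() = 5, so add_safepoint() is given 0x45, the PC the runtime actually sees in the frame while the call is in progress.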
 752 
 753   // Loop over the JVMState list to add scope information
 754   // Do not skip safepoints with a NULL method, they need monitor info
 755   JVMState* youngest_jvms = sfn->jvms();
 756   int max_depth = youngest_jvms->depth();
 757 
 758   // Allocate the object pool for scalar-replaced objects -- the map from
 759   // small-integer keys (which can be recorded in the local and ostack
 760   // arrays) to descriptions of the object state.
 761   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
 762 
 763   // Visit scopes from oldest to youngest.
 764   for (int depth = 1; depth <= max_depth; depth++) {
 765     JVMState* jvms = youngest_jvms->of_depth(depth);
 766     int idx;
 767     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
 768     // Safepoints that do not have method() set only provide oop-map and monitor info


 843       bool eliminated = (box_node->is_BoxLock() && box_node->as_BoxLock()->is_eliminated());
 844       monarray->append(new MonitorValue(scval, basic_lock, eliminated));
 845     }
 846 
 847     // We dump the object pool first, since deoptimization reads it in first.
 848     debug_info()->dump_object_pool(objs);
 849 
 850     // Build first class objects to pass to scope
 851     DebugToken *locvals = debug_info()->create_scope_values(locarray);
 852     DebugToken *expvals = debug_info()->create_scope_values(exparray);
 853     DebugToken *monvals = debug_info()->create_monitor_values(monarray);
 854 
 855     // Make method available for all Safepoints
 856     ciMethod* scope_method = method ? method : _method;
 857     // Describe the scope here
 858     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
 859     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
 860     // Now we can describe the scope.
 861     methodHandle null_mh;
 862     bool rethrow_exception = false;
 863     debug_info()->describe_scope(safepoint_pc_offset, null_mh, scope_method, jvms->bci(), jvms->should_reexecute(), rethrow_exception, is_method_handle_invoke, return_oop, locvals, expvals, monvals);
 864   } // End jvms loop
 865 
 866   // Mark the end of the scope set.
 867   debug_info()->end_safepoint(safepoint_pc_offset);
 868 }
 869 
 870 
 871 
 872 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
 873 class NonSafepointEmitter {
 874   Compile*  C;
 875   JVMState* _pending_jvms;
 876   int       _pending_offset;
 877 
 878   void emit_non_safepoint();
 879 
 880  public:
 881   NonSafepointEmitter(Compile* compile) {
 882     this->C = compile;
 883     _pending_jvms = NULL;


 952 }
 953 
 954 //------------------------------init_buffer------------------------------------
 955 CodeBuffer* Compile::init_buffer(uint* blk_starts) {
 956 
 957   // Set the initially allocated size
 958   int  code_req   = initial_code_capacity;
 959   int  locs_req   = initial_locs_capacity;
 960   int  stub_req   = initial_stub_capacity;
 961   int  const_req  = initial_const_capacity;
 962 
 963   int  pad_req    = NativeCall::instruction_size;
 964   // The extra spacing after the code is necessary on some platforms.
 965   // Sometimes we need to patch in a jump after the last instruction,
 966   // if the nmethod has been deoptimized.  (See 4932387, 4894843.)
 967 
 968   // Compute the byte offset where we can store the deopt pc.
 969   if (fixed_slots() != 0) {
 970     _orig_pc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_orig_pc_slot));
 971   }
 972 
 973   // Compute prolog code size
 974   _method_size = 0;
 975   _frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize;
 976 #if defined(IA64) && !defined(AIX)
 977   if (save_argument_registers()) {
 978     // 4815101: this is a stub with implicit and unknown precision fp args.
 979     // The usual spill mechanism can only generate stfd's in this case, which
 980     // doesn't work if the fp reg to spill contains a single-precision denorm.
 981     // Instead, we hack around the normal spill mechanism using stfspill's and
 982     // ldffill's in the MachProlog and MachEpilog emit methods.  We allocate
 983     // space here for the fp arg regs (f8-f15) we're going to thusly spill.
 984     //
 985     // If we ever implement 16-byte 'registers' == stack slots, we can
 986     // get rid of this hack and have SpillCopy generate stfspill/ldffill
 987     // instead of stfd/stfs/ldfd/ldfs.
 988     _frame_slots += 8*(16/BytesPerInt);
 989   }
 990 #endif
 991   assert(_frame_slots >= 0 && _frame_slots < 1000000, "sanity check");


1221           int nops_cnt = padding / nop_size;
1222           MachNode *nop = new MachNopNode(nops_cnt);
1223           block->insert_node(nop, j++);
1224           last_inst++;
1225           _cfg->map_node_to_block(nop, block);
1226           // Ensure enough space.
1227           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1228           if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1229             C->record_failure("CodeCache is full");
1230             return;
1231           }
1232           nop->emit(*cb, _regalloc);
1233           cb->flush_bundle(true);
1234           current_offset = cb->insts_size();
1235         }
1236 
1237         // Remember the start of the last call in a basic block
1238         if (is_mcall) {
1239           MachCallNode *mcall = mach->as_MachCall();
1240 
1241           // This destination address is NOT PC-relative
1242           mcall->method_set((intptr_t)mcall->entry_point());
1243 
1244           // Save the return address
1245           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1246 
1247           if (mcall->is_MachCallLeaf()) {
1248             is_mcall = false;
1249             is_sfn = false;
1250           }
1251         }
1252 
1253         // sfn will be valid whenever mcall is valid now because of inheritance
1254         if (is_sfn || is_mcall) {
1255 
1256           // Handle special safepoint nodes for synchronization
1257           if (!is_mcall) {
1258             MachSafePointNode *sfn = mach->as_MachSafePoint();
1259             // !!!!! Stubs only need an oopmap right now, so bail out
1260             if (sfn->jvms()->method() == NULL) {
1261               // Write the oopmap directly to the code blob??!!
1262               continue;


src/hotspot/share/opto/output.cpp  (patched version; the original version is above)

  54 
  55 // Convert Nodes to instruction bits and pass off to the VM
  56 void Compile::Output() {
  57   // RootNode goes
  58   assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
  59 
  60   // The number of new nodes (mostly MachNop) is proportional to
  61   // the number of java calls and inner loops which are aligned.
  62   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
  63                             C->inner_loops()*(OptoLoopAlignment-1)),
  64                            "out of nodes before code generation" ) ) {
  65     return;
  66   }
  67   // Make sure I can find the Start Node
  68   Block *entry = _cfg->get_block(1);
  69   Block *broot = _cfg->get_root_block();
  70 
  71   const StartNode *start = entry->head()->as_Start();
  72 
  73   // Replace StartNode with prolog
  74   Label verified_entry;
  75   MachPrologNode* prolog = new MachPrologNode(&verified_entry);
  76   entry->map_node(prolog, 0);
  77   _cfg->map_node_to_block(prolog, entry);
  78   _cfg->unmap_node_from_block(start); // start is no longer in any block
  79 
  80   // Virtual methods need an unverified entry point
  81   if (is_osr_compilation()) {
  82     if (PoisonOSREntry) {
  83       // TODO: Should use a ShouldNotReachHereNode...
  84       _cfg->insert( broot, 0, new MachBreakpointNode() );
  85     }
  86   } else {
  87     if (_method) {
  88       if (_method->has_scalarized_args()) {
  89         // Add entry point to unpack all value type arguments
  90         _cfg->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true, /* receiver_only */ false));
  91         if (!_method->is_static()) {
  92           // Add verified/unverified entry points to only unpack value type receiver at interface calls
  93           _cfg->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ false));
  94           _cfg->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ true,  /* receiver_only */ true));
  95           _cfg->insert(broot, 0, new MachVEPNode(&verified_entry, /* verified */ false, /* receiver_only */ true));
  96         }
  97       } else if (!_method->is_static()) {
  98         // Insert unvalidated entry point
  99         _cfg->insert(broot, 0, new MachUEPNode());
 100       }
 101     }
 102   }
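A note on the four MachVEPNode inserts above: each goes in at index 0 of the root block, so the last insert ends up first. Front to back the block therefore starts with the following nodes, matching the entry-point offsets computed after init_buffer() below:

    broot->get_node(0)   // unverified, receiver-only   -> CodeOffsets::Entry
    broot->get_node(1)   // verified,   receiver-only   -> CodeOffsets::Verified_Value_Entry_RO
    broot->get_node(2)   // unverified, all arguments   -> CodeOffsets::Value_Entry
    broot->get_node(3)   // verified,   all arguments   -> CodeOffsets::Verified_Value_Entry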
 103 
 104   // Break before main entry point
 105   if ((_method && C->directive()->BreakAtExecuteOption) ||
 106       (OptoBreakpoint && is_method_compilation())       ||
 107       (OptoBreakpointOSR && is_osr_compilation())       ||
 108       (OptoBreakpointC2R && !_method)                   ) {
 109     // checking for _method means that OptoBreakpoint does not apply to
 110     // runtime stubs or frame converters
 111     _cfg->insert( entry, 1, new MachBreakpointNode() );
 112   }
 113 
 114   // Insert epilogs before every return
 115   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 116     Block* block = _cfg->get_block(i);
 117     if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point?
 118       Node* m = block->end();
 119       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
 120         MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
 121         block->add_inst(epilog);
 122         _cfg->map_node_to_block(epilog, block);
 123       }
 124     }
 125   }
 126 
 127   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
 128   blk_starts[0] = 0;
 129 
 130   // Initialize code buffer and process short branches.
 131   CodeBuffer* cb = init_buffer(blk_starts);
 132 
 133   if (cb == NULL || failing()) {
 134     return;
 135   }
 136 
 137   if (!is_osr_compilation() && _method && _method->has_scalarized_args()) {
 138     // Compute the offsets of the entry points required by the value type calling convention
 139     if (!_method->is_static()) {
 140       // We have entries at the beginning of the method, implemented by the first 4 nodes.
 141       // Entry                     (unverified) @ offset 0
 142       // Verified_Value_Entry_RO
 143       // Value_Entry               (unverified)
 144       // Verified_Value_Entry
 145       uint offset = 0;
 146       _code_offsets.set_value(CodeOffsets::Entry, offset);
 147 
 148       offset += ((MachVEPNode*)broot->get_node(0))->size(_regalloc);
 149       _code_offsets.set_value(CodeOffsets::Verified_Value_Entry_RO, offset);
 150 
 151       offset += ((MachVEPNode*)broot->get_node(1))->size(_regalloc);
 152       _code_offsets.set_value(CodeOffsets::Value_Entry, offset);
 153 
 154       offset += ((MachVEPNode*)broot->get_node(2))->size(_regalloc);
 155       _code_offsets.set_value(CodeOffsets::Verified_Value_Entry, offset);
 156     } else {
 157       _code_offsets.set_value(CodeOffsets::Entry, -1); // will be patched later
 158       _code_offsets.set_value(CodeOffsets::Verified_Value_Entry, 0);
 159     }
 160   }
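For illustration only (real sizes come from MachVEPNode::size(_regalloc) and are platform-dependent): if each of the first three entry nodes emitted 16 bytes, the recorded offsets would be Entry = 0, Verified_Value_Entry_RO = 16, Value_Entry = 32, Verified_Value_Entry = 48. Callers dispatch to whichever entry matches their call shape: verified entries skip the receiver class check, and the RO ("receiver only") entries unpack just the value-type receiver at interface calls, per the comments where the nodes are inserted.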
 161 
 162   ScheduleAndBundle();
 163 
 164 #ifndef PRODUCT
 165   if (trace_opto_output()) {
 166     tty->print("\n---- After ScheduleAndBundle ----\n");
 167     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 168       tty->print("\nBB#%03d:\n", i);
 169       Block* block = _cfg->get_block(i);
 170       for (uint j = 0; j < block->number_of_nodes(); j++) {
 171         Node* n = block->get_node(j);
 172         OptoReg::Name reg = _regalloc->get_reg_first(n);
 173         tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
 174         n->dump();
 175       }
 176     }
 177   }
 178 #endif
 179 
 180   if (failing()) {
 181     return;


 306 
 307     // Sum all instruction sizes to compute block size
 308     uint last_inst = block->number_of_nodes();
 309     uint blk_size = 0;
 310     for (uint j = 0; j < last_inst; j++) {
 311       Node* nj = block->get_node(j);
 312       // Handle machine instruction nodes
 313       if (nj->is_Mach()) {
 314         MachNode *mach = nj->as_Mach();
 315         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
 316         reloc_size += mach->reloc();
 317         if (mach->is_MachCall()) {
 318           // add size information for trampoline stub
 319           // class CallStubImpl is platform-specific and defined in the *.ad files.
 320           stub_size  += CallStubImpl::size_call_trampoline();
 321           reloc_size += CallStubImpl::reloc_call_trampoline();
 322 
 323           MachCallNode *mcall = mach->as_MachCall();
 324           // This destination address is NOT PC-relative
 325 
 326           if (mcall->entry_point() != NULL) {
 327             mcall->method_set((intptr_t)mcall->entry_point());
 328           }
 329 
 330           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
 331             stub_size  += CompiledStaticCall::to_interp_stub_size();
 332             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
 333 #if INCLUDE_AOT
 334             stub_size  += CompiledStaticCall::to_aot_stub_size();
 335             reloc_size += CompiledStaticCall::reloc_to_aot_stub();
 336 #endif
 337           }
 338         } else if (mach->is_MachSafePoint()) {
 339           // If call/safepoint are adjacent, account for possible
 340           // nop to disambiguate the two safepoints.
 341           // ScheduleAndBundle() can rearrange nodes in a block,
 342           // check for all offsets inside this block.
 343           if (last_call_adr >= blk_starts[i]) {
 344             blk_size += nop_size;
 345           }
 346         }
 347         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
 348           // Nop is inserted between "avoid back to back" instructions.


 746     break;
 747   }
 748 }
 749 
 750 // Determine if this node starts a bundle
 751 bool Compile::starts_bundle(const Node *n) const {
 752   return (_node_bundling_limit > n->_idx &&
 753           _node_bundling_base[n->_idx].starts_bundle());
 754 }
 755 
 756 //--------------------------Process_OopMap_Node--------------------------------
 757 void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
 758 
 759   // Handle special safepoint nodes for synchronization
 760   MachSafePointNode *sfn   = mach->as_MachSafePoint();
 761   MachCallNode      *mcall;
 762 
 763   int safepoint_pc_offset = current_offset;
 764   bool is_method_handle_invoke = false;
 765   bool return_oop = false;
 766   bool return_vt = false;
 767 
 768   // Add the safepoint in the DebugInfoRecorder
 769   if( !mach->is_MachCall() ) {
 770     mcall = NULL;
 771     debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
 772   } else {
 773     mcall = mach->as_MachCall();
 774 
 775     // Is the call a MethodHandle call?
 776     if (mcall->is_MachCallJava()) {
 777       if (mcall->as_MachCallJava()->_method_handle_invoke) {
 778         assert(has_method_handle_invokes(), "must have been set during call generation");
 779         is_method_handle_invoke = true;
 780       }
 781     }
 782 
 783     // Check if a call returns an object.
 784     if (mcall->returns_pointer() || mcall->returns_vt()) {
 785       return_oop = true;
 786     }
 787     if (mcall->returns_vt()) {
 788       return_vt = true;
 789     }
 790     safepoint_pc_offset += mcall->ret_addr_offset();
 791     debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
 792   }
 793 
 794   // Loop over the JVMState list to add scope information
 795   // Do not skip safepoints with a NULL method, they need monitor info
 796   JVMState* youngest_jvms = sfn->jvms();
 797   int max_depth = youngest_jvms->depth();
 798 
 799   // Allocate the object pool for scalar-replaced objects -- the map from
 800   // small-integer keys (which can be recorded in the local and ostack
 801   // arrays) to descriptions of the object state.
 802   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
 803 
 804   // Visit scopes from oldest to youngest.
 805   for (int depth = 1; depth <= max_depth; depth++) {
 806     JVMState* jvms = youngest_jvms->of_depth(depth);
 807     int idx;
 808     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
 809     // Safepoints that do not have method() set only provide oop-map and monitor info


 884       bool eliminated = (box_node->is_BoxLock() && box_node->as_BoxLock()->is_eliminated());
 885       monarray->append(new MonitorValue(scval, basic_lock, eliminated));
 886     }
 887 
 888     // We dump the object pool first, since deoptimization reads it in first.
 889     debug_info()->dump_object_pool(objs);
 890 
 891     // Build first class objects to pass to scope
 892     DebugToken *locvals = debug_info()->create_scope_values(locarray);
 893     DebugToken *expvals = debug_info()->create_scope_values(exparray);
 894     DebugToken *monvals = debug_info()->create_monitor_values(monarray);
 895 
 896     // Make method available for all Safepoints
 897     ciMethod* scope_method = method ? method : _method;
 898     // Describe the scope here
 899     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
 900     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
 901     // Now we can describe the scope.
 902     methodHandle null_mh;
 903     bool rethrow_exception = false;
 904     debug_info()->describe_scope(safepoint_pc_offset, null_mh, scope_method, jvms->bci(), jvms->should_reexecute(), rethrow_exception, is_method_handle_invoke, return_oop, return_vt, locvals, expvals, monvals);
 905   } // End jvms loop
 906 
 907   // Mark the end of the scope set.
 908   debug_info()->end_safepoint(safepoint_pc_offset);
 909 }
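Restating the new flag logic above (no behavior added):

    bool return_oop = mcall->returns_pointer() || mcall->returns_vt();
    bool return_vt  = mcall->returns_vt();

A value-type return is always also treated as an oop return, so the GC maps cover the buffered value; return_vt is the extra bit recorded in the scope descriptor, presumably so deoptimization can recognize that the value may come back in scalarized form.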
 910 
 911 
 912 
 913 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
 914 class NonSafepointEmitter {
 915   Compile*  C;
 916   JVMState* _pending_jvms;
 917   int       _pending_offset;
 918 
 919   void emit_non_safepoint();
 920 
 921  public:
 922   NonSafepointEmitter(Compile* compile) {
 923     this->C = compile;
 924     _pending_jvms = NULL;


 993 }
 994 
 995 //------------------------------init_buffer------------------------------------
 996 CodeBuffer* Compile::init_buffer(uint* blk_starts) {
 997 
 998   // Set the initially allocated size
 999   int  code_req   = initial_code_capacity;
1000   int  locs_req   = initial_locs_capacity;
1001   int  stub_req   = initial_stub_capacity;
1002   int  const_req  = initial_const_capacity;
1003 
1004   int  pad_req    = NativeCall::instruction_size;
1005   // The extra spacing after the code is necessary on some platforms.
1006   // Sometimes we need to patch in a jump after the last instruction,
1007   // if the nmethod has been deoptimized.  (See 4932387, 4894843.)
1008 
1009   // Compute the byte offset where we can store the deopt pc.
1010   if (fixed_slots() != 0) {
1011     _orig_pc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_orig_pc_slot));
1012   }
1013   if (C->needs_stack_repair()) {
1014     // Compute the byte offset of the stack increment value
1015     _sp_inc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_sp_inc_slot));
1016   }
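A rough worked example of the two slot-to-offset conversions above (slot numbers illustrative; a stack slot is VMRegImpl::stack_slot_size = 4 bytes): with _orig_pc_slot = 6, reg2offset(stack2reg(6)) is about 6 * 4 = 24 bytes from the frame base, which is where the original PC is stashed when the nmethod is deoptimized. The new _sp_inc_slot_offset_in_bytes is computed the same way for the stack-increment slot that stack repair (part of the scalarized calling convention) uses to record how much a method grew its own frame.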
1017 
1018   // Compute prolog code size
1019   _method_size = 0;
1020   _frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize;
1021 #if defined(IA64) && !defined(AIX)
1022   if (save_argument_registers()) {
1023     // 4815101: this is a stub with implicit and unknown precision fp args.
1024     // The usual spill mechanism can only generate stfd's in this case, which
1025     // doesn't work if the fp reg to spill contains a single-precision denorm.
1026     // Instead, we hack around the normal spill mechanism using stfspill's and
1027     // ldffill's in the MachProlog and MachEpilog emit methods.  We allocate
1028     // space here for the fp arg regs (f8-f15) we're going to thusly spill.
1029     //
1030     // If we ever implement 16-byte 'registers' == stack slots, we can
1031     // get rid of this hack and have SpillCopy generate stfspill/ldffill
1032     // instead of stfd/stfs/ldfd/ldfs.
1033     _frame_slots += 8*(16/BytesPerInt);
1034   }
1035 #endif
1036   assert(_frame_slots >= 0 && _frame_slots < 1000000, "sanity check");


1266           int nops_cnt = padding / nop_size;
1267           MachNode *nop = new MachNopNode(nops_cnt);
1268           block->insert_node(nop, j++);
1269           last_inst++;
1270           _cfg->map_node_to_block(nop, block);
1271           // Ensure enough space.
1272           cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1273           if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1274             C->record_failure("CodeCache is full");
1275             return;
1276           }
1277           nop->emit(*cb, _regalloc);
1278           cb->flush_bundle(true);
1279           current_offset = cb->insts_size();
1280         }
1281 
1282         // Remember the start of the last call in a basic block
1283         if (is_mcall) {
1284           MachCallNode *mcall = mach->as_MachCall();
1285 
1286           if (mcall->entry_point() != NULL) {
1287             // This destination address is NOT PC-relative
1288             mcall->method_set((intptr_t)mcall->entry_point());
1289           }
1290 
1291           // Save the return address
1292           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1293 
1294           if (mcall->is_MachCallLeaf()) {
1295             is_mcall = false;
1296             is_sfn = false;
1297           }
1298         }
1299 
1300         // sfn will be valid whenever mcall is valid now because of inheritance
1301         if (is_sfn || is_mcall) {
1302 
1303           // Handle special safepoint nodes for synchronization
1304           if (!is_mcall) {
1305             MachSafePointNode *sfn = mach->as_MachSafePoint();
1306             // !!!!! Stubs only need an oopmap right now, so bail out
1307             if (sfn->jvms()->method() == NULL) {
1308               // Write the oopmap directly to the code blob??!!
1309               continue;

