< prev index next >

src/hotspot/share/c1/c1_IR.cpp

Print this page

 154   }
 155 
 156   assert(method->holder()->is_loaded() , "method holder must be loaded");
 157 
 158   // build graph if monitor pairing is ok
 159   if (create_graph && monitor_pairing_ok()) _start = build_graph(compilation, osr_bci);
 160 }
 161 
 162 
 163 int IRScope::max_stack() const {
 164   int my_max = method()->max_stack();
 165   int callee_max = 0;
 166   for (int i = 0; i < number_of_callees(); i++) {
 167     callee_max = MAX2(callee_max, callee_no(i)->max_stack());
 168   }
 169   return my_max + callee_max;
 170 }
 171 
 172 
 173 bool IRScopeDebugInfo::should_reexecute() {



 174   ciMethod* cur_method = scope()->method();
 175   int       cur_bci    = bci();
 176   if (cur_method != nullptr && cur_bci != SynchronizationEntryBCI) {
 177     Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
 178     return Interpreter::bytecode_should_reexecute(code);
 179   } else
 180     return false;
 181 }
 182 
 183 
 184 // Implementation of CodeEmitInfo
 185 
 186 // Stack must be NON-null
// Primary constructor: snapshots the deopt/debug state for a code-emission
// site. The scope is derived from the ValueStack, so the stack must be
// non-null (also asserted below). The oop map and scope debug info are
// left null here; they are filled in later by the code emitter.
 187 CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception)
 188   : _scope_debug_info(nullptr)
 189   , _scope(stack->scope())
 190   , _exception_handlers(exception_handlers)
 191   , _oop_map(nullptr)
 192   , _stack(stack)
 193   , _is_method_handle_invoke(false)
 194   , _deoptimize_on_exception(deoptimize_on_exception)
 195   , _force_reexecute(false) {
 196   assert(_stack != nullptr, "must be non null");
 197 }
 198 
 199 
// Copying constructor: reuses the state of an existing CodeEmitInfo,
// optionally substituting a different ValueStack (nullptr keeps the
// original's stack). Exception handlers are deep-copied so this instance
// owns its own XHandlers list; the oop map is not copied.
 202 CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack)
 203   : _scope_debug_info(nullptr)
 204   , _scope(info->_scope)
 205   , _exception_handlers(nullptr)
 206   , _oop_map(nullptr)
 207   , _stack(stack == nullptr ? info->_stack : stack)
 208   , _is_method_handle_invoke(info->_is_method_handle_invoke)
 209   , _deoptimize_on_exception(info->_deoptimize_on_exception)
 210   , _force_reexecute(info->_force_reexecute) {
 211 
 212   // deep copy of exception handlers
 213   if (info->_exception_handlers != nullptr) {
 214     _exception_handlers = new XHandlers(info->_exception_handlers);
 215   }
 216 }
 215 
 216 
// Emit this site's debug information into the recorder: open a safepoint
// with a copy of the oop map, record the scope debug info, then close the
// safepoint. Requires that _oop_map and _scope_debug_info have been set.
 217 void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
 218   // record the safepoint before recording the debug info for enclosing scopes
 219   recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
// An explicitly forced re-execution overrides the per-bytecode decision.
 220   bool reexecute = _force_reexecute || _scope_debug_info->should_reexecute();
 221   _scope_debug_info->record_debug_info(recorder, pc_offset, reexecute, _is_method_handle_invoke);
 222   recorder->end_safepoint(pc_offset);
 223 }
 224 
 225 
// Mark the register assigned to 'opr' as holding an oop in this site's
// oop map. Only valid for single-cpu operands, and only after the oop map
// has been created.
 226 void CodeEmitInfo::add_register_oop(LIR_Opr opr) {
 227   assert(_oop_map != nullptr, "oop map must already exist");
 228   assert(opr->is_single_cpu(), "should not call otherwise");
 229 
 230   VMReg name = frame_map()->regname(opr);
 231   _oop_map->set_oop(name);
 232 }
 233 
 234 // Mirror the stack size calculation in the deopt code
 235 // How much stack space would we need at this point in the program in
 236 // case of deoptimization?
 237 int CodeEmitInfo::interpreter_frame_size() const {
 238   ValueStack* state = _stack;
 239   int size = 0;
 240   int callee_parameters = 0;
 241   int callee_locals = 0;

 154   }
 155 
 156   assert(method->holder()->is_loaded() , "method holder must be loaded");
 157 
 158   // build graph if monitor pairing is ok
 159   if (create_graph && monitor_pairing_ok()) _start = build_graph(compilation, osr_bci);
 160 }
 161 
 162 
 163 int IRScope::max_stack() const {
 164   int my_max = method()->max_stack();
 165   int callee_max = 0;
 166   for (int i = 0; i < number_of_callees(); i++) {
 167     callee_max = MAX2(callee_max, callee_no(i)->max_stack());
 168   }
 169   return my_max + callee_max;
 170 }
 171 
 172 
 173 bool IRScopeDebugInfo::should_reexecute() {
 174   if (_should_reexecute) {
 175     return true;
 176   }
 177   ciMethod* cur_method = scope()->method();
 178   int       cur_bci    = bci();
 179   if (cur_method != nullptr && cur_bci != SynchronizationEntryBCI) {
 180     Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
 181     return Interpreter::bytecode_should_reexecute(code);
 182   } else
 183     return false;
 184 }
 185 

 186 // Implementation of CodeEmitInfo
 187 
 188 // Stack must be NON-null
// Primary constructor: snapshots the deopt/debug state for a code-emission
// site. The scope is derived from the ValueStack, so the stack must be
// non-null (also asserted below). The oop map and scope debug info are
// left null here; they are filled in later by the code emitter.
 189 CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception)
 190   : _scope_debug_info(nullptr)
 191   , _scope(stack->scope())
 192   , _exception_handlers(exception_handlers)
 193   , _oop_map(nullptr)
 194   , _stack(stack)
 195   , _is_method_handle_invoke(false)
 196   , _deoptimize_on_exception(deoptimize_on_exception)
 197   , _force_reexecute(false) {
 198   assert(_stack != nullptr, "must be non null");
 199 }
 200 
 201 
// Copying constructor: reuses the state of an existing CodeEmitInfo,
// optionally substituting a different ValueStack (nullptr keeps the
// original's stack). Exception handlers are deep-copied so this instance
// owns its own XHandlers list; the oop map is not copied.
 202 CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack)
 203   : _scope_debug_info(nullptr)
 204   , _scope(info->_scope)
 205   , _exception_handlers(nullptr)
 206   , _oop_map(nullptr)
 207   , _stack(stack == nullptr ? info->_stack : stack)
 208   , _is_method_handle_invoke(info->_is_method_handle_invoke)
 209   , _deoptimize_on_exception(info->_deoptimize_on_exception)
 210   , _force_reexecute(info->_force_reexecute) {
 211 
 212   // deep copy of exception handlers
 213   if (info->_exception_handlers != nullptr) {
 214     _exception_handlers = new XHandlers(info->_exception_handlers);
 215   }
 216 }
 217 
 218 
// Emit this site's debug information into the recorder: open a safepoint
// with a copy of the oop map, record the scope debug info, then close the
// safepoint. 'maybe_return_as_fields' is passed through to the scope
// debug-info recording. Requires that _oop_map and _scope_debug_info have
// been set.
 219 void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool maybe_return_as_fields) {
 220   // record the safepoint before recording the debug info for enclosing scopes
 221   recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
// An explicitly forced re-execution overrides the per-bytecode decision.
 222   bool reexecute = _force_reexecute || _scope_debug_info->should_reexecute();
 223   _scope_debug_info->record_debug_info(recorder, pc_offset, reexecute, _is_method_handle_invoke, maybe_return_as_fields);
 224   recorder->end_safepoint(pc_offset);
 225 }
 226 
 227 
// Mark the register assigned to 'opr' as holding an oop in this site's
// oop map. Only valid for single-cpu operands, and only after the oop map
// has been created.
 228 void CodeEmitInfo::add_register_oop(LIR_Opr opr) {
 229   assert(_oop_map != nullptr, "oop map must already exist");
 230   assert(opr->is_single_cpu(), "should not call otherwise");
 231 
 232   VMReg name = frame_map()->regname(opr);
 233   _oop_map->set_oop(name);
 234 }
 235 
 236 // Mirror the stack size calculation in the deopt code
 237 // How much stack space would we need at this point in the program in
 238 // case of deoptimization?
 239 int CodeEmitInfo::interpreter_frame_size() const {
 240   ValueStack* state = _stack;
 241   int size = 0;
 242   int callee_parameters = 0;
 243   int callee_locals = 0;
< prev index next >