154 }
155
156 assert(method->holder()->is_loaded() , "method holder must be loaded");
157
158 // build graph if monitor pairing is ok
159 if (create_graph && monitor_pairing_ok()) _start = build_graph(compilation, osr_bci);
160 }
161
162
163 int IRScope::max_stack() const {
164 int my_max = method()->max_stack();
165 int callee_max = 0;
166 for (int i = 0; i < number_of_callees(); i++) {
167 callee_max = MAX2(callee_max, callee_no(i)->max_stack());
168 }
169 return my_max + callee_max;
170 }
171
172
173 bool IRScopeDebugInfo::should_reexecute() {
174 ciMethod* cur_method = scope()->method();
175 int cur_bci = bci();
176 if (cur_method != nullptr && cur_bci != SynchronizationEntryBCI) {
177 Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
178 return Interpreter::bytecode_should_reexecute(code);
179 } else
180 return false;
181 }
182
183
184 // Implementation of CodeEmitInfo
185
186 // Stack must be NON-null
// Primary constructor: captures the JVM state (ValueStack) at an emission
// point. The scope is derived from the stack, so the stack must be non-null
// (asserted below). Debug info and the oop map are filled in later.
CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception)
  : _scope_debug_info(nullptr)   // built lazily when debug info is recorded
  , _scope(stack->scope())       // scope comes from the captured stack
  , _exception_handlers(exception_handlers)
  , _oop_map(nullptr)            // created later, see add_register_oop users
  , _stack(stack)
  , _deoptimize_on_exception(deoptimize_on_exception)
  , _force_reexecute(false) {
  assert(_stack != nullptr, "must be non null");
}
197
198
199 CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack)
200 : _scope_debug_info(nullptr)
201 , _scope(info->_scope)
202 , _exception_handlers(nullptr)
203 , _oop_map(nullptr)
204 , _stack(stack == nullptr ? info->_stack : stack)
205 , _deoptimize_on_exception(info->_deoptimize_on_exception)
206 , _force_reexecute(info->_force_reexecute) {
207
208 // deep copy of exception handlers
209 if (info->_exception_handlers != nullptr) {
210 _exception_handlers = new XHandlers(info->_exception_handlers);
211 }
212 }
213
214
215 void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
216 // record the safepoint before recording the debug info for enclosing scopes
217 recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
218 bool reexecute = _force_reexecute || _scope_debug_info->should_reexecute();
219 _scope_debug_info->record_debug_info(recorder, pc_offset, reexecute);
220 recorder->end_safepoint(pc_offset);
221 }
222
223
224 void CodeEmitInfo::add_register_oop(LIR_Opr opr) {
225 assert(_oop_map != nullptr, "oop map must already exist");
226 assert(opr->is_single_cpu(), "should not call otherwise");
227
228 VMReg name = frame_map()->regname(opr);
229 _oop_map->set_oop(name);
230 }
231
232 // Mirror the stack size calculation in the deopt code
233 // How much stack space would we need at this point in the program in
234 // case of deoptimization?
235 int CodeEmitInfo::interpreter_frame_size() const {
236 ValueStack* state = _stack;
237 int size = 0;
238 int callee_parameters = 0;
239 int callee_locals = 0;
|
154 }
155
156 assert(method->holder()->is_loaded() , "method holder must be loaded");
157
158 // build graph if monitor pairing is ok
159 if (create_graph && monitor_pairing_ok()) _start = build_graph(compilation, osr_bci);
160 }
161
162
163 int IRScope::max_stack() const {
164 int my_max = method()->max_stack();
165 int callee_max = 0;
166 for (int i = 0; i < number_of_callees(); i++) {
167 callee_max = MAX2(callee_max, callee_no(i)->max_stack());
168 }
169 return my_max + callee_max;
170 }
171
172
173 bool IRScopeDebugInfo::should_reexecute() {
174 if (_should_reexecute) {
175 return true;
176 }
177 ciMethod* cur_method = scope()->method();
178 int cur_bci = bci();
179 if (cur_method != nullptr && cur_bci != SynchronizationEntryBCI) {
180 Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
181 return Interpreter::bytecode_should_reexecute(code);
182 } else
183 return false;
184 }
185
186 // Implementation of CodeEmitInfo
187
188 // Stack must be NON-null
// Primary constructor: captures the JVM state (ValueStack) at an emission
// point. The scope is derived from the stack, so the stack must be non-null
// (asserted below). Debug info and the oop map are filled in later.
CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception)
  : _scope_debug_info(nullptr)   // built lazily when debug info is recorded
  , _scope(stack->scope())       // scope comes from the captured stack
  , _exception_handlers(exception_handlers)
  , _oop_map(nullptr)            // created later, see add_register_oop users
  , _stack(stack)
  , _deoptimize_on_exception(deoptimize_on_exception)
  , _force_reexecute(false) {
  assert(_stack != nullptr, "must be non null");
}
199
200
201 CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack)
202 : _scope_debug_info(nullptr)
203 , _scope(info->_scope)
204 , _exception_handlers(nullptr)
205 , _oop_map(nullptr)
206 , _stack(stack == nullptr ? info->_stack : stack)
207 , _deoptimize_on_exception(info->_deoptimize_on_exception)
208 , _force_reexecute(info->_force_reexecute) {
209
210 // deep copy of exception handlers
211 if (info->_exception_handlers != nullptr) {
212 _exception_handlers = new XHandlers(info->_exception_handlers);
213 }
214 }
215
216
217 void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool maybe_return_as_fields) {
218 // record the safepoint before recording the debug info for enclosing scopes
219 recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
220 bool reexecute = _force_reexecute || _scope_debug_info->should_reexecute();
221 _scope_debug_info->record_debug_info(recorder, pc_offset, reexecute, maybe_return_as_fields);
222 recorder->end_safepoint(pc_offset);
223 }
224
225
226 void CodeEmitInfo::add_register_oop(LIR_Opr opr) {
227 assert(_oop_map != nullptr, "oop map must already exist");
228 assert(opr->is_single_cpu(), "should not call otherwise");
229
230 VMReg name = frame_map()->regname(opr);
231 _oop_map->set_oop(name);
232 }
233
234 // Mirror the stack size calculation in the deopt code
235 // How much stack space would we need at this point in the program in
236 // case of deoptimization?
237 int CodeEmitInfo::interpreter_frame_size() const {
238 ValueStack* state = _stack;
239 int size = 0;
240 int callee_parameters = 0;
241 int callee_locals = 0;
|