/*
 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_C1_C1_GRAPHBUILDER_HPP
#define SHARE_C1_C1_GRAPHBUILDER_HPP

#include "c1/c1_Instruction.hpp"
#include "c1/c1_IR.hpp"
#include "c1/c1_ValueMap.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciMethodData.hpp"
#include "ci/ciStreams.hpp"
#include "compiler/compileLog.hpp"

class MemoryBuffer;
class DelayedLoadIndexed;

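// Describes a not-yet-emitted access to a flat (inline type) field: instead of materializing
// the flat field value itself, we remember the accessed object, its holder klass and the
// accumulated field offset (grown via inc_offset()) so that a later access to one of the
// nested fields can be folded into a single access relative to the original object.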
class DelayedFieldAccess : public CompilationResourceObj {
private:
  Value            _obj;
  ciInstanceKlass* _holder;
  int              _offset;
  ValueStack*      _state_before;

public:
  DelayedFieldAccess(Value obj, ciInstanceKlass* holder, int offset, ValueStack* state_before)
  : _obj(obj), _holder(holder), _offset(offset), _state_before(state_before) { }

  Value obj() const                { return _obj; }
  ciInstanceKlass* holder() const  { return _holder; }
  int offset() const               { return _offset; }
  void inc_offset(int offset)      { _offset += offset; }
  ValueStack* state_before() const { return _state_before; }
};

class GraphBuilder {
  friend class JfrResolution;
 private:
  // Per-scope data. These are pushed and popped as we descend into
  // inlined methods. Currently in order to generate good code in the
  // inliner we have to attempt to inline methods directly into the
  // basic block we are parsing; this adds complexity.
  class ScopeData: public CompilationResourceObj {
   private:
    ScopeData*  _parent;
    // bci-to-block mapping
    BlockList*  _bci2block;
    // Scope
    IRScope*    _scope;
    // Whether this scope or any parent scope has exception handlers
    bool        _has_handler;
    // The bytecodes
    ciBytecodeStream* _stream;

    // Work list
    BlockList*  _work_list;

    // Maximum inline size for this scope
    intx        _max_inline_size;
    // Expression stack depth at point where inline occurred
    int         _caller_stack_size;

    // The continuation point for the inline. Currently only used in
    // multi-block inlines, but eventually we would like to use this for
    // all inlines for uniformity and simplicity; in that case we would
    // get the continuation point from the BlockList instead of
    // fabricating it anew, because Invokes would be considered to be
    // BlockEnds.
    BlockBegin* _continuation;

    // Was this ScopeData created only for the parsing and inlining of
    // a jsr?
    bool        _parsing_jsr;
    // We track the destination bci of the jsr only to determine
    // bailout conditions, since we only handle a subset of all of the
    // possible jsr-ret control structures. Recursive invocations of a
    // jsr are disallowed by the verifier.
    int         _jsr_entry_bci;
    // We need to track the local variable in which the return address
    // was stored to ensure we can handle inlining the jsr, because we
    // don't handle arbitrary jsr/ret constructs.
    int         _jsr_ret_addr_local;
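    // Illustrative shape of the (old-style compiled finally) construct we support:
    //   jsr L_sub           // pushes the return address; _jsr_entry_bci = bci of L_sub
    //   ...
    //   L_sub: astore_2     // return address saved in local 2 => _jsr_ret_addr_local = 2
    //          ...
    //          ret 2        // branches back to the bytecode following the jsr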
    // If we are parsing a jsr, the continuation point for rets
    BlockBegin* _jsr_continuation;
    // Cloned XHandlers for jsr-related ScopeDatas
    XHandlers*  _jsr_xhandlers;

    // Number of returns seen in this scope
    int         _num_returns;

    // In order to generate profitable code for inlining, we currently
    // have to perform an optimization for single-block inlined
    // methods where we continue parsing into the same block. This
    // allows us to perform CSE across inlined scopes and to avoid
    // storing parameters to the stack. Having a global register
    // allocator and being able to perform global CSE would allow this
    // code to be removed and thereby simplify the inliner.
    BlockBegin*  _cleanup_block;       // The block to which the return was added
    Instruction* _cleanup_return_prev; // Instruction before return instruction
    ValueStack*  _cleanup_state;       // State of that block (not yet pinned)
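    // Illustrative example: when a small single-block accessor is inlined into the middle of
    // the caller's block, the block, the instruction preceding the callee's Return and the
    // state at that point are recorded here; the inliner can then drop the Return and resume
    // appending the caller's bytecodes to the very same block.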

    // When inlining do not push the result on the stack
    bool _ignore_return;

   public:
    ScopeData(ScopeData* parent);

    ScopeData* parent() const                       { return _parent;             }

    BlockList* bci2block() const                    { return _bci2block;          }
    void       set_bci2block(BlockList* bci2block)  { _bci2block = bci2block;     }

    // NOTE: this has a different effect when parsing jsrs
    BlockBegin* block_at(int bci);

    IRScope* scope() const                          { return _scope;              }
    // Has side-effect of setting has_handler flag
    void set_scope(IRScope* scope);

    // Whether this or any parent scope has exception handlers
    bool has_handler() const                        { return _has_handler;        }
    void set_has_handler()                          { _has_handler = true;        }

    // Exception handlers list to be used for this scope
    XHandlers* xhandlers() const;

    // How to get a block to be parsed
    void add_to_work_list(BlockBegin* block);
    // How to remove the next block to be parsed; returns null if none left
    BlockBegin* remove_from_work_list();
    // Indicates parse is over
    bool is_work_list_empty() const;

    ciBytecodeStream* stream()                      { return _stream;             }
    void set_stream(ciBytecodeStream* stream)       { _stream = stream;           }

    intx max_inline_size() const                    { return _max_inline_size;    }

    BlockBegin* continuation() const                { return _continuation;       }
    void set_continuation(BlockBegin* cont)         { _continuation = cont;       }

    // Indicates whether this ScopeData was pushed only for the
    // parsing and inlining of a jsr
    bool parsing_jsr() const                        { return _parsing_jsr;        }
    void set_parsing_jsr()                          { _parsing_jsr = true;        }
    int  jsr_entry_bci() const                      { return _jsr_entry_bci;      }
    void set_jsr_entry_bci(int bci)                 { _jsr_entry_bci = bci;       }
    void set_jsr_return_address_local(int local_no) { _jsr_ret_addr_local = local_no; }
    int  jsr_return_address_local() const           { return _jsr_ret_addr_local; }
    // Must be called after scope is set up for jsr ScopeData
    void setup_jsr_xhandlers();

    // The jsr continuation is only used when parsing_jsr is true, and
    // is different from the "normal" continuation since we can end up
    // doing a return (rather than a ret) from within a subroutine
    BlockBegin* jsr_continuation() const            { return _jsr_continuation;   }
    void set_jsr_continuation(BlockBegin* cont)     { _jsr_continuation = cont;   }

    int num_returns();
    void incr_num_returns();

    void set_inline_cleanup_info(BlockBegin* block,
                                 Instruction* return_prev,
                                 ValueStack* return_state);
    BlockBegin*  inline_cleanup_block() const       { return _cleanup_block;       }
    Instruction* inline_cleanup_return_prev() const { return _cleanup_return_prev; }
    ValueStack*  inline_cleanup_state() const       { return _cleanup_state;       }

    bool ignore_return() const                      { return _ignore_return;      }
    void set_ignore_return(bool ignore_return)      { _ignore_return = ignore_return; }
  };

  // for all GraphBuilders
  static bool _can_trap[Bytecodes::number_of_java_codes];

  // for each instance of GraphBuilder
  ScopeData*        _scope_data;          // Per-scope data; used for inlining
  Compilation*      _compilation;         // the current compilation
  ValueMap*         _vmap;                // the map of values encountered (for CSE)
  MemoryBuffer*     _memory;
  const char*       _inline_bailout_msg;  // non-null if most recent inline attempt failed
  int               _instruction_count;   // for bailing out in pathological jsr/ret cases
  BlockBegin*       _start;               // the start block
  BlockBegin*       _osr_entry;           // the osr entry block
  ValueStack*       _initial_state;       // The state for the start block

  // for each call to connect_to_end; can also be set by inliner
  BlockBegin*       _block;               // the current block
  ValueStack*       _state;               // the current execution state
  Instruction*      _last;                // the last instruction added
  bool              _skip_block;          // skip processing of the rest of this block

  // support for optimization of accesses to flat fields and flat arrays
  DelayedFieldAccess* _pending_field_access;
  DelayedLoadIndexed* _pending_load_indexed;

  // accessors
  ScopeData*        scope_data() const           { return _scope_data; }
  Compilation*      compilation() const          { return _compilation; }
  BlockList*        bci2block() const            { return scope_data()->bci2block(); }
  ValueMap*         vmap() const                 { assert(UseLocalValueNumbering, "should not access otherwise"); return _vmap; }
  bool              has_handler() const          { return scope_data()->has_handler(); }

  BlockBegin*       block() const                { return _block; }
  ValueStack*       state() const                { return _state; }
  void              set_state(ValueStack* state) { _state = state; }
  IRScope*          scope() const                { return scope_data()->scope(); }
  ciMethod*         method() const               { return scope()->method(); }
  ciBytecodeStream* stream() const               { return scope_data()->stream(); }
  Instruction*      last() const                 { return _last; }
  Bytecodes::Code   code() const                 { return stream()->cur_bc(); }
  int               bci() const                  { return stream()->cur_bci(); }
  int               next_bci() const             { return stream()->next_bci(); }
  bool              has_pending_field_access()   { return _pending_field_access != nullptr; }
  DelayedFieldAccess* pending_field_access()     { return _pending_field_access; }
  void set_pending_field_access(DelayedFieldAccess* delayed) { _pending_field_access = delayed; }
  bool              has_pending_load_indexed()   { return _pending_load_indexed != nullptr; }
  DelayedLoadIndexed* pending_load_indexed()     { return _pending_load_indexed; }
  void set_pending_load_indexed(DelayedLoadIndexed* delayed) { _pending_load_indexed = delayed; }

  // unified bailout support
  void bailout(const char* msg) const            { compilation()->bailout(msg); }
  bool bailed_out() const                        { return compilation()->bailed_out(); }

  // stack manipulation helpers
  void ipush(Value t) const                      { state()->ipush(t); }
  void lpush(Value t) const                      { state()->lpush(t); }
  void fpush(Value t) const                      { state()->fpush(t); }
  void dpush(Value t) const                      { state()->dpush(t); }
  void apush(Value t) const                      { state()->apush(t); }
  void push(ValueType* type, Value t) const      { state()->push(type, t); }

  Value ipop()                                   { return state()->ipop(); }
  Value lpop()                                   { return state()->lpop(); }
  Value fpop()                                   { return state()->fpop(); }
  Value dpop()                                   { return state()->dpop(); }
  Value apop()                                   { return state()->apop(); }
  Value pop(ValueType* type)                     { return state()->pop(type); }

  // instruction helpers
  void load_constant();
  void load_local(ValueType* type, int index);
  void store_local(ValueType* type, int index);
  void store_local(ValueStack* state, Value value, int index);
  void load_indexed (BasicType type);
  void store_indexed(BasicType type);
  void stack_op(Bytecodes::Code code);
  void arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before = nullptr);
  void negate_op(ValueType* type);
  void shift_op(ValueType* type, Bytecodes::Code code);
  void logic_op(ValueType* type, Bytecodes::Code code);
  void compare_op(ValueType* type, Bytecodes::Code code);
  void convert(Bytecodes::Code op, BasicType from, BasicType to);
  void increment();
  void _goto(int from_bci, int to_bci);
  void if_node(Value x, If::Condition cond, Value y, ValueStack* stack_before);
  void if_zero(ValueType* type, If::Condition cond);
  void if_null(ValueType* type, If::Condition cond);
  void if_same(ValueType* type, If::Condition cond);
  void jsr(int dest);
  void ret(int local_index);
  void table_switch();
  void lookup_switch();
  void method_return(Value x, bool ignore_return = false);
  void call_register_finalizer();
  void access_field(Bytecodes::Code code);
  void invoke(Bytecodes::Code code);
  void new_instance(int klass_index);
  void new_type_array();
  void new_object_array();
  void check_cast(int klass_index);
  void instance_of(int klass_index);
  void monitorenter(Value x, int bci);
  void monitorexit(Value x, int bci);
  void new_multi_array(int dimensions);
  void throw_op(int bci);

  // inline types
  void copy_inline_content(ciInlineKlass* vk, Value src, int src_off, Value dest, int dest_off, ValueStack* state_before, ciField* enclosing_field = nullptr);

  // stack/code manipulation helpers
  Instruction* append_with_bci(Instruction* instr, int bci);
  Instruction* append(Instruction* instr);
  Instruction* append_split(StateSplit* instr);

  // other helpers
  BlockBegin* block_at(int bci)                  { return scope_data()->block_at(bci); }
  XHandlers* handle_exception(Instruction* instruction);
  void connect_to_end(BlockBegin* beg);
  void null_check(Value value);
  void eliminate_redundant_phis(BlockBegin* start);
  BlockEnd* iterate_bytecodes_for_block(int bci);
  void iterate_all_blocks(bool start_in_current_block_for_inlining = false);
  Dependencies* dependency_recorder() const; // = compilation()->dependencies()
  bool direct_compare(ciKlass* k);
  Value make_constant(ciConstant value, ciField* field);

  void kill_all();

  // use of state copy routines (try to minimize unnecessary state
  // object allocations):

  // - if the instruction unconditionally needs a full copy of the
  // state (for patching for example), then use copy_state_before*

  // - if the instruction needs a full copy of the state only for
  // handler generation (Instruction::needs_exception_state() returns
  // false) then use copy_state_exhandling*

  // - if the instruction needs both a full copy of the state for
  // handler generation and at least a minimal copy of the state (as
  // returned by Instruction::exception_state()) for debug info
  // generation (that is when Instruction::needs_exception_state()
  // returns true) then use copy_state_for_exception*
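  //
  // In short (illustrative summary of the rules above):
  //   patching may be required                        -> copy_state_before*
  //   Instruction::needs_exception_state() == false   -> copy_state_exhandling*
  //   Instruction::needs_exception_state() == true    -> copy_state_for_exception*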

  ValueStack* copy_state_before_with_bci(int bci);
  ValueStack* copy_state_before();
  ValueStack* copy_state_exhandling_with_bci(int bci);
  ValueStack* copy_state_exhandling();
  ValueStack* copy_state_for_exception_with_bci(int bci);
  ValueStack* copy_state_for_exception();
  ValueStack* copy_state_if_bb(bool is_bb) { return (is_bb || compilation()->is_optimistic()) ? copy_state_before() : nullptr; }
  ValueStack* copy_state_indexed_access() { return compilation()->is_optimistic() ? copy_state_before() : copy_state_for_exception(); }

  //
  // Inlining support
  //

  // accessors
  bool parsing_jsr() const                               { return scope_data()->parsing_jsr(); }
  BlockBegin* continuation() const                       { return scope_data()->continuation(); }
  BlockBegin* jsr_continuation() const                   { return scope_data()->jsr_continuation(); }
  void set_continuation(BlockBegin* continuation)        { scope_data()->set_continuation(continuation); }
  void set_inline_cleanup_info(BlockBegin* block,
                               Instruction* return_prev,
                               ValueStack* return_state) { scope_data()->set_inline_cleanup_info(block,
                                                                                                 return_prev,
                                                                                                 return_state); }
  void set_inline_cleanup_info() {
    set_inline_cleanup_info(_block, _last, _state);
  }
  BlockBegin*  inline_cleanup_block() const              { return scope_data()->inline_cleanup_block(); }
  Instruction* inline_cleanup_return_prev() const        { return scope_data()->inline_cleanup_return_prev(); }
  ValueStack*  inline_cleanup_state() const              { return scope_data()->inline_cleanup_state(); }
  void restore_inline_cleanup_info() {
    _block = inline_cleanup_block();
    _last  = inline_cleanup_return_prev();
    _state = inline_cleanup_state();
  }
  void incr_num_returns()                                { scope_data()->incr_num_returns(); }
  int  num_returns() const                               { return scope_data()->num_returns(); }
  intx max_inline_size() const                           { return scope_data()->max_inline_size(); }
  int  inline_level() const                              { return scope()->level(); }
  int  recursive_inline_level(ciMethod* callee) const;

  // inlining of synchronized methods
  void inline_sync_entry(Value lock, BlockBegin* sync_handler);
  void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = false);

  void build_graph_for_intrinsic(ciMethod* callee, bool ignore_return);

  // inliners
  bool try_inline(           ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = nullptr);
  bool try_inline_intrinsics(ciMethod* callee, bool ignore_return = false);
  bool try_inline_full(      ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = nullptr);
  bool try_inline_jsr(int jsr_dest_bci);

  const char* check_can_parse(ciMethod* callee) const;
  const char* should_not_inline(ciMethod* callee) const;

  // JSR 292 support
  bool try_method_handle_inline(ciMethod* callee, bool ignore_return);

  // helpers
  void inline_bailout(const char* msg);
  BlockBegin* header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state);
  BlockBegin* setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* init_state);
  void setup_osr_entry_block();
  void clear_inline_bailout();
  ValueStack* state_at_entry();
  void push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start);
  void push_scope(ciMethod* callee, BlockBegin* continuation);
  void push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci);
  void pop_scope();
  void pop_scope_for_jsr();

  void append_unsafe_get(ciMethod* callee, BasicType t, bool is_volatile);
  void append_unsafe_put(ciMethod* callee, BasicType t, bool is_volatile);
  void append_unsafe_CAS(ciMethod* callee);
  void append_unsafe_get_and_set(ciMethod* callee, bool is_add);
  void append_char_access(ciMethod* callee, bool is_store);
  void append_alloc_array_copy(ciMethod* callee);

  void print_inlining(ciMethod* callee, const char* msg, bool success = true);

  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder, Values* obj_args, bool inlined);
  void profile_return_type(Value ret, ciMethod* callee, ciMethod* m = nullptr, int bci = -1);
  void profile_invocation(ciMethod* inlinee, ValueStack* state);

  // Shortcuts to profiling control.
  bool is_profiling()          { return _compilation->is_profiling();          }
  bool profile_branches()      { return _compilation->profile_branches();      }
  bool profile_calls()         { return _compilation->profile_calls();         }
  bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
  bool profile_checkcasts()    { return _compilation->profile_checkcasts();    }
  bool profile_parameters()    { return _compilation->profile_parameters();    }
  bool profile_arguments()     { return _compilation->profile_arguments();     }
  bool profile_return()        { return _compilation->profile_return();        }
  bool profile_array_accesses(){ return _compilation->profile_array_accesses();}

  Values* args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver);
  Values* collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver);
  void check_args_for_profiling(Values* obj_args, int expected);

 public:
  NOT_PRODUCT(void print_stats();)

  // initialization
  static void initialize();

  // public
  static bool can_trap(ciMethod* method, Bytecodes::Code code) {
    assert(0 <= code && code < Bytecodes::number_of_java_codes, "illegal bytecode");
    if (_can_trap[code]) return true;
    // special handling for finalizer registration
    return code == Bytecodes::_return && method->intrinsic_id() == vmIntrinsics::_Object_init;
  }

  // creation
  GraphBuilder(Compilation* compilation, IRScope* scope);
  static void sort_top_into_worklist(BlockList* worklist, BlockBegin* top);

  BlockBegin* start() const { return _start; }
};

#endif // SHARE_C1_C1_GRAPHBUILDER_HPP