/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_COMPILE_HPP
#define SHARE_OPTO_COMPILE_HPP

#include "asm/codeBuffer.hpp"
#include "ci/compilerInterface.hpp"
#include "code/debugInfoRec.hpp"
#include "compiler/cHeapStringHolder.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/compilerEvent.hpp"
#include "libadt/dict.hpp"
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/phase.hpp"
#include "opto/phasetype.hpp"
#include "opto/printinlining.hpp"
#include "opto/regmask.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/ticks.hpp"
#include "utilities/vmEnums.hpp"

class AbstractLockNode;
class AddPNode;
class Block;
class Bundle;
class CallGenerator;
class CallStaticJavaNode;
class CloneMap;
class CompilationFailureInfo;
class ConnectionGraph;
class IdealGraphPrinter;
class InlineTree;
class Matcher;
class MachConstantNode;
class MachConstantBaseNode;
class MachNode;
class MachOper;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_List;
class Node_Notes;
class NodeHash;
class NodeCloneInfo;
class OpaqueTemplateAssertionPredicateNode;
class OptoReg;
class ParsePredicateNode;
class PhaseCFG;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseOutput;
class ReachabilityFenceNode;
class RootNode;
class relocInfo;
class StartNode;
class SafePointNode;
class JVMState;
class Type;
class TypeInt;
class TypeInteger;
class TypeKlassPtr;
class TypePtr;
class TypeOopPtr;
class TypeFunc;
class TypeVect;
class Type_Array;
class Unique_Node_List;
class UnstableIfTrap;
class nmethod;
class Node_Stack;
struct Final_Reshape_Counts;
class VerifyMeetResult;

enum LoopOptsMode {
  LoopOptsDefault,
  LoopOptsNone,
  LoopOptsMaxUnroll,
  LoopOptsShenandoahExpand,
  LoopOptsSkipSplitIf,
  LoopOptsVerify,
  PostLoopOptsExpandReachabilityFences
};

// The type of all node counts and indexes.
// It must hold at least 16 bits, but must also be fast to load and store.
// This type, if less than 32 bits, could limit the number of possible nodes.
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
typedef unsigned int node_idx_t;

class NodeCloneInfo {
 private:
  uint64_t _idx_clone_orig;
 public:

  void set_idx(node_idx_t idx) {
    _idx_clone_orig = (_idx_clone_orig & CONST64(0xFFFFFFFF00000000)) | idx;
  }
  node_idx_t idx() const { return (node_idx_t)(_idx_clone_orig & 0xFFFFFFFF); }

  void set_gen(int generation) {
    uint64_t g = (uint64_t)generation << 32;
    _idx_clone_orig = (_idx_clone_orig & 0xFFFFFFFF) | g;
  }
  int gen() const { return (int)(_idx_clone_orig >> 32); }

  void set(uint64_t x) { _idx_clone_orig = x; }
  void set(node_idx_t x, int g) { set_idx(x); set_gen(g); }
  uint64_t get() const { return _idx_clone_orig; }

  NodeCloneInfo(uint64_t idx_clone_orig) : _idx_clone_orig(idx_clone_orig) {}
  NodeCloneInfo(node_idx_t x, int g) : _idx_clone_orig(0) { set(x, g); }

  void dump_on(outputStream* st) const;
};
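
// Packing sketch (values illustrative): the original node's index lives in
// the low 32 bits and the clone generation in the high 32 bits of a single
// uint64_t:
//
//   NodeCloneInfo info(42, 3);  // idx = 42, gen = 3
//   info.get();                 // 0x000000030000002A
//   info.idx();                 // 42
//   info.gen();                 // 3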

class CloneMap {
  friend class Compile;
 private:
  bool _debug;
  Dict* _dict;
  int _clone_idx;   // current cloning iteration/generation in loop unroll
 public:
  void* _2p(node_idx_t key) const { return (void*)(intptr_t)key; } // 2 conversion functions to make gcc happy
  node_idx_t _2_node_idx_t(const void* k) const { return (node_idx_t)(intptr_t)k; }
  Dict* dict() const { return _dict; }
  void insert(node_idx_t key, uint64_t val) { assert(_dict->operator[](_2p(key)) == nullptr, "key existed"); _dict->Insert(_2p(key), (void*)val); }
  void insert(node_idx_t key, NodeCloneInfo& ci) { insert(key, ci.get()); }
  void remove(node_idx_t key) { _dict->Delete(_2p(key)); }
  uint64_t value(node_idx_t key) const { return (uint64_t)_dict->operator[](_2p(key)); }
  node_idx_t idx(node_idx_t key) const { return NodeCloneInfo(value(key)).idx(); }
  int gen(node_idx_t key) const { return NodeCloneInfo(value(key)).gen(); }
  int gen(const void* k) const { return gen(_2_node_idx_t(k)); }
  int max_gen() const;
  void clone(Node* old, Node* nnn, int gen);
  void verify_insert_and_clone(Node* old, Node* nnn, int gen);
  void dump(node_idx_t key, outputStream* st) const;

  int clone_idx() const { return _clone_idx; }
  void set_clone_idx(int x) { _clone_idx = x; }
  bool is_debug() const { return _debug; }
  void set_debug(bool debug) { _debug = debug; }

  bool same_idx(node_idx_t k1, node_idx_t k2) const { return idx(k1) == idx(k2); }
  bool same_gen(node_idx_t k1, node_idx_t k2) const { return gen(k1) == gen(k2); }
};
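
// Usage sketch (node variables hypothetical): loop unrolling bumps the clone
// generation once per unroll step, then records each copy under its own node
// index so later phases can relate copies back to their originals:
//
//   CloneMap& cm = C->clone_map();
//   cm.set_clone_idx(cm.clone_idx() + 1);             // start a new generation
//   cm.verify_insert_and_clone(old_node, new_node, cm.clone_idx());
//   cm.idx(new_node->_idx);                           // index of the original
//   cm.gen(new_node->_idx);                           // generation of the copy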

class Options {
  friend class Compile;
 private:
  const bool _subsume_loads;                // Load can be matched as part of a larger op.
  const bool _do_escape_analysis;           // Do escape analysis.
  const bool _do_iterative_escape_analysis; // Do iterative escape analysis.
  const bool _do_reduce_allocation_merges;  // Do try to reduce allocation merges.
  const bool _eliminate_boxing;             // Do boxing elimination.
  const bool _do_locks_coarsening;          // Do locks coarsening
  const bool _do_superword;                 // Do SuperWord
  const bool _install_code;                 // Install the code that was compiled
 public:
  Options(bool subsume_loads,
          bool do_escape_analysis,
          bool do_iterative_escape_analysis,
          bool do_reduce_allocation_merges,
          bool eliminate_boxing,
          bool do_locks_coarsening,
          bool do_superword,
          bool install_code) :
          _subsume_loads(subsume_loads),
          _do_escape_analysis(do_escape_analysis),
          _do_iterative_escape_analysis(do_iterative_escape_analysis),
          _do_reduce_allocation_merges(do_reduce_allocation_merges),
          _eliminate_boxing(eliminate_boxing),
          _do_locks_coarsening(do_locks_coarsening),
          _do_superword(do_superword),
          _install_code(install_code) {
  }

  static Options for_runtime_stub() {
    return Options(
      /* subsume_loads = */ true,
      /* do_escape_analysis = */ false,
      /* do_iterative_escape_analysis = */ false,
      /* do_reduce_allocation_merges = */ false,
      /* eliminate_boxing = */ false,
      /* do_locks_coarsening = */ false,
      /* do_superword = */ true,
      /* install_code = */ true
    );
  }
};

//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {

 public:
  // Fixed alias indexes. (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1, // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2, // pseudo-index, aliases to everything
    AliasIdxRaw = 3  // hard-wired index for TypeRawPtr::BOTTOM
  };
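
  // Example: a raw-memory access (TypeRawPtr::BOTTOM, e.g. to an object
  // header) is indexed at AliasIdxRaw; the all-of-memory slice at the base of
  // a MergeMemNode uses AliasIdxBot; AliasIdxTop never indexes real memory
  // and only serves as a sentinel.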

  // Variant of TraceTime(nullptr, &_t_accumulator, CITime);
  // Integrated with logging. If logging is turned on, and CITimeVerbose is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on CITime.)
  class TracePhase : public TraceTime {
   private:
    Compile* const _compile;
    CompileLog* _log;
    const bool _dolog;
   public:
    TracePhase(PhaseTraceId phaseTraceId);
    TracePhase(const char* name, PhaseTraceId phaseTraceId);
    ~TracePhase();
    const char* phase_name() const { return title(); }
  };
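
  // Usage sketch (the PhaseTraceId value is illustrative): a stack-allocated
  // TracePhase times the enclosing scope and, with logging enabled, brackets
  // it in the compile log:
  //
  //   {
  //     TracePhase tp(_t_optimizer); // assumed id; any PhaseTraceId works
  //     Optimize();
  //   } // destructor stops the timer and closes the log bracket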

  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int _index;               // unique index, used with MergeMemNode
    const TypePtr* _adr_type; // normalized address type
    ciField* _field;          // relevant instance field, or null if none
    const Type* _element;     // relevant array element type, or null if none
    bool _is_rewritable;      // false if the memory is write-once only
    int _general_index;       // if this type is an instance, the general
                              // type that it is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int index() const { return _index; }
    const TypePtr* adr_type() const { return _adr_type; }
    ciField* field() const { return _field; }
    const Type* element() const { return _element; }
    bool is_rewritable() const { return _is_rewritable; }
    bool is_volatile() const { return (_field ? _field->is_volatile() : false); }
    int general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field, "");
      _field = f;
      if (f->is_final() || f->is_stable()) {
        // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
        _is_rewritable = false;
      }
    }
    void set_element(const Type* e) {
      assert(_element == nullptr, "");
      _element = e;
    }

    BasicType basic_type() const;

    void print_on(outputStream* st) PRODUCT_RETURN;
  };

  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1 << logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; }; // simple duple type
  enum {
    trapHistLength = MethodData::_trap_hist_limit
  };
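
  // Lookup sketch (hash details are an assumption; see probe_alias_cache):
  // the 64-entry cache is direct-mapped on the address-type pointer, so a hit
  // avoids walking the _alias_types table:
  //
  //   AliasCacheEntry* ace = &_alias_cache[hash(adr_type) & (AliasCacheSize - 1)];
  //   if (ace->_adr_type == adr_type) return ace->_index; // cache hit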

 private:
  // Fixed parameters to this compilation.
  const int _compile_id;
  const Options _options;          // Compilation options
  ciMethod* _method;               // The method being compiled.
  int _entry_bci;                  // entry bci for osr methods.
  const TypeFunc* _tf;             // My kind of signature
  InlineTree* _ilt;                // Ditto (temporary).
  address _stub_function;          // VM entry for stub being compiled, or null
  const char* _stub_name;          // Name of stub or adapter being compiled, or null
  StubId _stub_id;                 // unique id for stub or NO_STUBID
  address _stub_entry_point;       // Compile code entry for generated stub, or null

  // Control of this compilation.
  int _max_inline_size;            // Max inline size for this compilation
  int _freq_inline_size;           // Max hot method inline size for this compilation
  int _fixed_slots;                // count of frame slots not allocated by the register
                                   // allocator, i.e. locks, original deopt pc, etc.
  uintx _max_node_limit;           // Max unique node count during a single compilation.
  uint _node_count_inlining_cutoff; // Number of nodes in the graph above which inlining is denied

  bool _post_loop_opts_phase;      // Loop opts are finished.
  bool _merge_stores_phase;        // Phase for merging stores, after post loop opts phase.
  bool _allow_macro_nodes;         // True if we allow creation of macro nodes.

  /* If major progress is set:
   *   Marks that the loop tree information (get_ctrl, idom, get_loop, etc.) could be invalid and that we need to rebuild the loop tree.
   *   It also indicates that the graph was changed in a way that makes further loop optimizations look promising.
   * If major progress is not set:
   *   Loop tree information is valid.
   *   If major progress is not set at the end of a loop opts phase, we can stop doing loop opts: we do not expect further progress from additional loop opts phases.
   *
   * This is not 100% accurate; the semantics of major progress have become less clear over time, but this is the general idea.
   */
  bool _major_progress;
  bool _inlining_progress;         // progress doing incremental inlining?
  bool _inlining_incrementally;    // Are we doing incremental inlining (post parse)
  bool _do_cleanup;                // Cleanup is needed before proceeding with incremental inlining
  bool _has_loops;                 // True if the method _may_ have some loops
  bool _has_split_ifs;             // True if the method _may_ have some split-if
  bool _has_unsafe_access;         // True if the method _may_ produce faults in unsafe loads or stores.
  bool _has_stringbuilder;         // True if StringBuffers or StringBuilders are allocated
  bool _has_boxed_value;           // True if a boxed object is allocated
  bool _has_reserved_stack_access; // True if the method or an inlined method is annotated with ReservedStackAccess
  uint _max_vector_size;           // Maximum size of generated vectors
  bool _clear_upper_avx;           // Clear upper bits of ymm registers using vzeroupper
  uint _trap_hist[trapHistLength]; // Cumulative traps
  bool _trap_can_recompile;        // Have we emitted a recompiling trap?
  uint _decompile_count;           // Cumulative decompilation counts.
  bool _do_inlining;               // True if we intend to do inlining
  bool _do_scheduling;             // True if we intend to do scheduling
  bool _do_freq_based_layout;      // True if we intend to do frequency based block layout
  bool _do_vector_loop;            // True if allowed to execute loop in parallel iterations
  bool _use_cmove;                 // True if CMove should be used without profitability analysis
  bool _do_aliasing;               // True if we intend to do aliasing
  bool _print_assembly;            // True if we should dump assembly code for this compilation
  bool _print_inlining;            // True if we should print inlining for this compilation
  bool _print_intrinsics;          // True if we should print intrinsics for this compilation
  bool _print_phase_loop_opts;     // True if we should print before and after loop opts phase
#ifndef PRODUCT
  uint _phase_counter;             // Counter for the number of already printed phases
  uint _igv_idx;                   // Counter for IGV node identifiers
  uint _igv_phase_iter[PHASE_NUM_TYPES]; // Counters for IGV phase iterations
  bool _trace_opto_output;
  bool _parsed_irreducible_loop;   // True if ciTypeFlow detected irreducible loops during parsing
#endif
  bool _has_irreducible_loop;      // Found irreducible loops
  bool _has_monitors;              // Metadata transferred to nmethod to enable Continuations lock-detection fastpath
  bool _has_scoped_access;         // For shared scope closure
  bool _clinit_barrier_on_entry;   // True if clinit barrier is needed on nmethod entry
  int _loop_opts_cnt;              // loop opts round
  uint _stress_seed;               // Seed for stress testing

  // Compilation environment.
  Arena _comp_arena;               // Arena with lifetime equivalent to Compile
  void* _barrier_set_state;        // Potential GC barrier state for Compile
  ciEnv* _env;                     // CI interface
  DirectiveSet* _directive;        // Compiler directive
  CompileLog* _log;                // from CompilerThread
  CHeapStringHolder _failure_reason; // for record_failure/failing pattern
  CompilationFailureInfo* _first_failure_details; // Details for the first failure happening during compilation
  GrowableArray<CallGenerator*> _intrinsics; // List of intrinsics.
  GrowableArray<Node*> _macro_nodes; // List of nodes which need to be expanded before matching.
  GrowableArray<ParsePredicateNode*> _parse_predicates; // List of Parse Predicates.
  // List of OpaqueTemplateAssertionPredicateNode nodes for Template Assertion Predicates which can be seen as list
  // of Template Assertion Predicates themselves.
  GrowableArray<OpaqueTemplateAssertionPredicateNode*> _template_assertion_predicate_opaques;
  GrowableArray<Node*> _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
  GrowableArray<ReachabilityFenceNode*> _reachability_fences; // List of reachability fences
  GrowableArray<Node*> _for_post_loop_igvn; // List of nodes for IGVN after loop opts are over
  GrowableArray<Node*> _for_merge_stores_igvn; // List of nodes for IGVN merge stores
  GrowableArray<UnstableIfTrap*> _unstable_if_traps; // List of ifnodes after IGVN
  GrowableArray<Node_List*> _coarsened_locks; // List of coarsened Lock and Unlock nodes
  ConnectionGraph* _congraph;
#ifndef PRODUCT
  IdealGraphPrinter* _igv_printer;
  static IdealGraphPrinter* _debug_file_printer;
  static IdealGraphPrinter* _debug_network_printer;
#endif

  // Node management
  uint _unique;                    // Counter for unique Node indices
  uint _dead_node_count;           // Number of dead nodes; VectorSet::Size() is O(N).
                                   // So use this to keep count and make the call O(1).
  VectorSet _dead_node_list;       // Set of dead nodes
  DEBUG_ONLY(Unique_Node_List* _modified_nodes;) // List of nodes whose inputs were modified
  DEBUG_ONLY(bool _phase_optimize_finished;)     // Used for live node verification while creating new nodes

  DEBUG_ONLY(bool _phase_verify_ideal_loop;)     // Are we in PhaseIdealLoop verification?

  // Arenas for new-space and old-space nodes.
  // _node_arena points at the arena currently in use; swap_old_and_new() exchanges the two.
  // Old-space nodes only live for the duration of the transform (xform).
  Arena _node_arena_one;
  Arena _node_arena_two;
  Arena* _node_arena;
 public:
  Arena* swap_old_and_new() {
    Arena* filled_arena_ptr = _node_arena;
    Arena* old_arena_ptr = old_arena();
    old_arena_ptr->destruct_contents();
    _node_arena = old_arena_ptr;
    return filled_arena_ptr;
  }
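
  // Usage sketch: a bulk transform (e.g. matching) swaps arenas so the graph
  // built so far becomes "old space" to read from, while new nodes go into
  // the freshly emptied arena:
  //
  //   Arena* old = C->swap_old_and_new(); // 'old' holds the pre-transform nodes
  //   // ... clone/transform nodes from 'old' into node_arena() ...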
 private:
  RootNode* _root;                 // Unique root of compilation, or null after bail-out.
  Node* _top;                      // Unique top node. (Reset by various phases.)

  Node* _immutable_memory;         // Initial memory state

  Node* _recent_alloc_obj;
  Node* _recent_alloc_ctl;

  // Constant table
  MachConstantBaseNode* _mach_constant_base_node; // Constant table base node singleton.

  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum {
    _log2_node_notes_block_size = 8,
    _node_notes_block_size = (1 << _log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes* _default_node_notes; // default notes for new nodes

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins. It produces
  // the initial Control and BOTTOM for everybody else.

  // Type management
  Arena _Compile_types;            // Arena for all types
  Arena* _type_arena;              // Alias for _Compile_types except in Initialize_shared()
  Dict* _type_dict;                // Intern table
  CloneMap _clone_map;             // used for recording history of cloned nodes
  size_t _type_last_size;          // Last allocation size (see Type::operator new/delete)
  ciMethod* _last_tf_m;            // Cache for
  const TypeFunc* _last_tf;        //  TypeFunc::make
  AliasType** _alias_types;        // List of alias types seen so far.
  int _num_alias_types;            // Logical length of _alias_types
  int _max_alias_types;            // Physical length of _alias_types
  AliasCacheEntry _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN* _initial_gvn;          // Results of parse-time PhaseGVN

  // Shared worklist for all IGVN rounds. Nodes can be pushed to it at any time.
  // If pushed outside IGVN, the Node is processed in the next IGVN round.
  Unique_Node_List* _igvn_worklist;

  // Shared type array for GVN, IGVN and CCP. It maps node idx -> Type*.
  Type_Array* _types;

  // Shared node hash table for GVN, IGVN and CCP.
  NodeHash* _node_hash;

  GrowableArray<CallGenerator*> _late_inlines; // List of CallGenerators to be revisited after main parsing has finished.
  GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
  GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations

  GrowableArray<CallGenerator*> _vector_reboxing_late_inlines; // same but for vector reboxing operations

  int _late_inlines_pos;           // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
  bool _has_mh_late_inlines;       // Can there still be a method handle late inlining pending?
                                   // false: there can't be one
                                   // true: we've enqueued one at some point so there may still be one

  // "MemLimit" directive was specified and the memory limit was hit during compilation
  bool _oom;

  // Only keep nodes in the expensive node list that need to be optimized
  void cleanup_expensive_nodes(PhaseIterGVN& igvn);
  // Use for sorting expensive nodes to bring similar nodes together
  static int cmp_expensive_nodes(Node** n1, Node** n2);
  // Expensive nodes list already sorted?
  bool expensive_nodes_sorted() const;
  // Remove the speculative part of types and clean up the graph
  void remove_speculative_types(PhaseIterGVN& igvn);

  void* _replay_inline_data;       // Pointer to data loaded from file

  void log_late_inline_failure(CallGenerator* cg, const char* msg);
  DEBUG_ONLY(bool _exception_backedge;)

  void record_method_not_compilable_oom();

  InlinePrinter _inline_printer;

 public:
  void* barrier_set_state() const { return _barrier_set_state; }

  InlinePrinter* inline_printer() { return &_inline_printer; }

#ifndef PRODUCT
  IdealGraphPrinter* igv_printer() { return _igv_printer; }
  void reset_igv_phase_iter(CompilerPhaseType cpt) { _igv_phase_iter[cpt] = 0; }
#endif

  void log_late_inline(CallGenerator* cg);
  void log_inline_id(CallGenerator* cg);
  void log_inline_failure(const char* msg);

  void* replay_inline_data() const { return _replay_inline_data; }

  // Dump inlining replay data to the stream.
  void dump_inline_data(outputStream* out);
  void dump_inline_data_reduced(outputStream* out);

 private:
  // Matching, CFG layout, allocation, code generation
  PhaseCFG* _cfg;                  // Results of CFG finding
  int _java_calls;                 // Number of java calls in the method
  int _inner_loops;                // Number of inner loops in the method
  Matcher* _matcher;               // Engine to map ideal to machine instructions
  PhaseRegAlloc* _regalloc;        // Results of register allocation.
  RegMask _FIRST_STACK_mask;       // All stack slots usable for spills (depends on frame layout)
  Arena* _indexSet_arena;          // control IndexSet allocation within PhaseChaitin
  void* _indexSet_free_block_list; // free list of IndexSet bit blocks
  int _interpreter_frame_size;

  // Holds dynamically allocated extensions of short-lived register masks. Such
  // extensions are potentially quite large and need tight resource marks which
  // may conflict with other allocations in the default resource area.
  // Therefore, we use a dedicated resource area for register masks.
  ResourceArea _regmask_arena;

  PhaseOutput* _output;

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }

  int interpreter_frame_size() const { return _interpreter_frame_size; }

  PhaseOutput* output() const { return _output; }
  void set_output(PhaseOutput* o) { _output = o; }

  // ID for this compilation. Useful for setting breakpoints in the debugger.
  int compile_id() const { return _compile_id; }
  DirectiveSet* directive() const { return _directive; }

  // Does this compilation allow instructions to subsume loads? User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool subsume_loads() const { return _options._subsume_loads; }
  /** Do escape analysis. */
  bool do_escape_analysis() const { return _options._do_escape_analysis; }
  bool do_iterative_escape_analysis() const { return _options._do_iterative_escape_analysis; }
  bool do_reduce_allocation_merges() const { return _options._do_reduce_allocation_merges; }
  /** Do boxing elimination. */
  bool eliminate_boxing() const { return _options._eliminate_boxing; }
  /** Do aggressive boxing elimination. */
  bool aggressive_unboxing() const { return _options._eliminate_boxing && AggressiveUnboxing; }
  bool should_install_code() const { return _options._install_code; }
  /** Do locks coarsening. */
  bool do_locks_coarsening() const { return _options._do_locks_coarsening; }
  bool do_superword() const { return _options._do_superword; }

  // Other fixed compilation parameters.
  ciMethod* method() const { return _method; }
  int entry_bci() const { return _entry_bci; }
  bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
  bool is_method_compilation() const { return (_method != nullptr && !_method->flags().is_native()); }
  const TypeFunc* tf() const { assert(_tf != nullptr, ""); return _tf; }
  void init_tf(const TypeFunc* tf) { assert(_tf == nullptr, ""); _tf = tf; }
  InlineTree* ilt() const { return _ilt; }
  address stub_function() const { return _stub_function; }
  const char* stub_name() const { return _stub_name; }
  StubId stub_id() const { return _stub_id; }
  address stub_entry_point() const { return _stub_entry_point; }
  void set_stub_entry_point(address z) { _stub_entry_point = z; }

  // Control of this compilation.
  int fixed_slots() const { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
  void set_fixed_slots(int n) { _fixed_slots = n; }
  void set_inlining_progress(bool z) { _inlining_progress = z; }
  int inlining_progress() const { return _inlining_progress; }
  void set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
  int inlining_incrementally() const { return _inlining_incrementally; }
  void set_do_cleanup(bool z) { _do_cleanup = z; }
  int do_cleanup() const { return _do_cleanup; }
  bool major_progress() const { return _major_progress; }
  void set_major_progress() { _major_progress = true; }
  void restore_major_progress(bool progress) { _major_progress = _major_progress || progress; }
  void clear_major_progress() { _major_progress = false; }
  int max_inline_size() const { return _max_inline_size; }
  void set_freq_inline_size(int n) { _freq_inline_size = n; }
  int freq_inline_size() const { return _freq_inline_size; }
  void set_max_inline_size(int n) { _max_inline_size = n; }
  bool has_loops() const { return _has_loops; }
  void set_has_loops(bool z) { _has_loops = z; }
  bool has_split_ifs() const { return _has_split_ifs; }
  void set_has_split_ifs(bool z) { _has_split_ifs = z; }
  bool has_unsafe_access() const { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
  bool has_stringbuilder() const { return _has_stringbuilder; }
  void set_has_stringbuilder(bool z) { _has_stringbuilder = z; }
  bool has_boxed_value() const { return _has_boxed_value; }
  void set_has_boxed_value(bool z) { _has_boxed_value = z; }
  bool has_reserved_stack_access() const { return _has_reserved_stack_access; }
  void set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }
  uint max_vector_size() const { return _max_vector_size; }
  void set_max_vector_size(uint s) { _max_vector_size = s; }
  bool clear_upper_avx() const { return _clear_upper_avx; }
  void set_clear_upper_avx(bool s) { _clear_upper_avx = s; }
  void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
  uint trap_count(uint r) const { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool trap_can_recompile() const { return _trap_can_recompile; }
  void set_trap_can_recompile(bool z) { _trap_can_recompile = z; }
  uint decompile_count() const { return _decompile_count; }
  void set_decompile_count(uint c) { _decompile_count = c; }
  bool allow_range_check_smearing() const;
  bool do_inlining() const { return _do_inlining; }
  void set_do_inlining(bool z) { _do_inlining = z; }
  bool do_scheduling() const { return _do_scheduling; }
  void set_do_scheduling(bool z) { _do_scheduling = z; }
  bool do_freq_based_layout() const { return _do_freq_based_layout; }
  void set_do_freq_based_layout(bool z) { _do_freq_based_layout = z; }
  bool do_vector_loop() const { return _do_vector_loop; }
  void set_do_vector_loop(bool z) { _do_vector_loop = z; }
  bool use_cmove() const { return _use_cmove; }
  void set_use_cmove(bool z) { _use_cmove = z; }
  bool do_aliasing() const { return _do_aliasing; }
  bool print_assembly() const { return _print_assembly; }
  void set_print_assembly(bool z) { _print_assembly = z; }
  bool print_inlining() const { return _print_inlining; }
  void set_print_inlining(bool z) { _print_inlining = z; }
  bool print_intrinsics() const { return _print_intrinsics; }
  void set_print_intrinsics(bool z) { _print_intrinsics = z; }
  uint max_node_limit() const { return (uint)_max_node_limit; }
  void set_max_node_limit(uint n) { _max_node_limit = n; }
  uint node_count_inlining_cutoff() const { return _node_count_inlining_cutoff; }
  void set_node_count_inlining_cutoff(uint n) { _node_count_inlining_cutoff = n; }
  bool clinit_barrier_on_entry() { return _clinit_barrier_on_entry; }
  void set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }
  bool has_monitors() const { return _has_monitors; }
  void set_has_monitors(bool v) { _has_monitors = v; }
  bool has_scoped_access() const { return _has_scoped_access; }
  void set_has_scoped_access(bool v) { _has_scoped_access = v; }

  // check the CompilerOracle for special behaviours for this compile
  bool method_has_option(CompileCommandEnum option) const {
    return method() != nullptr && method()->has_option(option);
  }

#ifndef PRODUCT
  uint next_igv_idx() { return _igv_idx++; }
  bool trace_opto_output() const { return _trace_opto_output; }
  void print_phase(const char* phase_name);
  void print_ideal_ir(const char* compile_phase_name) const;
  bool should_print_ideal() const { return _directive->PrintIdealOption; }
  bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
  int _in_dump_cnt;                // Required for dumping IR nodes.
#endif
  bool has_irreducible_loop() const { return _has_irreducible_loop; }
  void set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }

  Ticks _latest_stage_start_counter;

  void begin_method();
  void end_method();

  void print_method(CompilerPhaseType compile_phase, int level, Node* n = nullptr);

#ifndef PRODUCT
  bool should_print_igv(int level);
  bool should_print_phase(int level) const;
  bool should_print_ideal_phase(CompilerPhaseType cpt) const;
  void init_igv();
  void dump_igv(const char* graph_name, int level = 3) {
    if (should_print_igv(level)) {
      _igv_printer->print_graph(graph_name, nullptr);
    }
  }

  void igv_print_method_to_file(const char* phase_name = nullptr, bool append = false, const frame* fr = nullptr);
  void igv_print_method_to_network(const char* phase_name = nullptr, const frame* fr = nullptr);
  void igv_print_graph_to_network(const char* name, GrowableArray<const Node*>& visible_nodes, const frame* fr);
  static IdealGraphPrinter* debug_file_printer() { return _debug_file_printer; }
  static IdealGraphPrinter* debug_network_printer() { return _debug_network_printer; }
#endif

  const GrowableArray<ParsePredicateNode*>& parse_predicates() const {
    return _parse_predicates;
  }

  const GrowableArray<OpaqueTemplateAssertionPredicateNode*>& template_assertion_predicate_opaques() const {
    return _template_assertion_predicate_opaques;
  }

  int macro_count() const { return _macro_nodes.length(); }
  int parse_predicate_count() const { return _parse_predicates.length(); }
  int template_assertion_predicate_count() const { return _template_assertion_predicate_opaques.length(); }
  int expensive_count() const { return _expensive_nodes.length(); }
  int coarsened_count() const { return _coarsened_locks.length(); }
  Node* macro_node(int idx) const { return _macro_nodes.at(idx); }

  Node* expensive_node(int idx) const { return _expensive_nodes.at(idx); }

  ReachabilityFenceNode* reachability_fence(int idx) const { return _reachability_fences.at(idx); }
  int reachability_fences_count() const { return _reachability_fences.length(); }

  ConnectionGraph* congraph() { return _congraph; }
  void set_congraph(ConnectionGraph* congraph) { _congraph = congraph; }
  void add_macro_node(Node* n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes.contains(n), "duplicate entry in expand list");
    _macro_nodes.append(n);
  }
  void remove_macro_node(Node* n) {
    // This function may be called twice for a node, so we can only remove it
    // if it still exists.
    _macro_nodes.remove_if_existing(n);
    // Remove from coarsened locks list if present
    if (coarsened_count() > 0) {
      remove_coarsened_lock(n);
    }
  }
  void add_expensive_node(Node* n);
  void remove_expensive_node(Node* n) {
    _expensive_nodes.remove_if_existing(n);
  }

  void add_reachability_fence(ReachabilityFenceNode* rf) {
    _reachability_fences.append(rf);
  }

  void remove_reachability_fence(ReachabilityFenceNode* n) {
    _reachability_fences.remove_if_existing(n);
  }

  void add_parse_predicate(ParsePredicateNode* n) {
    assert(!_parse_predicates.contains(n), "duplicate entry in Parse Predicate list");
    _parse_predicates.append(n);
  }

  void remove_parse_predicate(ParsePredicateNode* n) {
    if (parse_predicate_count() > 0) {
      _parse_predicates.remove_if_existing(n);
    }
  }

  void add_template_assertion_predicate_opaque(OpaqueTemplateAssertionPredicateNode* n) {
    assert(!_template_assertion_predicate_opaques.contains(n),
           "Duplicate entry in Template Assertion Predicate OpaqueTemplateAssertionPredicate list");
    _template_assertion_predicate_opaques.append(n);
  }

  void remove_template_assertion_predicate_opaque(OpaqueTemplateAssertionPredicateNode* n) {
    if (template_assertion_predicate_count() > 0) {
      _template_assertion_predicate_opaques.remove_if_existing(n);
    }
  }
  void add_coarsened_locks(GrowableArray<AbstractLockNode*>& locks);
  void remove_coarsened_lock(Node* n);
  bool coarsened_locks_consistent();
  void mark_unbalanced_boxes() const;

  bool post_loop_opts_phase() { return _post_loop_opts_phase; }
  void set_post_loop_opts_phase() { _post_loop_opts_phase = true; }
  void reset_post_loop_opts_phase() { _post_loop_opts_phase = false; }

#ifdef ASSERT
  bool phase_verify_ideal_loop() const { return _phase_verify_ideal_loop; }
  void set_phase_verify_ideal_loop() { _phase_verify_ideal_loop = true; }
  void reset_phase_verify_ideal_loop() { _phase_verify_ideal_loop = false; }
#endif

  bool allow_macro_nodes() { return _allow_macro_nodes; }
  void reset_allow_macro_nodes() { _allow_macro_nodes = false; }

  void record_for_post_loop_opts_igvn(Node* n);
  void remove_from_post_loop_opts_igvn(Node* n);
  void process_for_post_loop_opts_igvn(PhaseIterGVN& igvn);

  void record_unstable_if_trap(UnstableIfTrap* trap);
  bool remove_unstable_if_trap(CallStaticJavaNode* unc, bool yield);
  void remove_useless_unstable_if_traps(Unique_Node_List& useful);
  void process_for_unstable_if_traps(PhaseIterGVN& igvn);

  bool merge_stores_phase() { return _merge_stores_phase; }
  void set_merge_stores_phase() { _merge_stores_phase = true; }
  void record_for_merge_stores_igvn(Node* n);
  void remove_from_merge_stores_igvn(Node* n);
  void process_for_merge_stores_igvn(PhaseIterGVN& igvn);

  void shuffle_late_inlines();
  void shuffle_macro_nodes();
  void sort_macro_nodes();

  void mark_parse_predicate_nodes_useless(PhaseIterGVN& igvn);

  // Are there candidate expensive nodes for optimization?
  bool should_optimize_expensive_nodes(PhaseIterGVN& igvn);
  // Check whether n1 and n2 are similar
  static int cmp_expensive_nodes(Node* n1, Node* n2);
  // Sort expensive nodes to locate similar expensive nodes
  void sort_expensive_nodes();

  // Compilation environment.
  Arena* comp_arena() { return &_comp_arena; }
  ciEnv* env() const { return _env; }
  CompileLog* log() const { return _log; }

  bool failing_internal() const {
    return _env->failing() ||
           _failure_reason.get() != nullptr;
  }

  const char* failure_reason() const {
    return _env->failing() ? _env->failure_reason()
                           : _failure_reason.get();
  }

  const CompilationFailureInfo* first_failure_details() const { return _first_failure_details; }

  bool failing() {
    if (failing_internal()) {
      return true;
    }
#ifdef ASSERT
    // Disable stress code for PhaseIdealLoop verification (would have cascading effects).
    if (phase_verify_ideal_loop()) {
      return false;
    }
    if (StressBailout) {
      return fail_randomly();
    }
#endif
    return false;
  }

#ifdef ASSERT
  bool fail_randomly();
  bool failure_is_artificial();
#endif

  bool failure_reason_is(const char* r) const {
    return (r == _failure_reason.get()) ||
           (r != nullptr &&
            _failure_reason.get() != nullptr &&
            strcmp(r, _failure_reason.get()) == 0);
  }

  void record_failure(const char* reason DEBUG_ONLY(COMMA bool allow_multiple_failures = false));
  void record_method_not_compilable(const char* reason DEBUG_ONLY(COMMA bool allow_multiple_failures = false)) {
    env()->record_method_not_compilable(reason);
    // Record failure reason.
    record_failure(reason DEBUG_ONLY(COMMA allow_multiple_failures));
  }
  bool check_node_count(uint margin, const char* reason) {
    if (oom()) {
      record_method_not_compilable_oom();
      return true;
    }
    if (live_nodes() + margin > max_node_limit()) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }
  bool oom() const { return _oom; }
  void set_oom() { _oom = true; }

  // Node management
  uint unique() const { return _unique; }
  uint next_unique() { return _unique++; }
  void set_unique(uint i) { _unique = i; }
  Arena* node_arena() { return _node_arena; }
  Arena* old_arena() { return (&_node_arena_one == _node_arena) ? &_node_arena_two : &_node_arena_one; }
  RootNode* root() const { return _root; }
  void set_root(RootNode* r) { _root = r; }
  StartNode* start() const; // (Derived from root.)
  void verify_start(StartNode* s) const NOT_DEBUG_RETURN;
  Node* immutable_memory();

  Node* recent_alloc_ctl() const { return _recent_alloc_ctl; }
  Node* recent_alloc_obj() const { return _recent_alloc_obj; }
  void set_recent_alloc(Node* ctl, Node* obj) {
    _recent_alloc_ctl = ctl;
    _recent_alloc_obj = obj;
  }
  void record_dead_node(uint idx) {
    if (_dead_node_list.test_set(idx)) return;
    _dead_node_count++;
  }
  void reset_dead_node_list() {
    _dead_node_list.reset();
    _dead_node_count = 0;
  }
  uint live_nodes() const {
    int val = _unique - _dead_node_count;
    assert(val >= 0, "number of tracked dead nodes %u more than created nodes %u", _dead_node_count, _unique);
    return (uint) val;
  }
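
  // Example: after 1000 nodes have been created (unique() == 1000) and 150 of
  // them recorded dead, live_nodes() returns 850 without walking the graph.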
#ifdef ASSERT
  void set_phase_optimize_finished() { _phase_optimize_finished = true; }
  bool phase_optimize_finished() const { return _phase_optimize_finished; }
  uint count_live_nodes_by_graph_walk();
  void print_missing_nodes();
#endif

  // Record modified nodes to check that they are put on IGVN worklist
  void record_modified_node(Node* n) NOT_DEBUG_RETURN;
  void remove_modified_node(Node* n) NOT_DEBUG_RETURN;
  DEBUG_ONLY(Unique_Node_List* modified_nodes() const { return _modified_nodes; })

  MachConstantBaseNode* mach_constant_base_node();
  bool has_mach_constant_base_node() const { return _mach_constant_base_node != nullptr; }
  // Generated by adlc, true if CallNode requires MachConstantBase.
  bool needs_deep_clone_jvms();

  // Handy undefined Node
  Node* top() const { return _top; }

  // these are used by guys who need to know about creation and transformation of top:
  Node* cached_top_node() { return _top; }
  void set_cached_top_node(Node* tn);

  GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
  void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
  Node_Notes* default_node_notes() const { return _default_node_notes; }
  void set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }

  Node_Notes* node_notes_at(int idx);

  inline bool set_node_notes_at(int idx, Node_Notes* value);
  // Copy notes from source to dest, if they exist.
  // Overwrite dest only if source provides something.
  // Return true if information was moved.
  bool copy_node_notes_to(Node* dest, Node* source);

  // Workhorse function to sort out the blocked Node_Notes array:
  Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
                                int idx, bool can_grow = false);

  void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);

  // Type management
  Arena* type_arena() { return _type_arena; }
  Dict* type_dict() { return _type_dict; }
  size_t type_last_size() { return _type_last_size; }
  int num_alias_types() { return _num_alias_types; }

  void init_type_arena() { _type_arena = &_Compile_types; }
  void set_type_arena(Arena* a) { _type_arena = a; }
  void set_type_dict(Dict* d) { _type_dict = d; }
  void set_type_last_size(size_t sz) { _type_last_size = sz; }

  const TypeFunc* last_tf(ciMethod* m) {
    return (m == _last_tf_m) ? _last_tf : nullptr;
  }
  void set_last_tf(ciMethod* m, const TypeFunc* tf) {
    assert(m != nullptr || tf == nullptr, "");
    _last_tf_m = m;
    _last_tf = tf;
  }
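
  // Single-entry cache sketch (helper name hypothetical): TypeFunc::make can
  // consult this cache before recomputing a signature for the same method:
  //
  //   const TypeFunc* tf = C->last_tf(m); // nullptr on miss
  //   if (tf == nullptr) {
  //     tf = build_tf_for(m);             // hypothetical recomputation
  //     C->set_last_tf(m, tf);            // remember for next time
  //   }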

  AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
  AliasType* alias_type(const TypePtr* adr_type, ciField* field = nullptr) { return find_alias_type(adr_type, false, field); }
  bool have_alias_type(const TypePtr* adr_type);
  AliasType* alias_type(ciField* field);

  int get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
  const TypePtr* get_adr_type(uint aidx) { return alias_type(aidx)->adr_type(); }
  int get_general_index(uint aidx) { return alias_type(aidx)->general_index(); }

  // Building nodes
  void rethrow_exceptions(JVMState* jvms);
  void return_values(JVMState* jvms);
  JVMState* build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
                                JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = nullptr,
                                bool allow_intrinsics = true);
  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
    return C->directive()->should_delay_inline(call_method) ||
           should_delay_string_inlining(call_method, jvms) ||
           should_delay_boxing_inlining(call_method, jvms) ||
           should_delay_vector_inlining(call_method, jvms);
  }
  bool should_delay_after_inlining_cutoff(ciMethod* callee, ciMethod* caller);
  bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_vector_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_vector_reboxing_inlining(ciMethod* call_method, JVMState* jvms);

  // Helper functions to identify inlining potential at call-site
  ciMethod* optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klass,
                                  ciKlass* holder, ciMethod* callee,
                                  const TypeOopPtr* receiver_type, bool is_virtual,
                                  bool& call_does_dispatch, int& vtable_index,
                                  bool check_access = true);
  ciMethod* optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, ciKlass* holder,
                              ciMethod* callee, const TypeOopPtr* receiver_type,
                              bool check_access = true);

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = nullptr);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // Report if there were too many traps or recompiles at a method and bci.
  bool too_many_traps_or_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason) {
    return too_many_traps(method, bci, reason) ||
           too_many_recompiles(method, bci, reason);
  }
  // Return a bitset with the reasons where deoptimization is allowed,
  // i.e., where there were not too many uncommon traps.
  int _allowed_reasons;
  int allowed_deopt_reasons() { return _allowed_reasons; }
  void set_allowed_deopt_reasons();

  // Parsing, optimization
  PhaseGVN* initial_gvn() { return _initial_gvn; }
  Unique_Node_List* igvn_worklist() {
    assert(_igvn_worklist != nullptr, "must be created in Compile::Compile");
    return _igvn_worklist;
  }
  Type_Array* types() {
    assert(_types != nullptr, "must be created in Compile::Compile");
    return _types;
  }
  NodeHash* node_hash() {
    assert(_node_hash != nullptr, "must be created in Compile::Compile");
    return _node_hash;
  }
  inline void record_for_igvn(Node* n); // Body is after class Unique_Node_List in node.hpp.
  inline void remove_for_igvn(Node* n); // Body is after class Unique_Node_List in node.hpp.
  void set_initial_gvn(PhaseGVN* gvn) { _initial_gvn = gvn; }

  // Replace n by nn using initial_gvn, calling hash_delete and
  // record_for_igvn as needed.
  void gvn_replace_by(Node* n, Node* nn);

  void identify_useful_nodes(Unique_Node_List& useful);
  void update_dead_node_list(Unique_Node_List& useful);
  void disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_List& worklist, const Unique_Node_List* root_and_safepoints = nullptr);

  void remove_useless_node(Node* dead);

  // Record this CallGenerator for inlining at the end of parsing.
  void add_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(_late_inlines_pos, cg);
    if (StressIncrementalInlining) {
      assert(_late_inlines_pos < _late_inlines.length(), "unthinkable!");
      if (_late_inlines.length() - _late_inlines_pos >= 2) {
        int j = (C->random() % (_late_inlines.length() - _late_inlines_pos)) + _late_inlines_pos;
        swap(_late_inlines.at(_late_inlines_pos), _late_inlines.at(j));
      }
    }
    _late_inlines_pos++;
  }
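
  // Ordering sketch: inserting at _late_inlines_pos (rather than appending)
  // places candidates enqueued while inlining a method directly after it, so
  // nested candidates are revisited before unrelated ones (the depth-first
  // emulation noted at _late_inlines_pos). Under StressIncrementalInlining,
  // the new candidate is instead swapped with a random later entry.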

  void prepend_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(0, cg);
  }

  void add_string_late_inline(CallGenerator* cg) {
    _string_late_inlines.push(cg);
  }

  void add_boxing_late_inline(CallGenerator* cg) {
    _boxing_late_inlines.push(cg);
  }

  void add_vector_reboxing_late_inline(CallGenerator* cg) {
    _vector_reboxing_late_inlines.push(cg);
  }

  template<typename N, ENABLE_IF(std::is_base_of<Node, N>::value)>
  void remove_useless_nodes(GrowableArray<N*>& node_list, Unique_Node_List& useful);

  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List& useful);
  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Node* dead);

  void remove_useless_coarsened_locks(Unique_Node_List& useful);

  void dump_print_inlining();

  bool over_inlining_cutoff() const {
    if (!inlining_incrementally()) {
      return unique() > (uint)NodeCountInliningCutoff;
    } else {
      // Give some room for incremental inlining algorithm to "breathe"
      // and avoid thrashing when live node count is close to the limit.
      // Keep in mind that live_nodes() isn't accurate during inlining until
      // dead node elimination step happens (see Compile::inline_incrementally).
      return live_nodes() > node_count_inlining_cutoff() * 11 / 10;
    }
  }
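
  // Example (cutoff value hypothetical): with node_count_inlining_cutoff() ==
  // 80000, incremental inlining is over the cutoff only once live_nodes()
  // exceeds 88000, i.e. the limit plus 10% headroom.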

  void mark_has_mh_late_inlines() { _has_mh_late_inlines = true; }
  bool has_mh_late_inlines() const { return _has_mh_late_inlines; }

  bool inline_incrementally_one();
  void inline_incrementally_cleanup(PhaseIterGVN& igvn);
  void inline_incrementally(PhaseIterGVN& igvn);
  bool should_stress_inlining() { return StressIncrementalInlining && (random() % 2) == 0; }
  bool should_delay_inlining() { return AlwaysIncrementalInline || should_stress_inlining(); }
  void inline_string_calls(bool parse_time);
  void inline_boxing_calls(PhaseIterGVN& igvn);
  bool optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode);
  void remove_root_to_sfpts_edges(PhaseIterGVN& igvn);

  void inline_vector_reboxing_calls();
  bool has_vbox_nodes();

  void process_late_inline_calls_no_inline(PhaseIterGVN& igvn);

  // Matching, CFG layout, allocation, code generation
  PhaseCFG* cfg() { return _cfg; }
  bool has_java_calls() const { return _java_calls > 0; }
  int java_calls() const { return _java_calls; }
  int inner_loops() const { return _inner_loops; }
  Matcher* matcher() { return _matcher; }
  PhaseRegAlloc* regalloc() { return _regalloc; }
  RegMask& FIRST_STACK_mask() { return _FIRST_STACK_mask; }
  ResourceArea* regmask_arena() { return &_regmask_arena; }
  Arena* indexSet_arena() { return _indexSet_arena; }
  void* indexSet_free_block_list() { return _indexSet_free_block_list; }
  DebugInformationRecorder* debug_info() { return env()->debug_info(); }

  void update_interpreter_frame_size(int size) {
    if (_interpreter_frame_size < size) {
      _interpreter_frame_size = size;
    }
  }

  void set_matcher(Matcher* m) { _matcher = m; }
  //void set_regalloc(PhaseRegAlloc* ra) { _regalloc = ra; }
  void set_indexSet_arena(Arena* a) { _indexSet_arena = a; }
  void set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; }

  void set_java_calls(int z) { _java_calls = z; }
  void set_inner_loops(int z) { _inner_loops = z; }

  Dependencies* dependencies() { return env()->dependencies(); }

  // Major entry point. Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci. For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, ciMethod* target,
          int entry_bci, Options options, DirectiveSet* directive);

  // Second major entry point. From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc* (*gen)(),
          address stub_function, const char* stub_name,
          StubId stub_id, int is_fancy_jump, bool pass_tls,
          bool return_pc, DirectiveSet* directive);

  ~Compile();

  // Are we compiling a method?
  bool has_method() { return method() != nullptr; }

  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // returns true if adr is completely contained in the given alias category
  bool must_alias(const TypePtr* adr, int alias_idx);

  // returns true if adr overlaps with the given alias category
  bool can_alias(const TypePtr* adr, int alias_idx);

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved. On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  uint in_preserve_stack_slots() {
    return SharedRuntime::in_preserve_stack_slots();
  }

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  static uint out_preserve_stack_slots() {
    return SharedRuntime::out_preserve_stack_slots();
  }

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  uint varargs_C_out_slots_killed() const;

  // Number of Stack Slots consumed by a synchronization entry
  int sync_stack_slots() const;

  // Compute the name of old_SP. See <arch>.ad for frame layout.
  OptoReg::Name compute_old_SP();

 private:
  // Phase control:
  void Init(bool aliasing); // Prepare for a single compilation
  void Optimize();          // Given a graph, optimize it
  void Code_Gen();          // Generate code from a graph

  // Management of the AliasType table.
  void grow_alias_types();
  AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
  const TypePtr* flatten_alias_type(const TypePtr* adr_type) const;
  AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);

  void verify_top(Node*) const PRODUCT_RETURN;

  // Intrinsic setup.
  CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);           // constructor
  int intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found); // helper
  CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);              // query fn
  void register_intrinsic(CallGenerator* cg);                               // update fn

#ifndef PRODUCT
  static juint _intrinsic_hist_count[];
  static jubyte _intrinsic_hist_flags[];
#endif
  // Function calls made by the public function final_graph_reshaping.
  // No need to be made public as they are not called elsewhere.
  void final_graph_reshaping_impl(Node* n, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes);
  void final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop, Unique_Node_List& dead_nodes);
  void final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes);
  void handle_div_mod_op(Node* n, BasicType bt, bool is_unsigned);

  // Logic cone optimization.
  void optimize_logic_cones(PhaseIterGVN& igvn);
  void collect_logic_cone_roots(Unique_Node_List& list);
  void process_logic_cone_root(PhaseIterGVN& igvn, Node* n, VectorSet& visited);
  bool compute_logic_cone(Node* n, Unique_Node_List& partition, Unique_Node_List& inputs);
  uint compute_truth_table(Unique_Node_List& partition, Unique_Node_List& inputs);
  uint eval_macro_logic_op(uint func, uint op1, uint op2, uint op3);
  Node* xform_to_MacroLogicV(PhaseIterGVN& igvn, const TypeVect* vt, Unique_Node_List& partitions, Unique_Node_List& inputs);
  void check_no_dead_use() const NOT_DEBUG_RETURN;

 public:

  // Note: Histogram array size is about 1 Kb.
  enum {                      // flag bits:
    _intrinsic_worked = 1,    // succeeded at least once
    _intrinsic_failed = 2,    // tried it but it failed
    _intrinsic_disabled = 4,  // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
    _intrinsic_virtual = 8,   // was seen in the virtual form (rare)
    _intrinsic_both = 16      // was seen in the non-virtual form (usual)
  };
  // Update histogram. Return boolean if this is a first-time occurrence.
  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
                                          bool is_virtual, int flags) PRODUCT_RETURN0;
  static void print_intrinsic_statistics() PRODUCT_RETURN;

  // Graph verification code
  // Walk the node list, verifying that there is a one-to-one correspondence
  // between Use-Def edges and Def-Use edges. The option no_dead_code enables
  // stronger checks that the graph is strongly connected from starting points
  // in both directions.
  // root_and_safepoints is used to give the starting points for the traversal.
  // If not supplied, only root is used. When this check is called after CCP,
  // we need to start traversal from Root and safepoints, just like CCP does its
  // own traversal (see PhaseCCP::transform for reasons).
  //
  // To call this function, there are 2 ways to go:
  // - give root_and_safepoints to start traversal everywhere needed (like after CCP)
  // - if the whole graph is assumed to be reachable from Root's input,
  //   root_and_safepoints is not needed (like in PhaseRemoveUseless).
  //
  // Failure to specify root_and_safepoints in case the graph is not fully
  // reachable from Root's input makes this check unsound (it can miss
  // inconsistencies) and even incomplete (it can report non-existing problems)
  // if no_dead_code is true.
  void verify_graph_edges(bool no_dead_code = false, const Unique_Node_List* root_and_safepoints = nullptr) const PRODUCT_RETURN;

  // Verify bi-directional correspondence of edges
  void verify_bidirectional_edges(Unique_Node_List& visited, const Unique_Node_List* root_and_safepoints = nullptr) const;

  // End-of-run dumps.
  static void print_statistics() PRODUCT_RETURN;

  // Verify ADLC assumptions during startup
  static void adlc_verification() PRODUCT_RETURN;

  // Definitions of pd methods
  static void pd_compiler2_init();

  // Materialize reachability fences from reachability edges on safepoints.
  void expand_reachability_edges(Unique_Node_List& safepoints);

  // Static parse-time type checking logic for gen_subtype_check:
  enum SubTypeCheckResult { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  SubTypeCheckResult static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk, bool skip = StressReflectiveCode);

  static Node* conv_I2X_index(PhaseGVN* phase, Node* offset, const TypeInt* sizetype,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = nullptr);

  // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
  static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency = false);

  // Auxiliary methods for randomized fuzzing/stressing
  int random();
  bool randomized_select(int count);

  // Seed random number generation and log the seed for repeatability.
  void initialize_stress_seed(const DirectiveSet* directive);

  // supporting clone_map
  CloneMap& clone_map();
  void set_clone_map(Dict* d);

  bool needs_clinit_barrier(ciField* ik, ciMethod* accessing_method);
  bool needs_clinit_barrier(ciMethod* ik, ciMethod* accessing_method);
  bool needs_clinit_barrier(ciInstanceKlass* ik, ciMethod* accessing_method);

#ifdef ASSERT
  VerifyMeetResult* _type_verify;
  void set_exception_backedge() { _exception_backedge = true; }
  bool has_exception_backedge() const { return _exception_backedge; }
#endif

  static bool push_thru_add(PhaseGVN* phase, Node* z, const TypeInteger* tz, const TypeInteger*& rx, const TypeInteger*& ry,
                            BasicType out_bt, BasicType in_bt);

  static Node* narrow_value(BasicType bt, Node* value, const Type* type, PhaseGVN* phase, bool transform_res);

#ifndef PRODUCT
 private:
  // getting rid of the template makes things easier
  Node* make_debug_print_call(const char* str, address call_addr, PhaseGVN* gvn,
                              Node* parm0 = nullptr, Node* parm1 = nullptr,
                              Node* parm2 = nullptr, Node* parm3 = nullptr,
                              Node* parm4 = nullptr, Node* parm5 = nullptr,
                              Node* parm6 = nullptr) const;

 public:
  // Creates a CallLeafNode for a runtime call that prints a static string and the values of the
  // nodes passed as arguments.
  // This function also takes care of doing the necessary wiring, including finding a suitable control
  // based on the nodes that need to be printed. Note that passing nodes that have incompatible controls
  // is undefined behavior.
  template <typename... TT, typename... NN>
  Node* make_debug_print(const char* str, PhaseGVN* gvn, NN... in) {
    address call_addr = CAST_FROM_FN_PTR(address, SharedRuntime::debug_print<TT...>);
    return make_debug_print_call(str, call_addr, gvn, in...);
  }
#endif
};

#endif // SHARE_OPTO_COMPILE_HPP