/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_COMPILE_HPP
#define SHARE_OPTO_COMPILE_HPP

#include "asm/codeBuffer.hpp"
#include "ci/compilerInterface.hpp"
#include "code/debugInfoRec.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerEvent.hpp"
#include "compiler/cHeapStringHolder.hpp"
#include "libadt/dict.hpp"
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/phasetype.hpp"
#include "opto/phase.hpp"
#include "opto/regmask.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/ticks.hpp"

class AbstractLockNode;
class AddPNode;
class Block;
class Bundle;
class CallGenerator;
class CallStaticJavaNode;
class CloneMap;
class ConnectionGraph;
class IdealGraphPrinter;
class InlineTree;
class Matcher;
class MachConstantNode;
class MachConstantBaseNode;
class MachNode;
class MachOper;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_List;
class Node_Notes;
class NodeHash;
class NodeCloneInfo;
class OptoReg;
class PhaseCFG;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseOutput;
class RootNode;
class relocInfo;
class StartNode;
class SafePointNode;
class JVMState;
class Type;
class TypeInt;
class TypeInteger;
class TypeKlassPtr;
class TypePtr;
class TypeOopPtr;
class TypeFunc;
class TypeVect;
class Type_Array;
class Unique_Node_List;
class UnstableIfTrap;
class nmethod;
class Node_Stack;
struct Final_Reshape_Counts;
class VerifyMeetResult;

enum LoopOptsMode {
  LoopOptsDefault,
  LoopOptsNone,
  LoopOptsMaxUnroll,
  LoopOptsShenandoahExpand,
  LoopOptsShenandoahPostExpand,
  LoopOptsSkipSplitIf,
  LoopOptsVerify
};

// The type of all node counts and indexes.
// It must hold at least 16 bits, but must also be fast to load and store.
// This type, if less than 32 bits, could limit the number of possible nodes.
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
typedef unsigned int node_idx_t;

class NodeCloneInfo {
 private:
  uint64_t _idx_clone_orig;
 public:

  void set_idx(node_idx_t idx) {
    _idx_clone_orig = (_idx_clone_orig & CONST64(0xFFFFFFFF00000000)) | idx;
  }
  node_idx_t idx() const { return (node_idx_t)(_idx_clone_orig & 0xFFFFFFFF); }

  void set_gen(int generation) {
    uint64_t g = (uint64_t)generation << 32;
    _idx_clone_orig = (_idx_clone_orig & 0xFFFFFFFF) | g;
  }
  int gen() const { return (int)(_idx_clone_orig >> 32); }

  void set(uint64_t x) { _idx_clone_orig = x; }
  void set(node_idx_t x, int g) { set_idx(x); set_gen(g); }
  uint64_t get() const { return _idx_clone_orig; }

  NodeCloneInfo(uint64_t idx_clone_orig) : _idx_clone_orig(idx_clone_orig) {}
  NodeCloneInfo(node_idx_t x, int g) : _idx_clone_orig(0) { set(x, g); }

  void dump_on(outputStream* st) const;
};
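
// Example (illustrative sketch): a NodeCloneInfo packs the 32-bit index of
// the original node into the low word and the cloning generation into the
// high word of a single uint64_t, so both values round-trip losslessly:
//
//   NodeCloneInfo info(/*idx*/ 42, /*gen*/ 3);
//   assert(info.idx() == 42 && info.gen() == 3, "round-trip");
//   assert(info.get() == ((CONST64(3) << 32) | 42), "packed layout");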

class CloneMap {
  friend class Compile;
 private:
  bool _debug;
  Dict* _dict;
  int _clone_idx;   // current cloning iteration/generation in loop unroll
 public:
  void* _2p(node_idx_t key) const { return (void*)(intptr_t)key; }  // 2 conversion functions to make gcc happy
  node_idx_t _2_node_idx_t(const void* k) const { return (node_idx_t)(intptr_t)k; }
  Dict* dict() const { return _dict; }
  void insert(node_idx_t key, uint64_t val) { assert(_dict->operator[](_2p(key)) == nullptr, "key existed"); _dict->Insert(_2p(key), (void*)val); }
  void insert(node_idx_t key, NodeCloneInfo& ci) { insert(key, ci.get()); }
  void remove(node_idx_t key) { _dict->Delete(_2p(key)); }
  uint64_t value(node_idx_t key) const { return (uint64_t)_dict->operator[](_2p(key)); }
  node_idx_t idx(node_idx_t key) const { return NodeCloneInfo(value(key)).idx(); }
  int gen(node_idx_t key) const { return NodeCloneInfo(value(key)).gen(); }
  int gen(const void* k) const { return gen(_2_node_idx_t(k)); }
  int max_gen() const;
  void clone(Node* old, Node* nnn, int gen);
  void verify_insert_and_clone(Node* old, Node* nnn, int gen);
  void dump(node_idx_t key, outputStream* st) const;

  int clone_idx() const { return _clone_idx; }
  void set_clone_idx(int x) { _clone_idx = x; }
  bool is_debug() const { return _debug; }
  void set_debug(bool debug) { _debug = debug; }

  bool same_idx(node_idx_t k1, node_idx_t k2) const { return idx(k1) == idx(k2); }
  bool same_gen(node_idx_t k1, node_idx_t k2) const { return gen(k1) == gen(k2); }
};
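
// Example (illustrative sketch): during loop unrolling, the CloneMap records
// for each clone the index of the node it was cloned from and the unroll
// generation that produced it, so later phases can group related clones:
//
//   CloneMap& cm = C->clone_map();
//   cm.set_clone_idx(cm.max_gen() + 1);                 // open a new generation
//   cm.verify_insert_and_clone(old_node, new_node, cm.clone_idx());
//   // (assuming old_node is itself an original, not a clone:)
//   assert(cm.idx(new_node->_idx) == old_node->_idx, "clone maps back to original");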

class Options {
  friend class Compile;
  friend class VMStructs;
 private:
  const bool _subsume_loads;                 // Load can be matched as part of a larger op.
  const bool _do_escape_analysis;            // Do escape analysis.
  const bool _do_iterative_escape_analysis; // Do iterative escape analysis.
  const bool _eliminate_boxing;              // Do boxing elimination.
  const bool _do_locks_coarsening;           // Do locks coarsening.
  const bool _install_code;                  // Install the code that was compiled.
 public:
  Options(bool subsume_loads, bool do_escape_analysis,
          bool do_iterative_escape_analysis,
          bool eliminate_boxing, bool do_locks_coarsening,
          bool install_code) :
          _subsume_loads(subsume_loads),
          _do_escape_analysis(do_escape_analysis),
          _do_iterative_escape_analysis(do_iterative_escape_analysis),
          _eliminate_boxing(eliminate_boxing),
          _do_locks_coarsening(do_locks_coarsening),
          _install_code(install_code) {
  }

  static Options for_runtime_stub() {
    return Options(
       /* subsume_loads = */ true,
       /* do_escape_analysis = */ false,
       /* do_iterative_escape_analysis = */ false,
       /* eliminate_boxing = */ false,
       /* do_locks_coarsening = */ false,
       /* install_code = */ true
    );
  }
};
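
// Example (illustrative sketch, not the authoritative derivation, which
// lives in the C2 driver): a normal method compilation builds its Options
// from command-line flags and compiler directives, roughly like:
//
//   Options opts(/* subsume_loads */ true,
//                /* do_escape_analysis */ DoEscapeAnalysis,
//                /* do_iterative_escape_analysis */ DoEscapeAnalysis,
//                /* eliminate_boxing */ EliminateAutoBox,
//                /* do_locks_coarsening */ EliminateLocks,
//                /* install_code */ true);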

//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {
  friend class VMStructs;

 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };
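
  // Example (illustrative sketch): the hard-wired indices tie concrete
  // address types to memory slices; raw memory always lands on AliasIdxRaw:
  //
  //   int raw_idx = C->get_alias_index(TypeRawPtr::BOTTOM);   // == AliasIdxRaw
  //   assert(C->get_adr_type(Compile::AliasIdxRaw) == TypeRawPtr::BOTTOM, "hard-wired");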

  // Variant of TraceTime(nullptr, &_t_accumulator, CITime);
  // Integrated with logging.  If logging is turned on, and CITimeVerbose is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on CITime.)
  class TracePhase : public TraceTime {
   private:
    Compile* C;
    CompileLog* _log;
    const char* _phase_name;
    bool _dolog;
   public:
    TracePhase(const char* name, elapsedTimer* accumulator);
    ~TracePhase();
  };
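
  // Example (illustrative sketch): a phase brackets itself with a scoped
  // TracePhase; the destructor stops the timer and, when logging is on,
  // closes the log bracket:
  //
  //   {
  //     TracePhase tp("optimizer", &timers[_t_optimizer]);
  //     Optimize();   // timed and logged as one phase
  //   }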

  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int _index;                   // unique index, used with MergeMemNode
    const TypePtr* _adr_type;     // normalized address type
    ciField* _field;              // relevant instance field, or null if none
    const Type* _element;         // relevant array element type, or null if none
    bool _is_rewritable;          // false if the memory is write-once only
    int _general_index;           // if this type is an instance, the general
                                  // type that this is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int index() const { return _index; }
    const TypePtr* adr_type() const { return _adr_type; }
    ciField* field() const { return _field; }
    const Type* element() const { return _element; }
    bool is_rewritable() const { return _is_rewritable; }
    bool is_volatile() const { return (_field ? _field->is_volatile() : false); }
    int general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field, "");
      _field = f;
      if (f->is_final() || f->is_stable()) {
        // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
        _is_rewritable = false;
      }
    }
    void set_element(const Type* e) {
      assert(_element == nullptr, "");
      _element = e;
    }

    BasicType basic_type() const;

    void print_on(outputStream* st) PRODUCT_RETURN;
  };

  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1 << logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; };  // simple duple type
  enum {
    trapHistLength = MethodData::_trap_hist_limit
  };
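
  // Example (a sketch of the direct-mapped cache idea; the actual hash
  // function is private to compile.cpp): the pointer identity of the interned
  // TypePtr picks one of the AliasCacheSize entries:
  //
  //   intptr_t key = (intptr_t) adr_type;
  //   AliasCacheEntry* ace = &_alias_cache[(key >> 3) & (AliasCacheSize - 1)];
  //   if (ace->_adr_type == adr_type) return ace->_index;   // cache hit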

 private:
  // Fixed parameters to this compilation.
  const int _compile_id;
  const Options _options;         // Compilation options
  ciMethod* _method;              // The method being compiled.
  int _entry_bci;                 // entry bci for osr methods.
  const TypeFunc* _tf;            // My kind of signature
  InlineTree* _ilt;               // Ditto (temporary).
  address _stub_function;         // VM entry for stub being compiled, or null
  const char* _stub_name;         // Name of stub or adapter being compiled, or null
  address _stub_entry_point;      // Compile code entry for generated stub, or null

  // Control of this compilation.
  int _max_inline_size;           // Max inline size for this compilation
  int _freq_inline_size;          // Max hot method inline size for this compilation
  int _fixed_slots;               // count of frame slots not allocated by the register
                                  // allocator, i.e. locks, original deopt pc, etc.
  uintx _max_node_limit;          // Max unique node count during a single compilation.

  bool _post_loop_opts_phase;     // Loop opts are finished.

  int _major_progress;            // Count of something big happening
  bool _inlining_progress;        // progress doing incremental inlining?
  bool _inlining_incrementally;   // Are we doing incremental inlining (post parse)
  bool _do_cleanup;               // Cleanup is needed before proceeding with incremental inlining
  bool _has_loops;                // True if the method _may_ have some loops
  bool _has_split_ifs;            // True if the method _may_ have some split-if
  bool _has_unsafe_access;        // True if the method _may_ produce faults in unsafe loads or stores.
  bool _has_stringbuilder;        // True if StringBuffers or StringBuilders are allocated
  bool _has_boxed_value;          // True if a boxed object is allocated
  bool _has_reserved_stack_access; // True if the method or an inlined method is annotated with ReservedStackAccess
  uint _max_vector_size;          // Maximum size of generated vectors
  bool _clear_upper_avx;          // Clear upper bits of ymm registers using vzeroupper
  uint _trap_hist[trapHistLength]; // Cumulative traps
  bool _trap_can_recompile;       // Have we emitted a recompiling trap?
  uint _decompile_count;          // Cumulative decompilation counts.
  bool _do_inlining;              // True if we intend to do inlining
  bool _do_scheduling;            // True if we intend to do scheduling
  bool _do_freq_based_layout;     // True if we intend to do frequency based block layout
  bool _do_vector_loop;           // True if allowed to execute loop in parallel iterations
  bool _use_cmove;                // True if CMove should be used without profitability analysis
  bool _do_aliasing;              // True if we intend to do aliasing
  bool _print_assembly;           // True if we should dump assembly code for this compilation
  bool _print_inlining;           // True if we should print inlining for this compilation
  bool _print_intrinsics;         // True if we should print intrinsics for this compilation
#ifndef PRODUCT
  uint _igv_idx;                  // Counter for IGV node identifiers
  bool _trace_opto_output;
  bool _parsed_irreducible_loop;  // True if ciTypeFlow detected irreducible loops during parsing
#endif
  bool _has_irreducible_loop;     // Found irreducible loops
  // JSR 292
  bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
  bool _has_monitors;             // Metadata transferred to nmethod to enable Continuations lock-detection fastpath
  RTMState _rtm_state;            // State of Restricted Transactional Memory usage
  int _loop_opts_cnt;             // loop opts round
  bool _clinit_barrier_on_entry;  // True if clinit barrier is needed on nmethod entry
  uint _stress_seed;              // Seed for stress testing

  // Compilation environment.
  Arena _comp_arena;              // Arena with lifetime equivalent to Compile
  void* _barrier_set_state;       // Potential GC barrier state for Compile
  ciEnv* _env;                    // CI interface
  DirectiveSet* _directive;       // Compiler directive
  CompileLog* _log;               // from CompilerThread
  CHeapStringHolder _failure_reason; // for record_failure/failing pattern
  GrowableArray<CallGenerator*> _intrinsics; // List of intrinsics.
  GrowableArray<Node*> _macro_nodes; // List of nodes which need to be expanded before matching.
  GrowableArray<Node*> _parse_predicate_opaqs; // List of Opaque1 nodes for the Parse Predicates.
  GrowableArray<Node*> _template_assertion_predicate_opaqs; // List of Opaque4 nodes for Template Assertion Predicates.
  GrowableArray<Node*> _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
  GrowableArray<Node*> _for_post_loop_igvn; // List of nodes for IGVN after loop opts are over
  GrowableArray<UnstableIfTrap*> _unstable_if_traps; // List of ifnodes after IGVN
  GrowableArray<Node_List*> _coarsened_locks; // List of coarsened Lock and Unlock nodes
  ConnectionGraph* _congraph;
#ifndef PRODUCT
  IdealGraphPrinter* _igv_printer;
  static IdealGraphPrinter* _debug_file_printer;
  static IdealGraphPrinter* _debug_network_printer;
#endif

  // Node management
  uint _unique;                   // Counter for unique Node indices
  VectorSet _dead_node_list;      // Set of dead nodes
  uint _dead_node_count;          // Number of dead nodes; VectorSet::Size() is O(N).
                                  // So use this to keep count and make the call O(1).
  DEBUG_ONLY(Unique_Node_List* _modified_nodes;) // List of nodes whose inputs were modified
  DEBUG_ONLY(bool _phase_optimize_finished;) // Used for live node verification while creating new nodes

  Arena _node_arena;              // Arena for new-space Nodes
  Arena _old_arena;               // Arena for old-space Nodes, lifetime during xform
  RootNode* _root;                // Unique root of compilation, or null after bail-out.
  Node* _top;                     // Unique top node.  (Reset by various phases.)

  Node* _immutable_memory;        // Initial memory state

  Node* _recent_alloc_obj;
  Node* _recent_alloc_ctl;

  // Constant table
  MachConstantBaseNode* _mach_constant_base_node;  // Constant table base node singleton.

  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum { _log2_node_notes_block_size = 8,
         _node_notes_block_size = (1 << _log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes* _default_node_notes;  // default notes for new nodes

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins.  It produces
  // the initial Control and BOTTOM for everybody else.

  // Type management
  Arena _Compile_types;           // Arena for all types
  Arena* _type_arena;             // Alias for _Compile_types except in Initialize_shared()
  Dict* _type_dict;               // Intern table
  CloneMap _clone_map;            // used for recording history of cloned nodes
  size_t _type_last_size;         // Last allocation size (see Type::operator new/delete)
  ciMethod* _last_tf_m;           // Cache for
  const TypeFunc* _last_tf;       //  TypeFunc::make
  AliasType** _alias_types;       // List of alias types seen so far.
  int _num_alias_types;           // Logical length of _alias_types
  int _max_alias_types;           // Physical length of _alias_types
  AliasCacheEntry _alias_cache[AliasCacheSize];  // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN* _initial_gvn;         // Results of parse-time PhaseGVN

  // Shared worklist for all IGVN rounds. Nodes can be pushed to it at any time.
  // If pushed outside IGVN, the Node is processed in the next IGVN round.
  Unique_Node_List* _igvn_worklist;

  // Shared type array for GVN, IGVN and CCP. It maps node idx -> Type*.
  Type_Array* _types;

  // Shared node hash table for GVN, IGVN and CCP.
  NodeHash* _node_hash;

  GrowableArray<CallGenerator*> _late_inlines;        // List of CallGenerators to be revisited after main parsing has finished.
  GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
  GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations

  GrowableArray<CallGenerator*> _vector_reboxing_late_inlines; // same but for vector reboxing operations

  int _late_inlines_pos;          // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
  uint _number_of_mh_late_inlines; // number of method handle late inlining still pending

  // Inlining may not happen in parse order, which would make
  // PrintInlining output confusing. Keep track of PrintInlining
  // pieces in order.
  class PrintInliningBuffer : public CHeapObj<mtCompiler> {
   private:
    CallGenerator* _cg;
    stringStream _ss;
    static const size_t default_stream_buffer_size = 128;

   public:
    PrintInliningBuffer()
      : _cg(nullptr), _ss(default_stream_buffer_size) {}

    stringStream* ss() { return &_ss; }
    CallGenerator* cg() { return _cg; }
    void set_cg(CallGenerator* cg) { _cg = cg; }
  };

  stringStream* _print_inlining_stream;
  GrowableArray<PrintInliningBuffer*>* _print_inlining_list;
  int _print_inlining_idx;
  char* _print_inlining_output;

  // Only keep nodes in the expensive node list that need to be optimized
  void cleanup_expensive_nodes(PhaseIterGVN& igvn);
  // Use for sorting expensive nodes to bring similar nodes together
  static int cmp_expensive_nodes(Node** n1, Node** n2);
  // Expensive nodes list already sorted?
  bool expensive_nodes_sorted() const;
  // Remove the speculative part of types and clean up the graph
  void remove_speculative_types(PhaseIterGVN& igvn);

  void* _replay_inline_data;      // Pointer to data loaded from file

  void print_inlining_init();
  void print_inlining_reinit();
  void print_inlining_commit();
  void print_inlining_push();
  PrintInliningBuffer* print_inlining_current();

  void log_late_inline_failure(CallGenerator* cg, const char* msg);
  DEBUG_ONLY(bool _exception_backedge;)

 public:

  void* barrier_set_state() const { return _barrier_set_state; }

  stringStream* print_inlining_stream() {
    assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
    return _print_inlining_stream;
  }

  void print_inlining_update(CallGenerator* cg);
  void print_inlining_update_delayed(CallGenerator* cg);
  void print_inlining_move_to(CallGenerator* cg);
  void print_inlining_assert_ready();
  void print_inlining_reset();

  void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = nullptr) {
    stringStream ss;
    CompileTask::print_inlining_inner(&ss, method, inline_level, bci, msg);
    print_inlining_stream()->print("%s", ss.freeze());
  }

#ifndef PRODUCT
  IdealGraphPrinter* igv_printer() { return _igv_printer; }
#endif

  void log_late_inline(CallGenerator* cg);
  void log_inline_id(CallGenerator* cg);
  void log_inline_failure(const char* msg);

  void* replay_inline_data() const { return _replay_inline_data; }

  // Dump inlining replay data to the stream.
  void dump_inline_data(outputStream* out);
  void dump_inline_data_reduced(outputStream* out);

 private:
  // Matching, CFG layout, allocation, code generation
  PhaseCFG* _cfg;                 // Results of CFG finding
  int _java_calls;                // Number of java calls in the method
  int _inner_loops;               // Number of inner loops in the method
  Matcher* _matcher;              // Engine to map ideal to machine instructions
  PhaseRegAlloc* _regalloc;       // Results of register allocation.
  RegMask _FIRST_STACK_mask;      // All stack slots usable for spills (depends on frame layout)
  Arena* _indexSet_arena;         // control IndexSet allocation within PhaseChaitin
  void* _indexSet_free_block_list; // free list of IndexSet bit blocks
  int _interpreter_frame_size;

  PhaseOutput* _output;

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }

  int interpreter_frame_size() const { return _interpreter_frame_size; }

  PhaseOutput* output() const { return _output; }
  void set_output(PhaseOutput* o) { _output = o; }

  // ID for this compilation.  Useful for setting breakpoints in the debugger.
  int compile_id() const { return _compile_id; }
  DirectiveSet* directive() const { return _directive; }

  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool subsume_loads() const { return _options._subsume_loads; }
  /** Do escape analysis. */
  bool do_escape_analysis() const { return _options._do_escape_analysis; }
  bool do_iterative_escape_analysis() const { return _options._do_iterative_escape_analysis; }
  /** Do boxing elimination. */
  bool eliminate_boxing() const { return _options._eliminate_boxing; }
  /** Do aggressive boxing elimination. */
  bool aggressive_unboxing() const { return _options._eliminate_boxing && AggressiveUnboxing; }
  bool should_install_code() const { return _options._install_code; }
  /** Do locks coarsening. */
  bool do_locks_coarsening() const { return _options._do_locks_coarsening; }

  // Other fixed compilation parameters.
  ciMethod* method() const { return _method; }
  int entry_bci() const { return _entry_bci; }
  bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
  bool is_method_compilation() const { return (_method != nullptr && !_method->flags().is_native()); }
  const TypeFunc* tf() const { assert(_tf != nullptr, ""); return _tf; }
  void init_tf(const TypeFunc* tf) { assert(_tf == nullptr, ""); _tf = tf; }
  InlineTree* ilt() const { return _ilt; }
  address stub_function() const { return _stub_function; }
  const char* stub_name() const { return _stub_name; }
  address stub_entry_point() const { return _stub_entry_point; }
  void set_stub_entry_point(address z) { _stub_entry_point = z; }

  // Control of this compilation.
  int fixed_slots() const { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
  void set_fixed_slots(int n) { _fixed_slots = n; }
  int major_progress() const { return _major_progress; }
  void set_inlining_progress(bool z) { _inlining_progress = z; }
  int inlining_progress() const { return _inlining_progress; }
  void set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
  int inlining_incrementally() const { return _inlining_incrementally; }
  void set_do_cleanup(bool z) { _do_cleanup = z; }
  int do_cleanup() const { return _do_cleanup; }
  void set_major_progress() { _major_progress++; }
  void restore_major_progress(int progress) { _major_progress += progress; }
  void clear_major_progress() { _major_progress = 0; }
  int max_inline_size() const { return _max_inline_size; }
  void set_freq_inline_size(int n) { _freq_inline_size = n; }
  int freq_inline_size() const { return _freq_inline_size; }
  void set_max_inline_size(int n) { _max_inline_size = n; }
  bool has_loops() const { return _has_loops; }
  void set_has_loops(bool z) { _has_loops = z; }
  bool has_split_ifs() const { return _has_split_ifs; }
  void set_has_split_ifs(bool z) { _has_split_ifs = z; }
  bool has_unsafe_access() const { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
  bool has_stringbuilder() const { return _has_stringbuilder; }
  void set_has_stringbuilder(bool z) { _has_stringbuilder = z; }
  bool has_boxed_value() const { return _has_boxed_value; }
  void set_has_boxed_value(bool z) { _has_boxed_value = z; }
  bool has_reserved_stack_access() const { return _has_reserved_stack_access; }
  void set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }
  uint max_vector_size() const { return _max_vector_size; }
  void set_max_vector_size(uint s) { _max_vector_size = s; }
  bool clear_upper_avx() const { return _clear_upper_avx; }
  void set_clear_upper_avx(bool s) { _clear_upper_avx = s; }
  void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
  uint trap_count(uint r) const { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool trap_can_recompile() const { return _trap_can_recompile; }
  void set_trap_can_recompile(bool z) { _trap_can_recompile = z; }
  uint decompile_count() const { return _decompile_count; }
  void set_decompile_count(uint c) { _decompile_count = c; }
  bool allow_range_check_smearing() const;
  bool do_inlining() const { return _do_inlining; }
  void set_do_inlining(bool z) { _do_inlining = z; }
  bool do_scheduling() const { return _do_scheduling; }
  void set_do_scheduling(bool z) { _do_scheduling = z; }
  bool do_freq_based_layout() const { return _do_freq_based_layout; }
  void set_do_freq_based_layout(bool z) { _do_freq_based_layout = z; }
  bool do_vector_loop() const { return _do_vector_loop; }
  void set_do_vector_loop(bool z) { _do_vector_loop = z; }
  bool use_cmove() const { return _use_cmove; }
  void set_use_cmove(bool z) { _use_cmove = z; }
  bool do_aliasing() const { return _do_aliasing; }
  bool print_assembly() const { return _print_assembly; }
  void set_print_assembly(bool z) { _print_assembly = z; }
  bool print_inlining() const { return _print_inlining; }
  void set_print_inlining(bool z) { _print_inlining = z; }
  bool print_intrinsics() const { return _print_intrinsics; }
  void set_print_intrinsics(bool z) { _print_intrinsics = z; }
  RTMState rtm_state() const { return _rtm_state; }
  void set_rtm_state(RTMState s) { _rtm_state = s; }
  bool use_rtm() const { return (_rtm_state & NoRTM) == 0; }
  bool profile_rtm() const { return _rtm_state == ProfileRTM; }
  uint max_node_limit() const { return (uint)_max_node_limit; }
  void set_max_node_limit(uint n) { _max_node_limit = n; }
  bool clinit_barrier_on_entry() { return _clinit_barrier_on_entry; }
  void set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }
  bool has_monitors() const { return _has_monitors; }
  void set_has_monitors(bool v) { _has_monitors = v; }

  // check the CompilerOracle for special behaviours for this compile
  bool method_has_option(enum CompileCommand option) {
    return method() != nullptr && method()->has_option(option);
  }

#ifndef PRODUCT
  uint next_igv_idx() { return _igv_idx++; }
  bool trace_opto_output() const { return _trace_opto_output; }
  void print_ideal_ir(const char* phase_name);
  bool should_print_ideal() const { return _directive->PrintIdealOption; }
  bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
  int _in_dump_cnt;               // Required for dumping ir nodes.
#endif
  bool has_irreducible_loop() const { return _has_irreducible_loop; }
  void set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }

  // JSR 292
  bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  Ticks _latest_stage_start_counter;

  void begin_method();
  void end_method();
  bool should_print_igv(int level);
  bool should_print_phase(CompilerPhaseType cpt);

  void print_method(CompilerPhaseType cpt, int level, Node* n = nullptr);

#ifndef PRODUCT
  void dump_igv(const char* graph_name, int level = 3) {
    if (should_print_igv(level)) {
      _igv_printer->print_method(graph_name, level);
    }
  }

  void igv_print_method_to_file(const char* phase_name = "Debug", bool append = false);
  void igv_print_method_to_network(const char* phase_name = "Debug");
  static IdealGraphPrinter* debug_file_printer() { return _debug_file_printer; }
  static IdealGraphPrinter* debug_network_printer() { return _debug_network_printer; }
#endif

  int macro_count() const { return _macro_nodes.length(); }
  int parse_predicate_count() const { return _parse_predicate_opaqs.length(); }
  int template_assertion_predicate_count() const { return _template_assertion_predicate_opaqs.length(); }
  int expensive_count() const { return _expensive_nodes.length(); }
  int coarsened_count() const { return _coarsened_locks.length(); }

  Node* macro_node(int idx) const { return _macro_nodes.at(idx); }
  Node* parse_predicate_opaque1_node(int idx) const { return _parse_predicate_opaqs.at(idx); }

  Node* template_assertion_predicate_opaq_node(int idx) const {
    return _template_assertion_predicate_opaqs.at(idx);
  }

  Node* expensive_node(int idx) const { return _expensive_nodes.at(idx); }

  ConnectionGraph* congraph() { return _congraph; }
  void set_congraph(ConnectionGraph* congraph) { _congraph = congraph; }
  void add_macro_node(Node* n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes.contains(n), "duplicate entry in expand list");
    _macro_nodes.append(n);
  }
  void remove_macro_node(Node* n) {
    // this function may be called twice for a node, so we can only remove it
    // if it still exists.
    _macro_nodes.remove_if_existing(n);
    // remove from _parse_predicate_opaqs list also if it is there
    if (parse_predicate_count() > 0) {
      _parse_predicate_opaqs.remove_if_existing(n);
    }
    // Remove from coarsened locks list if present
    if (coarsened_count() > 0) {
      remove_coarsened_lock(n);
    }
  }
  void add_expensive_node(Node* n);
  void remove_expensive_node(Node* n) {
    _expensive_nodes.remove_if_existing(n);
  }
  void add_parse_predicate_opaq(Node* n) {
    assert(!_parse_predicate_opaqs.contains(n), "duplicate entry in Parse Predicate opaque1 list");
    assert(_macro_nodes.contains(n), "should have already been in macro list");
    _parse_predicate_opaqs.append(n);
  }
  void add_template_assertion_predicate_opaq(Node* n) {
    assert(!_template_assertion_predicate_opaqs.contains(n),
           "duplicate entry in template assertion predicate opaque4 list");
    _template_assertion_predicate_opaqs.append(n);
  }
  void remove_template_assertion_predicate_opaq(Node* n) {
    if (template_assertion_predicate_count() > 0) {
      _template_assertion_predicate_opaqs.remove_if_existing(n);
    }
  }
  void add_coarsened_locks(GrowableArray<AbstractLockNode*>& locks);
  void remove_coarsened_lock(Node* n);
  bool coarsened_locks_consistent();
  void mark_unbalanced_boxes() const;

  bool post_loop_opts_phase() { return _post_loop_opts_phase; }
  void set_post_loop_opts_phase() { _post_loop_opts_phase = true; }
  void reset_post_loop_opts_phase() { _post_loop_opts_phase = false; }

  void record_for_post_loop_opts_igvn(Node* n);
  void remove_from_post_loop_opts_igvn(Node* n);
  void process_for_post_loop_opts_igvn(PhaseIterGVN& igvn);

  void record_unstable_if_trap(UnstableIfTrap* trap);
  bool remove_unstable_if_trap(CallStaticJavaNode* unc, bool yield);
  void remove_useless_unstable_if_traps(Unique_Node_List& useful);
  void process_for_unstable_if_traps(PhaseIterGVN& igvn);

  void sort_macro_nodes();

  // Remove the opaque nodes that protect the Parse Predicates so that the unused checks and
  // uncommon traps will be eliminated from the graph.
  void cleanup_parse_predicates(PhaseIterGVN& igvn) const;
  bool is_predicate_opaq(Node* n) const {
    return _parse_predicate_opaqs.contains(n);
  }

  // Are there candidate expensive nodes for optimization?
  bool should_optimize_expensive_nodes(PhaseIterGVN& igvn);
  // Check whether n1 and n2 are similar
  static int cmp_expensive_nodes(Node* n1, Node* n2);
  // Sort expensive nodes to locate similar expensive nodes
  void sort_expensive_nodes();

  // Compilation environment.
  Arena* comp_arena() { return &_comp_arena; }
  ciEnv* env() const { return _env; }
  CompileLog* log() const { return _log; }

  bool failing() const {
    return _env->failing() ||
           _failure_reason.get() != nullptr;
  }

  const char* failure_reason() const {
    return _env->failing() ? _env->failure_reason()
                           : _failure_reason.get();
  }

  bool failure_reason_is(const char* r) const {
    return (r == _failure_reason.get()) ||
           (r != nullptr &&
            _failure_reason.get() != nullptr &&
            strcmp(r, _failure_reason.get()) == 0);
  }

  void record_failure(const char* reason);
  void record_method_not_compilable(const char* reason) {
    env()->record_method_not_compilable(reason);
    // Record failure reason.
    record_failure(reason);
  }
  bool check_node_count(uint margin, const char* reason) {
    if (live_nodes() + margin > max_node_limit()) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }
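
  // Example (illustrative sketch): callers probe the node budget before a
  // transformation that can create many nodes, and bail out of the
  // compilation gracefully if the margin would be exceeded:
  //
  //   if (C->check_node_count(NodeLimitFudgeFactor, "out of nodes")) {
  //     return;   // compilation has already been marked as failing
  //   }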

  // Node management
  uint unique() const { return _unique; }
  uint next_unique() { return _unique++; }
  void set_unique(uint i) { _unique = i; }
  Arena* node_arena() { return &_node_arena; }
  Arena* old_arena() { return &_old_arena; }
  RootNode* root() const { return _root; }
  void set_root(RootNode* r) { _root = r; }
  StartNode* start() const;       // (Derived from root.)
  void init_start(StartNode* s);
  Node* immutable_memory();

  Node* recent_alloc_ctl() const { return _recent_alloc_ctl; }
  Node* recent_alloc_obj() const { return _recent_alloc_obj; }
  void set_recent_alloc(Node* ctl, Node* obj) {
    _recent_alloc_ctl = ctl;
    _recent_alloc_obj = obj;
  }
  void record_dead_node(uint idx) {
    if (_dead_node_list.test_set(idx)) return;
    _dead_node_count++;
  }
  void reset_dead_node_list() {
    _dead_node_list.reset();
    _dead_node_count = 0;
  }
  uint live_nodes() const {
    int val = _unique - _dead_node_count;
    assert(val >= 0, "number of tracked dead nodes %d more than created nodes %d", _dead_node_count, _unique);
    return (uint) val;
  }
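
  // Example (illustrative sketch): every node allocation bumps _unique and
  // each disconnected node is ticked off once in the dead node list, so the
  // live count is a constant-time difference rather than a graph walk:
  //
  //   uint before = C->live_nodes();
  //   C->record_dead_node(n->_idx);   // idempotent per node index
  //   assert(C->live_nodes() == before - 1, "one fewer live node");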
#ifdef ASSERT
  void set_phase_optimize_finished() { _phase_optimize_finished = true; }
  bool phase_optimize_finished() const { return _phase_optimize_finished; }
  uint count_live_nodes_by_graph_walk();
  void print_missing_nodes();
#endif

  // Record modified nodes to check that they are put on IGVN worklist
  void record_modified_node(Node* n) NOT_DEBUG_RETURN;
  void remove_modified_node(Node* n) NOT_DEBUG_RETURN;
  DEBUG_ONLY(Unique_Node_List* modified_nodes() const { return _modified_nodes; })

  MachConstantBaseNode* mach_constant_base_node();
  bool has_mach_constant_base_node() const { return _mach_constant_base_node != nullptr; }
  // Generated by adlc, true if CallNode requires MachConstantBase.
  bool needs_deep_clone_jvms();

  // Handy undefined Node
  Node* top() const { return _top; }

  // these are used by code that needs to know about creation and transformation of top:
  Node* cached_top_node() { return _top; }
  void set_cached_top_node(Node* tn);

  GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
  void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
  Node_Notes* default_node_notes() const { return _default_node_notes; }
  void set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }

  Node_Notes* node_notes_at(int idx) {
    return locate_node_notes(_node_note_array, idx, false);
  }
  inline bool set_node_notes_at(int idx, Node_Notes* value);

  // Copy notes from source to dest, if they exist.
  // Overwrite dest only if source provides something.
  // Return true if information was moved.
  bool copy_node_notes_to(Node* dest, Node* source);

  // Workhorse function to sort out the blocked Node_Notes array:
  inline Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
                                       int idx, bool can_grow = false);

  void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);
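
  // Example (illustrative sketch): the note array is blocked, so a node index
  // decomposes into a block number and an offset within that block, and each
  // block is a contiguous run of _node_notes_block_size Node_Notes:
  //
  //   int block  = idx >> _log2_node_notes_block_size;        // which block
  //   int offset = idx & (_node_notes_block_size - 1);        // slot within it
  //   Node_Notes* nn = _node_note_array->at(block) + offset;  // assumes block exists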

  // Type management
  Arena* type_arena() { return _type_arena; }
  Dict* type_dict() { return _type_dict; }
  size_t type_last_size() { return _type_last_size; }
  int num_alias_types() { return _num_alias_types; }

  void init_type_arena() { _type_arena = &_Compile_types; }
  void set_type_arena(Arena* a) { _type_arena = a; }
  void set_type_dict(Dict* d) { _type_dict = d; }
  void set_type_last_size(size_t sz) { _type_last_size = sz; }

  const TypeFunc* last_tf(ciMethod* m) {
    return (m == _last_tf_m) ? _last_tf : nullptr;
  }
  void set_last_tf(ciMethod* m, const TypeFunc* tf) {
    assert(m != nullptr || tf == nullptr, "");
    _last_tf_m = m;
    _last_tf = tf;
  }
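
  // Example (illustrative sketch): this is a one-element cache consulted
  // before building a fresh signature type; compute_tf below is a
  // hypothetical slow path standing in for TypeFunc::make's real work:
  //
  //   const TypeFunc* tf = C->last_tf(method);   // probe the cache
  //   if (tf == nullptr) {
  //     tf = compute_tf(method);                 // hypothetical helper
  //     C->set_last_tf(method, tf);              // refill the cache
  //   }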

  AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
  AliasType* alias_type(const TypePtr* adr_type, ciField* field = nullptr) { return find_alias_type(adr_type, false, field); }
  bool have_alias_type(const TypePtr* adr_type);
  AliasType* alias_type(ciField* field);

  int get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
  const TypePtr* get_adr_type(uint aidx) { return alias_type(aidx)->adr_type(); }
  int get_general_index(uint aidx) { return alias_type(aidx)->general_index(); }

  // Building nodes
  void rethrow_exceptions(JVMState* jvms);
  void return_values(JVMState* jvms);
  JVMState* build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
                                JVMState* jvms, bool allow_inline, float profile_factor,
                                ciKlass* speculative_receiver_type = nullptr,
                                bool allow_intrinsics = true);
  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
    return should_delay_string_inlining(call_method, jvms) ||
           should_delay_boxing_inlining(call_method, jvms) ||
           should_delay_vector_inlining(call_method, jvms);
  }
  bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_vector_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_vector_reboxing_inlining(ciMethod* call_method, JVMState* jvms);

  // Helper functions to identify inlining potential at call-site
  ciMethod* optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klass,
                                  ciKlass* holder, ciMethod* callee,
                                  const TypeOopPtr* receiver_type, bool is_virtual,
                                  bool& call_does_dispatch, int& vtable_index,
                                  bool check_access = true);
  ciMethod* optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, ciKlass* holder,
                              ciMethod* callee, const TypeOopPtr* receiver_type,
                              bool check_access = true);

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = nullptr);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // Report if there were too many traps or recompiles at a method and bci.
  bool too_many_traps_or_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason) {
    return too_many_traps(method, bci, reason) ||
           too_many_recompiles(method, bci, reason);
  }
  // Return a bitset with the reasons where deoptimization is allowed,
  // i.e., where there were not too many uncommon traps.
  int _allowed_reasons;
  int allowed_deopt_reasons() { return _allowed_reasons; }
  void set_allowed_deopt_reasons();

  // Parsing, optimization
  PhaseGVN* initial_gvn() { return _initial_gvn; }
  Unique_Node_List* igvn_worklist() {
    assert(_igvn_worklist != nullptr, "must be created in Compile::Compile");
    return _igvn_worklist;
  }
  Type_Array* types() {
    assert(_types != nullptr, "must be created in Compile::Compile");
    return _types;
  }
  NodeHash* node_hash() {
    assert(_node_hash != nullptr, "must be created in Compile::Compile");
    return _node_hash;
  }
  inline void record_for_igvn(Node* n);   // Body is after class Unique_Node_List in node.hpp.
  inline void remove_for_igvn(Node* n);   // Body is after class Unique_Node_List in node.hpp.
  void set_initial_gvn(PhaseGVN* gvn) { _initial_gvn = gvn; }

  // Replace n by nn using initial_gvn, calling hash_delete and
  // record_for_igvn as needed.
  void gvn_replace_by(Node* n, Node* nn);

  void identify_useful_nodes(Unique_Node_List& useful);
  void update_dead_node_list(Unique_Node_List& useful);
  void disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_List& worklist);

  void remove_useless_node(Node* dead);

  // Record this CallGenerator for inlining at the end of parsing.
  void add_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(_late_inlines_pos, cg);
    _late_inlines_pos++;
  }

  void prepend_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(0, cg);
  }

  void add_string_late_inline(CallGenerator* cg) {
    _string_late_inlines.push(cg);
  }

  void add_boxing_late_inline(CallGenerator* cg) {
    _boxing_late_inlines.push(cg);
  }

  void add_vector_reboxing_late_inline(CallGenerator* cg) {
    _vector_reboxing_late_inlines.push(cg);
  }

  void remove_useless_nodes(GrowableArray<Node*>& node_list, Unique_Node_List& useful);

  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List& useful);
  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Node* dead);

  void remove_useless_coarsened_locks(Unique_Node_List& useful);

  void process_print_inlining();
  void dump_print_inlining();

  bool over_inlining_cutoff() const {
    if (!inlining_incrementally()) {
      return unique() > (uint)NodeCountInliningCutoff;
    } else {
      // Give some room for incremental inlining algorithm to "breathe"
      // and avoid thrashing when live node count is close to the limit.
      // Keep in mind that live_nodes() isn't accurate during inlining until
      // dead node elimination step happens (see Compile::inline_incrementally).
      return live_nodes() > (uint)LiveNodeCountInliningCutoff * 11 / 10;
    }
  }
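
  // Example: with the default LiveNodeCountInliningCutoff of 40000,
  // incremental inlining is considered over budget once live_nodes()
  // exceeds 40000 * 11 / 10 = 44000, i.e. the cutoff plus 10% headroom.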

  void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
  void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
  bool has_mh_late_inlines() const { return _number_of_mh_late_inlines > 0; }

  bool inline_incrementally_one();
  void inline_incrementally_cleanup(PhaseIterGVN& igvn);
  void inline_incrementally(PhaseIterGVN& igvn);
  bool should_delay_inlining() { return AlwaysIncrementalInline || (StressIncrementalInlining && (random() % 2) == 0); }
  void inline_string_calls(bool parse_time);
  void inline_boxing_calls(PhaseIterGVN& igvn);
  bool optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode);
  void remove_root_to_sfpts_edges(PhaseIterGVN& igvn);

  void inline_vector_reboxing_calls();
  bool has_vbox_nodes();

  void process_late_inline_calls_no_inline(PhaseIterGVN& igvn);

  // Matching, CFG layout, allocation, code generation
  PhaseCFG* cfg() { return _cfg; }
  bool has_java_calls() const { return _java_calls > 0; }
  int java_calls() const { return _java_calls; }
  int inner_loops() const { return _inner_loops; }
  Matcher* matcher() { return _matcher; }
  PhaseRegAlloc* regalloc() { return _regalloc; }
  RegMask& FIRST_STACK_mask() { return _FIRST_STACK_mask; }
  Arena* indexSet_arena() { return _indexSet_arena; }
  void* indexSet_free_block_list() { return _indexSet_free_block_list; }
  DebugInformationRecorder* debug_info() { return env()->debug_info(); }

  void update_interpreter_frame_size(int size) {
    if (_interpreter_frame_size < size) {
      _interpreter_frame_size = size;
    }
  }

  void set_matcher(Matcher* m) { _matcher = m; }
  //void set_regalloc(PhaseRegAlloc* ra) { _regalloc = ra; }
  void set_indexSet_arena(Arena* a) { _indexSet_arena = a; }
  void set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; }

  void set_java_calls(int z) { _java_calls = z; }
  void set_inner_loops(int z) { _inner_loops = z; }

  Dependencies* dependencies() { return env()->dependencies(); }

  // Major entry point.  Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, ciMethod* target,
          int entry_bci, Options options, DirectiveSet* directive);

  // Second major entry point.  From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc* (*gen)(),
          address stub_function, const char* stub_name,
          int is_fancy_jump, bool pass_tls,
          bool return_pc, DirectiveSet* directive);

  ~Compile() {
    delete _print_inlining_stream;
  }

  // Are we compiling a method?
  bool has_method() { return method() != nullptr; }

  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // returns true if adr is completely contained in the given alias category
  bool must_alias(const TypePtr* adr, int alias_idx);

  // returns true if adr overlaps with the given alias category
  bool can_alias(const TypePtr* adr, int alias_idx);

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved.  On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  uint in_preserve_stack_slots() {
    return SharedRuntime::in_preserve_stack_slots();
  }

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  static uint out_preserve_stack_slots() {
    return SharedRuntime::out_preserve_stack_slots();
  }

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  uint varargs_C_out_slots_killed() const;

  // Number of Stack Slots consumed by a synchronization entry
  int sync_stack_slots() const;

  // Compute the name of old_SP.  See <arch>.ad for frame layout.
  OptoReg::Name compute_old_SP();

 private:
  // Phase control:
  void Init(bool aliasing);       // Prepare for a single compilation
  void Optimize();                // Given a graph, optimize it
  void Code_Gen();                // Generate code from a graph

  // Management of the AliasType table.
  void grow_alias_types();
  AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
  const TypePtr* flatten_alias_type(const TypePtr* adr_type) const;
  AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);

  void verify_top(Node*) const PRODUCT_RETURN;

  // Intrinsic setup.
  CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);            // constructor
  int intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found);  // helper
  CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);               // query fn
  void register_intrinsic(CallGenerator* cg);                                // update fn

#ifndef PRODUCT
  static juint _intrinsic_hist_count[];
  static jubyte _intrinsic_hist_flags[];
#endif
  // Function calls made by the public function final_graph_reshaping.
  // No need to be made public as they are not called elsewhere.
  void final_graph_reshaping_impl(Node* n, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes);
  void final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop, Unique_Node_List& dead_nodes);
  void final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes);
  void eliminate_redundant_card_marks(Node* n);

  // Logic cone optimization.
  void optimize_logic_cones(PhaseIterGVN& igvn);
  void collect_logic_cone_roots(Unique_Node_List& list);
  void process_logic_cone_root(PhaseIterGVN& igvn, Node* n, VectorSet& visited);
  bool compute_logic_cone(Node* n, Unique_Node_List& partition, Unique_Node_List& inputs);
  uint compute_truth_table(Unique_Node_List& partition, Unique_Node_List& inputs);
  uint eval_macro_logic_op(uint func, uint op1, uint op2, uint op3);
  Node* xform_to_MacroLogicV(PhaseIterGVN& igvn, const TypeVect* vt, Unique_Node_List& partitions, Unique_Node_List& inputs);
  void check_no_dead_use() const NOT_DEBUG_RETURN;

 public:

  // Note:  Histogram array size is about 1 Kb.
  enum {                          // flag bits:
    _intrinsic_worked = 1,        // succeeded at least once
    _intrinsic_failed = 2,        // tried it but it failed
    _intrinsic_disabled = 4,      // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
    _intrinsic_virtual = 8,       // was seen in the virtual form (rare)
    _intrinsic_both = 16          // was seen in the non-virtual form (usual)
  };
  // Update histogram.  Return boolean if this is a first-time occurrence.
  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
                                          bool is_virtual, int flags) PRODUCT_RETURN0;
  static void print_intrinsic_statistics() PRODUCT_RETURN;

  // Graph verification code
  // Walk the node list, verifying that there is a one-to-one
  // correspondence between Use-Def edges and Def-Use edges
  // The option no_dead_code enables stronger checks that the
  // graph is strongly connected from root in both directions.
  void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;

  // Verify bi-directional correspondence of edges
  void verify_bidirectional_edges(Unique_Node_List& visited);

  // End-of-run dumps.
  static void print_statistics() PRODUCT_RETURN;

  // Verify ADLC assumptions during startup
  static void adlc_verification() PRODUCT_RETURN;

  // Definitions of pd methods
  static void pd_compiler2_init();

  // Static parse-time type checking logic for gen_subtype_check:
  enum SubTypeCheckResult { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  SubTypeCheckResult static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk, bool skip = StressReflectiveCode);

  static Node* conv_I2X_index(PhaseGVN* phase, Node* offset, const TypeInt* sizetype,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = nullptr);

  // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
  static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency = false);

  // Auxiliary methods for randomized fuzzing/stressing
  int random();
  bool randomized_select(int count);

  // supporting clone_map
  CloneMap& clone_map();
  void set_clone_map(Dict* d);

  bool needs_clinit_barrier(ciField* ik, ciMethod* accessing_method);
  bool needs_clinit_barrier(ciMethod* ik, ciMethod* accessing_method);
  bool needs_clinit_barrier(ciInstanceKlass* ik, ciMethod* accessing_method);

#ifdef IA32
 private:
  bool _select_24_bit_instr;      // We selected an instruction with a 24-bit result
  bool _in_24_bit_fp_mode;        // We are emitting instructions with 24-bit results

  // Remember if this compilation changes hardware mode to 24-bit precision.
  void set_24_bit_selection_and_mode(bool selection, bool mode) {
    _select_24_bit_instr = selection;
    _in_24_bit_fp_mode = mode;
  }

 public:
  bool select_24_bit_instr() const { return _select_24_bit_instr; }
  bool in_24_bit_fp_mode() const { return _in_24_bit_fp_mode; }
#endif // IA32
#ifdef ASSERT
  VerifyMeetResult* _type_verify;
  void set_exception_backedge() { _exception_backedge = true; }
  bool has_exception_backedge() const { return _exception_backedge; }
#endif

  static bool push_thru_add(PhaseGVN* phase, Node* z, const TypeInteger* tz, const TypeInteger*& rx, const TypeInteger*& ry,
                            BasicType out_bt, BasicType in_bt);

  static Node* narrow_value(BasicType bt, Node* value, const Type* type, PhaseGVN* phase, bool transform_res);
};

#endif // SHARE_OPTO_COMPILE_HPP