166
167 int clone_idx() const { return _clone_idx; }
168 void set_clone_idx(int x) { _clone_idx = x; }
169 bool is_debug() const { return _debug; }
170 void set_debug(bool debug) { _debug = debug; }
171
172 bool same_idx(node_idx_t k1, node_idx_t k2) const { return idx(k1) == idx(k2); }
173 bool same_gen(node_idx_t k1, node_idx_t k2) const { return gen(k1) == gen(k2); }
174 };
175
176 class Options {
177 friend class Compile;
178 friend class VMStructs;
179 private:
180 const bool _subsume_loads; // Load can be matched as part of a larger op.
181 const bool _do_escape_analysis; // Do escape analysis.
182 const bool _do_iterative_escape_analysis; // Do iterative escape analysis.
183 const bool _do_reduce_allocation_merges; // Do try to reduce allocation merges.
184 const bool _eliminate_boxing; // Do boxing elimination.
185 const bool _do_locks_coarsening; // Do locks coarsening
186 const bool _do_superword; // Do SuperWord
187 const bool _install_code; // Install the code that was compiled
188 public:
189 Options(bool subsume_loads,
190 bool do_escape_analysis,
191 bool do_iterative_escape_analysis,
192 bool do_reduce_allocation_merges,
193 bool eliminate_boxing,
194 bool do_locks_coarsening,
195 bool do_superword,
196 bool install_code) :
197 _subsume_loads(subsume_loads),
198 _do_escape_analysis(do_escape_analysis),
199 _do_iterative_escape_analysis(do_iterative_escape_analysis),
200 _do_reduce_allocation_merges(do_reduce_allocation_merges),
201 _eliminate_boxing(eliminate_boxing),
202 _do_locks_coarsening(do_locks_coarsening),
203 _do_superword(do_superword),
204 _install_code(install_code) {
205 }
206
207 // Fixed option set for runtime stub compilation; arguments are positional and
208 // must stay in sync with the constructor parameter order above.
208 static Options for_runtime_stub() {
209 return Options(
210 /* subsume_loads = */ true,
211 /* do_escape_analysis = */ false,
212 /* do_iterative_escape_analysis = */ false,
213 /* do_reduce_allocation_merges = */ false,
214 /* eliminate_boxing = */ false,
215 /* do_locks_coarsening = */ false,
216 /* do_superword = */ true,
217 /* install_code = */ true
218 );
219 }
220 };
220
221 //------------------------------Compile----------------------------------------
222 // This class defines a top-level Compiler invocation.
223
224 class Compile : public Phase {
225 friend class VMStructs;
226
227 public:
228 // Fixed alias indexes. (See also MergeMemNode.)
229 enum {
230 AliasIdxTop = 1, // pseudo-index, aliases to nothing (used as sentinel value)
231 AliasIdxBot = 2, // pseudo-index, aliases to everything
232 AliasIdxRaw = 3 // hard-wired index for TypeRawPtr::BOTTOM
233 };
234
341 bool _do_scheduling; // True if we intend to do scheduling
342 bool _do_freq_based_layout; // True if we intend to do frequency based block layout
343 bool _do_vector_loop; // True if allowed to execute loop in parallel iterations
344 bool _use_cmove; // True if CMove should be used without profitability analysis
345 bool _do_aliasing; // True if we intend to do aliasing
346 bool _print_assembly; // True if we should dump assembly code for this compilation
347 bool _print_inlining; // True if we should print inlining for this compilation
348 bool _print_intrinsics; // True if we should print intrinsics for this compilation
349 #ifndef PRODUCT
350 uint _igv_idx; // Counter for IGV node identifiers
351 uint _igv_phase_iter[PHASE_NUM_TYPES]; // Counters for IGV phase iterations
352 bool _trace_opto_output; // Debug-only: trace opto output for this compilation (read via trace_opto_output())
353 bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
354 #endif
355 bool _has_irreducible_loop; // Found irreducible loops
356 // JSR 292
357 bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
358 bool _has_monitors; // Metadata transferred to nmethod to enable Continuations lock-detection fastpath
359 bool _has_scoped_access; // For shared scope closure
360 bool _clinit_barrier_on_entry; // True if clinit barrier is needed on nmethod entry
361 int _loop_opts_cnt; // loop opts round
362 uint _stress_seed; // Seed for stress testing
363
364 // Compilation environment.
365 Arena _comp_arena; // Arena with lifetime equivalent to Compile
366 void* _barrier_set_state; // Potential GC barrier state for Compile
367 ciEnv* _env; // CI interface
368 DirectiveSet* _directive; // Compiler directive
369 CompileLog* _log; // from CompilerThread
370 CHeapStringHolder _failure_reason; // for record_failure/failing pattern
371 CompilationFailureInfo* _first_failure_details; // Details for the first failure happening during compilation
372 GrowableArray<CallGenerator*> _intrinsics; // List of intrinsics.
373 GrowableArray<Node*> _macro_nodes; // List of nodes which need to be expanded before matching.
374 GrowableArray<ParsePredicateNode*> _parse_predicates; // List of Parse Predicates.
375 // List of OpaqueTemplateAssertionPredicateNode nodes for Template Assertion Predicates.
376 GrowableArray<Node*> _template_assertion_predicate_opaqs;
377 GrowableArray<Node*> _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
378 GrowableArray<Node*> _for_post_loop_igvn; // List of nodes for IGVN after loop opts are over
379 GrowableArray<UnstableIfTrap*> _unstable_if_traps; // List of ifnodes after IGVN
380 GrowableArray<Node_List*> _coarsened_locks; // List of coarsened Lock and Unlock nodes
542 int compile_id() const { return _compile_id; }
543 DirectiveSet* directive() const { return _directive; }
544
545 // Does this compilation allow instructions to subsume loads? User
546 // instructions that subsume a load may result in an unschedulable
547 // instruction sequence.
548 bool subsume_loads() const { return _options._subsume_loads; }
549 /** Do escape analysis. */
550 bool do_escape_analysis() const { return _options._do_escape_analysis; }
551 bool do_iterative_escape_analysis() const { return _options._do_iterative_escape_analysis; }
552 bool do_reduce_allocation_merges() const { return _options._do_reduce_allocation_merges; }
553 /** Do boxing elimination. */
554 bool eliminate_boxing() const { return _options._eliminate_boxing; }
555 /** Do aggressive boxing elimination. */
556 bool aggressive_unboxing() const { return _options._eliminate_boxing && AggressiveUnboxing; }
557 bool should_install_code() const { return _options._install_code; }
558 /** Do locks coarsening. */
559 bool do_locks_coarsening() const { return _options._do_locks_coarsening; }
560 bool do_superword() const { return _options._do_superword; }
561
562 // Other fixed compilation parameters.
563 ciMethod* method() const { return _method; }
564 int entry_bci() const { return _entry_bci; }
565 bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
566 bool is_method_compilation() const { return (_method != nullptr && !_method->flags().is_native()); }
567 const TypeFunc* tf() const { assert(_tf!=nullptr, ""); return _tf; }
568 void init_tf(const TypeFunc* tf) { assert(_tf==nullptr, ""); _tf = tf; }
569 InlineTree* ilt() const { return _ilt; }
570 address stub_function() const { return _stub_function; }
571 const char* stub_name() const { return _stub_name; }
572 address stub_entry_point() const { return _stub_entry_point; }
573 void set_stub_entry_point(address z) { _stub_entry_point = z; }
574
575 // Control of this compilation.
576 int fixed_slots() const { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
577 void set_fixed_slots(int n) { _fixed_slots = n; }
578 int major_progress() const { return _major_progress; }
579 void set_inlining_progress(bool z) { _inlining_progress = z; }
580 int inlining_progress() const { return _inlining_progress; }
581 void set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
620 void set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; }
621 bool do_vector_loop() const { return _do_vector_loop; }
622 void set_do_vector_loop(bool z) { _do_vector_loop = z; }
623 bool use_cmove() const { return _use_cmove; }
624 void set_use_cmove(bool z) { _use_cmove = z; }
625 bool do_aliasing() const { return _do_aliasing; }
626 bool print_assembly() const { return _print_assembly; }
627 void set_print_assembly(bool z) { _print_assembly = z; }
628 bool print_inlining() const { return _print_inlining; }
629 void set_print_inlining(bool z) { _print_inlining = z; }
630 bool print_intrinsics() const { return _print_intrinsics; }
631 void set_print_intrinsics(bool z) { _print_intrinsics = z; }
632 uint max_node_limit() const { return (uint)_max_node_limit; }
633 void set_max_node_limit(uint n) { _max_node_limit = n; }
634 bool clinit_barrier_on_entry() { return _clinit_barrier_on_entry; }
635 void set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }
636 bool has_monitors() const { return _has_monitors; }
637 void set_has_monitors(bool v) { _has_monitors = v; }
638 bool has_scoped_access() const { return _has_scoped_access; }
639 void set_has_scoped_access(bool v) { _has_scoped_access = v; }
640
641 // check the CompilerOracle for special behaviours for this compile
642 bool method_has_option(CompileCommandEnum option) {
643 return method() != nullptr && method()->has_option(option);
644 }
645
646 #ifndef PRODUCT
647 uint next_igv_idx() { return _igv_idx++; }
648 bool trace_opto_output() const { return _trace_opto_output; }
649 void print_ideal_ir(const char* phase_name);
650 bool should_print_ideal() const { return _directive->PrintIdealOption; }
651 bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
652 void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
653 int _in_dump_cnt; // Required for dumping ir nodes.
654 #endif
655 bool has_irreducible_loop() const { return _has_irreducible_loop; }
656 void set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }
657
658 // JSR 292
659 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
|
166
167 int clone_idx() const { return _clone_idx; }
168 void set_clone_idx(int x) { _clone_idx = x; }
169 bool is_debug() const { return _debug; }
170 void set_debug(bool debug) { _debug = debug; }
171
172 bool same_idx(node_idx_t k1, node_idx_t k2) const { return idx(k1) == idx(k2); }
173 bool same_gen(node_idx_t k1, node_idx_t k2) const { return gen(k1) == gen(k2); }
174 };
175
176 class Options {
177 friend class Compile;
178 friend class VMStructs;
179 private:
180 const bool _subsume_loads; // Load can be matched as part of a larger op.
181 const bool _do_escape_analysis; // Do escape analysis.
182 const bool _do_iterative_escape_analysis; // Do iterative escape analysis.
183 const bool _do_reduce_allocation_merges; // Do try to reduce allocation merges.
184 const bool _eliminate_boxing; // Do boxing elimination.
185 const bool _do_locks_coarsening; // Do locks coarsening
186 const bool _for_preload; // Generate code for preload (before Java method execution), do class init barriers
187 const bool _do_superword; // Do SuperWord
188 const bool _install_code; // Install the code that was compiled
189 public:
190 Options(bool subsume_loads,
191 bool do_escape_analysis,
192 bool do_iterative_escape_analysis,
193 bool do_reduce_allocation_merges,
194 bool eliminate_boxing,
195 bool do_locks_coarsening,
196 bool do_superword,
197 bool for_preload,
198 bool install_code) :
199 _subsume_loads(subsume_loads),
200 _do_escape_analysis(do_escape_analysis),
201 _do_iterative_escape_analysis(do_iterative_escape_analysis),
202 _do_reduce_allocation_merges(do_reduce_allocation_merges),
203 _eliminate_boxing(eliminate_boxing),
204 _do_locks_coarsening(do_locks_coarsening),
205 _for_preload(for_preload),
206 _do_superword(do_superword),
207 _install_code(install_code) {
208 }
209
210 // Fixed option set for runtime stub compilation. NOTE: arguments are positional —
210 // they must be passed in the constructor's parameter order (do_superword before for_preload).
210 static Options for_runtime_stub() {
211 return Options(
212 /* subsume_loads = */ true,
213 /* do_escape_analysis = */ false,
214 /* do_iterative_escape_analysis = */ false,
215 /* do_reduce_allocation_merges = */ false,
216 /* eliminate_boxing = */ false,
217 /* do_locks_coarsening = */ false,
218 /* do_superword = */ true,
219 /* for_preload = */ false,
220 /* install_code = */ true
221 );
222 }
223 };
224
225 //------------------------------Compile----------------------------------------
226 // This class defines a top-level Compiler invocation.
227
228 class Compile : public Phase {
229 friend class VMStructs;
230
231 public:
232 // Fixed alias indexes. (See also MergeMemNode.)
233 enum {
234 AliasIdxTop = 1, // pseudo-index, aliases to nothing (used as sentinel value)
235 AliasIdxBot = 2, // pseudo-index, aliases to everything
236 AliasIdxRaw = 3 // hard-wired index for TypeRawPtr::BOTTOM
237 };
238
345 bool _do_scheduling; // True if we intend to do scheduling
346 bool _do_freq_based_layout; // True if we intend to do frequency based block layout
347 bool _do_vector_loop; // True if allowed to execute loop in parallel iterations
348 bool _use_cmove; // True if CMove should be used without profitability analysis
349 bool _do_aliasing; // True if we intend to do aliasing
350 bool _print_assembly; // True if we should dump assembly code for this compilation
351 bool _print_inlining; // True if we should print inlining for this compilation
352 bool _print_intrinsics; // True if we should print intrinsics for this compilation
353 #ifndef PRODUCT
354 uint _igv_idx; // Counter for IGV node identifiers
355 uint _igv_phase_iter[PHASE_NUM_TYPES]; // Counters for IGV phase iterations
356 bool _trace_opto_output; // Debug-only: trace opto output for this compilation (read via trace_opto_output())
357 bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
358 #endif
359 bool _has_irreducible_loop; // Found irreducible loops
360 // JSR 292
361 bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
362 bool _has_monitors; // Metadata transferred to nmethod to enable Continuations lock-detection fastpath
363 bool _has_scoped_access; // For shared scope closure
364 bool _clinit_barrier_on_entry; // True if clinit barrier is needed on nmethod entry
365 bool _has_clinit_barriers; // True if compiled code has clinit barriers
366 int _loop_opts_cnt; // loop opts round
367 uint _stress_seed; // Seed for stress testing
368
369 // Compilation environment.
370 Arena _comp_arena; // Arena with lifetime equivalent to Compile
371 void* _barrier_set_state; // Potential GC barrier state for Compile
372 ciEnv* _env; // CI interface
373 DirectiveSet* _directive; // Compiler directive
374 CompileLog* _log; // from CompilerThread
375 CHeapStringHolder _failure_reason; // for record_failure/failing pattern
376 CompilationFailureInfo* _first_failure_details; // Details for the first failure happening during compilation
377 GrowableArray<CallGenerator*> _intrinsics; // List of intrinsics.
378 GrowableArray<Node*> _macro_nodes; // List of nodes which need to be expanded before matching.
379 GrowableArray<ParsePredicateNode*> _parse_predicates; // List of Parse Predicates.
380 // List of OpaqueTemplateAssertionPredicateNode nodes for Template Assertion Predicates.
381 GrowableArray<Node*> _template_assertion_predicate_opaqs;
382 GrowableArray<Node*> _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
383 GrowableArray<Node*> _for_post_loop_igvn; // List of nodes for IGVN after loop opts are over
384 GrowableArray<UnstableIfTrap*> _unstable_if_traps; // List of ifnodes after IGVN
385 GrowableArray<Node_List*> _coarsened_locks; // List of coarsened Lock and Unlock nodes
547 int compile_id() const { return _compile_id; }
548 DirectiveSet* directive() const { return _directive; }
549
550 // Does this compilation allow instructions to subsume loads? User
551 // instructions that subsume a load may result in an unschedulable
552 // instruction sequence.
553 bool subsume_loads() const { return _options._subsume_loads; }
554 /** Do escape analysis. */
555 bool do_escape_analysis() const { return _options._do_escape_analysis; }
556 bool do_iterative_escape_analysis() const { return _options._do_iterative_escape_analysis; }
557 bool do_reduce_allocation_merges() const { return _options._do_reduce_allocation_merges; }
558 /** Do boxing elimination. */
559 bool eliminate_boxing() const { return _options._eliminate_boxing; }
560 /** Do aggressive boxing elimination. */
561 bool aggressive_unboxing() const { return _options._eliminate_boxing && AggressiveUnboxing; }
562 bool should_install_code() const { return _options._install_code; }
563 /** Do locks coarsening. */
564 bool do_locks_coarsening() const { return _options._do_locks_coarsening; }
565 bool do_superword() const { return _options._do_superword; }
566
567 bool do_clinit_barriers() const { return _options._for_preload; }
568 bool for_preload() const { return _options._for_preload; }
569
570 // Other fixed compilation parameters.
571 ciMethod* method() const { return _method; }
572 int entry_bci() const { return _entry_bci; }
573 bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
574 bool is_method_compilation() const { return (_method != nullptr && !_method->flags().is_native()); }
575 const TypeFunc* tf() const { assert(_tf!=nullptr, ""); return _tf; }
576 void init_tf(const TypeFunc* tf) { assert(_tf==nullptr, ""); _tf = tf; }
577 InlineTree* ilt() const { return _ilt; }
578 address stub_function() const { return _stub_function; }
579 const char* stub_name() const { return _stub_name; }
580 address stub_entry_point() const { return _stub_entry_point; }
581 void set_stub_entry_point(address z) { _stub_entry_point = z; }
582
583 // Control of this compilation.
584 int fixed_slots() const { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
585 void set_fixed_slots(int n) { _fixed_slots = n; }
586 int major_progress() const { return _major_progress; }
587 void set_inlining_progress(bool z) { _inlining_progress = z; }
588 int inlining_progress() const { return _inlining_progress; }
589 void set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
628 void set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; }
629 bool do_vector_loop() const { return _do_vector_loop; }
630 void set_do_vector_loop(bool z) { _do_vector_loop = z; }
631 bool use_cmove() const { return _use_cmove; }
632 void set_use_cmove(bool z) { _use_cmove = z; }
633 bool do_aliasing() const { return _do_aliasing; }
634 bool print_assembly() const { return _print_assembly; }
635 void set_print_assembly(bool z) { _print_assembly = z; }
636 bool print_inlining() const { return _print_inlining; }
637 void set_print_inlining(bool z) { _print_inlining = z; }
638 bool print_intrinsics() const { return _print_intrinsics; }
639 void set_print_intrinsics(bool z) { _print_intrinsics = z; }
640 uint max_node_limit() const { return (uint)_max_node_limit; }
641 void set_max_node_limit(uint n) { _max_node_limit = n; }
642 bool clinit_barrier_on_entry() { return _clinit_barrier_on_entry; }
643 void set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }
644 bool has_monitors() const { return _has_monitors; }
645 void set_has_monitors(bool v) { _has_monitors = v; }
646 bool has_scoped_access() const { return _has_scoped_access; }
647 void set_has_scoped_access(bool v) { _has_scoped_access = v; }
648 bool has_clinit_barriers() { return _has_clinit_barriers; }
649 void set_has_clinit_barriers(bool z) { _has_clinit_barriers = z; }
650
651 // check the CompilerOracle for special behaviours for this compile
652 bool method_has_option(CompileCommandEnum option) {
653 return method() != nullptr && method()->has_option(option);
654 }
655
656 #ifndef PRODUCT
657 uint next_igv_idx() { return _igv_idx++; }
658 bool trace_opto_output() const { return _trace_opto_output; }
659 void print_ideal_ir(const char* phase_name);
660 bool should_print_ideal() const { return _directive->PrintIdealOption; }
661 bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
662 void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
663 int _in_dump_cnt; // Required for dumping ir nodes.
664 #endif
665 bool has_irreducible_loop() const { return _has_irreducible_loop; }
666 void set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }
667
668 // JSR 292
669 bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
|