165
  // Accessors for clone-index bookkeeping (enclosing class declared above this chunk).
  int clone_idx() const { return _clone_idx; }
  void set_clone_idx(int x) { _clone_idx = x; }
  bool is_debug() const { return _debug; }
  void set_debug(bool debug) { _debug = debug; }

  // True if the two node keys decode to the same index / the same generation.
  // NOTE(review): idx()/gen() are declared outside this chunk — semantics assumed from the names.
  bool same_idx(node_idx_t k1, node_idx_t k2) const { return idx(k1) == idx(k2); }
  bool same_gen(node_idx_t k1, node_idx_t k2) const { return gen(k1) == gen(k2); }
173 };
174
// Per-compilation option set, fixed for the lifetime of a Compile object.
// All fields are const and are only read through the friend classes below.
class Options {
  friend class Compile;
  friend class VMStructs;
private:
  const bool _subsume_loads;         // Load can be matched as part of a larger op.
  const bool _do_escape_analysis;    // Do escape analysis.
  const bool _do_iterative_escape_analysis;  // Do iterative escape analysis.
  const bool _do_reduce_allocation_merges;  // Do try to reduce allocation merges.
  const bool _eliminate_boxing;      // Do boxing elimination.
  const bool _do_locks_coarsening;   // Do locks coarsening
  const bool _do_superword;          // Do SuperWord
  const bool _install_code;          // Install the code that was compiled
public:
  // Captures every flag at construction time; there are no setters.
  Options(bool subsume_loads,
          bool do_escape_analysis,
          bool do_iterative_escape_analysis,
          bool do_reduce_allocation_merges,
          bool eliminate_boxing,
          bool do_locks_coarsening,
          bool do_superword,
          bool install_code) :
          _subsume_loads(subsume_loads),
          _do_escape_analysis(do_escape_analysis),
          _do_iterative_escape_analysis(do_iterative_escape_analysis),
          _do_reduce_allocation_merges(do_reduce_allocation_merges),
          _eliminate_boxing(eliminate_boxing),
          _do_locks_coarsening(do_locks_coarsening),
          _do_superword(do_superword),
          _install_code(install_code) {
  }

  // Canonical options for compiling a runtime stub: no escape analysis,
  // no boxing elimination, no lock coarsening.
  static Options for_runtime_stub() {
    return Options(
       /* subsume_loads = */ true,
       /* do_escape_analysis = */ false,
       /* do_iterative_escape_analysis = */ false,
       /* do_reduce_allocation_merges = */ false,
       /* eliminate_boxing = */ false,
       /* do_locks_coarsening = */ false,  // was mislabeled "do_lock_coarsening"
       /* do_superword = */ true,
       /* install_code = */ true
    );
  }
};
219
220 //------------------------------Compile----------------------------------------
221 // This class defines a top-level Compiler invocation.
222
// This class defines a top-level Compiler invocation.
class Compile : public Phase {
  friend class VMStructs;

 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  // Index 0 is implicitly invalid; real alias classes start after AliasIdxRaw.
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };
233
  // --- Per-compilation flags (mutable, unlike the const Options above) ---
  bool _do_scheduling;             // True if we intend to do scheduling
  bool _do_freq_based_layout;      // True if we intend to do frequency based block layout
  bool _do_vector_loop;            // True if allowed to execute loop in parallel iterations
  bool _use_cmove;                 // True if CMove should be used without profitability analysis
  bool _do_aliasing;               // True if we intend to do aliasing
  bool _print_assembly;            // True if we should dump assembly code for this compilation
  bool _print_inlining;            // True if we should print inlining for this compilation
  bool _print_intrinsics;          // True if we should print intrinsics for this compilation
#ifndef PRODUCT
  uint _igv_idx;                   // Counter for IGV node identifiers
  uint _igv_phase_iter[PHASE_NUM_TYPES]; // Counters for IGV phase iterations
  bool _trace_opto_output;         // Debug tracing of the output phase (non-product only)
  bool _parsed_irreducible_loop;   // True if ciTypeFlow detected irreducible loops during parsing
#endif
  bool _has_irreducible_loop;      // Found irreducible loops
  // JSR 292
  bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
  bool _has_monitors;              // Metadata transferred to nmethod to enable Continuations lock-detection fastpath
  bool _has_scoped_access;         // For shared scope closure
  bool _clinit_barrier_on_entry;   // True if clinit barrier is needed on nmethod entry
  int _loop_opts_cnt;              // loop opts round
  uint _stress_seed;               // Seed for stress testing

  // Compilation environment.
  Arena _comp_arena;               // Arena with lifetime equivalent to Compile
  void* _barrier_set_state;        // Potential GC barrier state for Compile
  ciEnv* _env;                     // CI interface
  DirectiveSet* _directive;        // Compiler directive
  CompileLog* _log;                // from CompilerThread
  CHeapStringHolder _failure_reason; // for record_failure/failing pattern
  CompilationFailureInfo* _first_failure_details; // Details for the first failure happening during compilation
  GrowableArray<CallGenerator*> _intrinsics; // List of intrinsics.
  GrowableArray<Node*> _macro_nodes; // List of nodes which need to be expanded before matching.
  GrowableArray<ParsePredicateNode*> _parse_predicates; // List of Parse Predicates.
  GrowableArray<Node*> _template_assertion_predicate_opaqs; // List of Opaque4 nodes for Template Assertion Predicates.
  GrowableArray<Node*> _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
  GrowableArray<Node*> _for_post_loop_igvn; // List of nodes for IGVN after loop opts are over
  GrowableArray<UnstableIfTrap*> _unstable_if_traps; // List of ifnodes after IGVN
  GrowableArray<Node_List*> _coarsened_locks; // List of coarsened Lock and Unlock nodes
  ConnectionGraph* _congraph;      // Escape-analysis connection graph — presumably set when EA runs; confirm against users
  int compile_id() const { return _compile_id; }
  DirectiveSet* directive() const { return _directive; }

  // --- Read-only views of the immutable Options for this compilation ---
  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool subsume_loads() const { return _options._subsume_loads; }
  /** Do escape analysis. */
  bool do_escape_analysis() const { return _options._do_escape_analysis; }
  bool do_iterative_escape_analysis() const { return _options._do_iterative_escape_analysis; }
  bool do_reduce_allocation_merges() const { return _options._do_reduce_allocation_merges; }
  /** Do boxing elimination. */
  bool eliminate_boxing() const { return _options._eliminate_boxing; }
  /** Do aggressive boxing elimination (boxing elimination plus the AggressiveUnboxing flag). */
  bool aggressive_unboxing() const { return _options._eliminate_boxing && AggressiveUnboxing; }
  bool should_install_code() const { return _options._install_code; }
  /** Do locks coarsening. */
  bool do_locks_coarsening() const { return _options._do_locks_coarsening; }
  bool do_superword() const { return _options._do_superword; }

  // Other fixed compilation parameters.
  ciMethod* method() const { return _method; }
  int entry_bci() const { return _entry_bci; }
  // An OSR compilation enters at a bytecode index other than InvocationEntryBci.
  bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
  bool is_method_compilation() const { return (_method != nullptr && !_method->flags().is_native()); }
  // _tf is set exactly once via init_tf() and must be set before tf() is queried.
  const TypeFunc* tf() const { assert(_tf!=nullptr, ""); return _tf; }
  void init_tf(const TypeFunc* tf) { assert(_tf==nullptr, ""); _tf = tf; }
  InlineTree* ilt() const { return _ilt; }
  address stub_function() const { return _stub_function; }
  const char* stub_name() const { return _stub_name; }
  address stub_entry_point() const { return _stub_entry_point; }
  void set_stub_entry_point(address z) { _stub_entry_point = z; }

  // Control of this compilation.
  int fixed_slots() const { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
  void set_fixed_slots(int n) { _fixed_slots = n; }
  int major_progress() const { return _major_progress; }
  void set_inlining_progress(bool z) { _inlining_progress = z; }
  int inlining_progress() const { return _inlining_progress; }
  void set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
  void set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; }
  bool do_vector_loop() const { return _do_vector_loop; }
  void set_do_vector_loop(bool z) { _do_vector_loop = z; }
  bool use_cmove() const { return _use_cmove; }
  void set_use_cmove(bool z) { _use_cmove = z; }
  bool do_aliasing() const { return _do_aliasing; }
  bool print_assembly() const { return _print_assembly; }
  void set_print_assembly(bool z) { _print_assembly = z; }
  bool print_inlining() const { return _print_inlining; }
  void set_print_inlining(bool z) { _print_inlining = z; }
  bool print_intrinsics() const { return _print_intrinsics; }
  void set_print_intrinsics(bool z) { _print_intrinsics = z; }
  uint max_node_limit() const { return (uint)_max_node_limit; }
  void set_max_node_limit(uint n) { _max_node_limit = n; }
  bool clinit_barrier_on_entry() { return _clinit_barrier_on_entry; }  // NOTE(review): could be const — matches sibling getters otherwise
  void set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }
  bool has_monitors() const { return _has_monitors; }
  void set_has_monitors(bool v) { _has_monitors = v; }
  bool has_scoped_access() const { return _has_scoped_access; }
  void set_has_scoped_access(bool v) { _has_scoped_access = v; }

  // check the CompilerOracle for special behaviours for this compile
  bool method_has_option(CompileCommandEnum option) {
    return method() != nullptr && method()->has_option(option);
  }

#ifndef PRODUCT
  uint next_igv_idx() { return _igv_idx++; }
  bool trace_opto_output() const { return _trace_opto_output; }
  void print_ideal_ir(const char* phase_name);
  bool should_print_ideal() const { return _directive->PrintIdealOption; }
  bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
  int _in_dump_cnt; // Required for dumping ir nodes.
#endif
  bool has_irreducible_loop() const { return _has_irreducible_loop; }
  void set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }

  // JSR 292
  bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
|
165
  // Accessors for clone-index bookkeeping (enclosing class declared above this chunk).
  int clone_idx() const { return _clone_idx; }
  void set_clone_idx(int x) { _clone_idx = x; }
  bool is_debug() const { return _debug; }
  void set_debug(bool debug) { _debug = debug; }

  // True if the two node keys decode to the same index / the same generation.
  // NOTE(review): idx()/gen() are declared outside this chunk — semantics assumed from the names.
  bool same_idx(node_idx_t k1, node_idx_t k2) const { return idx(k1) == idx(k2); }
  bool same_gen(node_idx_t k1, node_idx_t k2) const { return gen(k1) == gen(k2); }
173 };
174
// Per-compilation option set, fixed for the lifetime of a Compile object.
// All fields are const and are only read through the friend classes below.
class Options {
  friend class Compile;
  friend class VMStructs;
private:
  const bool _subsume_loads;         // Load can be matched as part of a larger op.
  const bool _do_escape_analysis;    // Do escape analysis.
  const bool _do_iterative_escape_analysis;  // Do iterative escape analysis.
  const bool _do_reduce_allocation_merges;  // Do try to reduce allocation merges.
  const bool _eliminate_boxing;      // Do boxing elimination.
  const bool _do_locks_coarsening;   // Do locks coarsening
  const bool _for_preload;           // Generate code for preload (before Java method execution), do class init barriers
  const bool _do_superword;          // Do SuperWord
  const bool _install_code;          // Install the code that was compiled
public:
  // Captures every flag at construction time; there are no setters.
  // NOTE: parameter order is (..., do_superword, for_preload, install_code) —
  // callers must keep positional arguments aligned with this order.
  Options(bool subsume_loads,
          bool do_escape_analysis,
          bool do_iterative_escape_analysis,
          bool do_reduce_allocation_merges,
          bool eliminate_boxing,
          bool do_locks_coarsening,
          bool do_superword,
          bool for_preload,
          bool install_code) :
          _subsume_loads(subsume_loads),
          _do_escape_analysis(do_escape_analysis),
          _do_iterative_escape_analysis(do_iterative_escape_analysis),
          _do_reduce_allocation_merges(do_reduce_allocation_merges),
          _eliminate_boxing(eliminate_boxing),
          _do_locks_coarsening(do_locks_coarsening),
          _for_preload(for_preload),
          _do_superword(do_superword),
          _install_code(install_code) {
  }

  // Canonical options for compiling a runtime stub: no escape analysis,
  // no boxing elimination, no lock coarsening, not a preload compilation.
  // Fix: the for_preload/do_superword arguments were previously swapped
  // relative to the constructor's parameter order, which silently compiled
  // runtime stubs with do_superword = false and for_preload = true.
  static Options for_runtime_stub() {
    return Options(
       /* subsume_loads = */ true,
       /* do_escape_analysis = */ false,
       /* do_iterative_escape_analysis = */ false,
       /* do_reduce_allocation_merges = */ false,
       /* eliminate_boxing = */ false,
       /* do_locks_coarsening = */ false,
       /* do_superword = */ true,
       /* for_preload = */ false,
       /* install_code = */ true
    );
  }
};
223
224 //------------------------------Compile----------------------------------------
225 // This class defines a top-level Compiler invocation.
226
// This class defines a top-level Compiler invocation.
class Compile : public Phase {
  friend class VMStructs;

 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  // Index 0 is implicitly invalid; real alias classes start after AliasIdxRaw.
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };
237
  // --- Per-compilation flags (mutable, unlike the const Options above) ---
  bool _do_scheduling;             // True if we intend to do scheduling
  bool _do_freq_based_layout;      // True if we intend to do frequency based block layout
  bool _do_vector_loop;            // True if allowed to execute loop in parallel iterations
  bool _use_cmove;                 // True if CMove should be used without profitability analysis
  bool _do_aliasing;               // True if we intend to do aliasing
  bool _print_assembly;            // True if we should dump assembly code for this compilation
  bool _print_inlining;            // True if we should print inlining for this compilation
  bool _print_intrinsics;          // True if we should print intrinsics for this compilation
#ifndef PRODUCT
  uint _igv_idx;                   // Counter for IGV node identifiers
  uint _igv_phase_iter[PHASE_NUM_TYPES]; // Counters for IGV phase iterations
  bool _trace_opto_output;         // Debug tracing of the output phase (non-product only)
  bool _parsed_irreducible_loop;   // True if ciTypeFlow detected irreducible loops during parsing
#endif
  bool _has_irreducible_loop;      // Found irreducible loops
  // JSR 292
  bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
  bool _has_monitors;              // Metadata transferred to nmethod to enable Continuations lock-detection fastpath
  bool _has_scoped_access;         // For shared scope closure
  bool _clinit_barrier_on_entry;   // True if clinit barrier is needed on nmethod entry
  bool _has_clinit_barriers;       // True if compiled code has clinit barriers
  int _loop_opts_cnt;              // loop opts round
  uint _stress_seed;               // Seed for stress testing

  // Compilation environment.
  Arena _comp_arena;               // Arena with lifetime equivalent to Compile
  void* _barrier_set_state;        // Potential GC barrier state for Compile
  ciEnv* _env;                     // CI interface
  DirectiveSet* _directive;        // Compiler directive
  CompileLog* _log;                // from CompilerThread
  CHeapStringHolder _failure_reason; // for record_failure/failing pattern
  CompilationFailureInfo* _first_failure_details; // Details for the first failure happening during compilation
  GrowableArray<CallGenerator*> _intrinsics; // List of intrinsics.
  GrowableArray<Node*> _macro_nodes; // List of nodes which need to be expanded before matching.
  GrowableArray<ParsePredicateNode*> _parse_predicates; // List of Parse Predicates.
  GrowableArray<Node*> _template_assertion_predicate_opaqs; // List of Opaque4 nodes for Template Assertion Predicates.
  GrowableArray<Node*> _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
  GrowableArray<Node*> _for_post_loop_igvn; // List of nodes for IGVN after loop opts are over
  GrowableArray<UnstableIfTrap*> _unstable_if_traps; // List of ifnodes after IGVN
  GrowableArray<Node_List*> _coarsened_locks; // List of coarsened Lock and Unlock nodes
  ConnectionGraph* _congraph;      // Escape-analysis connection graph — presumably set when EA runs; confirm against users
  int compile_id() const { return _compile_id; }
  DirectiveSet* directive() const { return _directive; }

  // --- Read-only views of the immutable Options for this compilation ---
  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool subsume_loads() const { return _options._subsume_loads; }
  /** Do escape analysis. */
  bool do_escape_analysis() const { return _options._do_escape_analysis; }
  bool do_iterative_escape_analysis() const { return _options._do_iterative_escape_analysis; }
  bool do_reduce_allocation_merges() const { return _options._do_reduce_allocation_merges; }
  /** Do boxing elimination. */
  bool eliminate_boxing() const { return _options._eliminate_boxing; }
  /** Do aggressive boxing elimination (boxing elimination plus the AggressiveUnboxing flag). */
  bool aggressive_unboxing() const { return _options._eliminate_boxing && AggressiveUnboxing; }
  bool should_install_code() const { return _options._install_code; }
  /** Do locks coarsening. */
  bool do_locks_coarsening() const { return _options._do_locks_coarsening; }
  bool do_superword() const { return _options._do_superword; }

  // Both are views of the same _for_preload option: a preload compilation
  // is exactly one that emits clinit barriers.
  bool do_clinit_barriers() const { return _options._for_preload; }
  bool for_preload() const { return _options._for_preload; }

  // Other fixed compilation parameters.
  ciMethod* method() const { return _method; }
  int entry_bci() const { return _entry_bci; }
  // An OSR compilation enters at a bytecode index other than InvocationEntryBci.
  bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
  bool is_method_compilation() const { return (_method != nullptr && !_method->flags().is_native()); }
  // _tf is set exactly once via init_tf() and must be set before tf() is queried.
  const TypeFunc* tf() const { assert(_tf!=nullptr, ""); return _tf; }
  void init_tf(const TypeFunc* tf) { assert(_tf==nullptr, ""); _tf = tf; }
  InlineTree* ilt() const { return _ilt; }
  address stub_function() const { return _stub_function; }
  const char* stub_name() const { return _stub_name; }
  address stub_entry_point() const { return _stub_entry_point; }
  void set_stub_entry_point(address z) { _stub_entry_point = z; }

  // Control of this compilation.
  int fixed_slots() const { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
  void set_fixed_slots(int n) { _fixed_slots = n; }
  int major_progress() const { return _major_progress; }
  void set_inlining_progress(bool z) { _inlining_progress = z; }
  int inlining_progress() const { return _inlining_progress; }
  void set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
  void set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; }
  bool do_vector_loop() const { return _do_vector_loop; }
  void set_do_vector_loop(bool z) { _do_vector_loop = z; }
  bool use_cmove() const { return _use_cmove; }
  void set_use_cmove(bool z) { _use_cmove = z; }
  bool do_aliasing() const { return _do_aliasing; }
  bool print_assembly() const { return _print_assembly; }
  void set_print_assembly(bool z) { _print_assembly = z; }
  bool print_inlining() const { return _print_inlining; }
  void set_print_inlining(bool z) { _print_inlining = z; }
  bool print_intrinsics() const { return _print_intrinsics; }
  void set_print_intrinsics(bool z) { _print_intrinsics = z; }
  uint max_node_limit() const { return (uint)_max_node_limit; }
  void set_max_node_limit(uint n) { _max_node_limit = n; }
  bool clinit_barrier_on_entry() { return _clinit_barrier_on_entry; }  // NOTE(review): could be const — matches sibling getters otherwise
  void set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }
  bool has_monitors() const { return _has_monitors; }
  void set_has_monitors(bool v) { _has_monitors = v; }
  bool has_scoped_access() const { return _has_scoped_access; }
  void set_has_scoped_access(bool v) { _has_scoped_access = v; }
  bool has_clinit_barriers() { return _has_clinit_barriers; }  // NOTE(review): could be const — matches sibling getters otherwise
  void set_has_clinit_barriers(bool z) { _has_clinit_barriers = z; }

  // check the CompilerOracle for special behaviours for this compile
  bool method_has_option(CompileCommandEnum option) {
    return method() != nullptr && method()->has_option(option);
  }

#ifndef PRODUCT
  uint next_igv_idx() { return _igv_idx++; }
  bool trace_opto_output() const { return _trace_opto_output; }
  void print_ideal_ir(const char* phase_name);
  bool should_print_ideal() const { return _directive->PrintIdealOption; }
  bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
  int _in_dump_cnt; // Required for dumping ir nodes.
#endif
  bool has_irreducible_loop() const { return _has_irreducible_loop; }
  void set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }

  // JSR 292
  bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
|