src/hotspot/share/opto/compile.hpp
class AbstractLockNode;
class AddPNode;
class Block;
class Bundle;
class CallGenerator;
+ class CallNode;
class CallStaticJavaNode;
class CloneMap;
class CompilationFailureInfo;
class ConnectionGraph;
class IdealGraphPrinter;
class TypeFunc;
class TypeVect;
class Type_Array;
class Unique_Node_List;
class UnstableIfTrap;
+ class InlineTypeNode;
class nmethod;
class Node_Stack;
struct Final_Reshape_Counts;
class VerifyMeetResult;
bool _has_split_ifs; // True if the method _may_ have some split-if
bool _has_unsafe_access; // True if the method _may_ produce faults in unsafe loads or stores.
bool _has_stringbuilder; // True if StringBuffers or StringBuilders are allocated
bool _has_boxed_value; // True if a boxed object is allocated
bool _has_reserved_stack_access; // True if the method or an inlined method is annotated with ReservedStackAccess
+ bool _has_circular_inline_type; // True if the method loads an inline type with a circular, non-flat field
uint _max_vector_size; // Maximum size of generated vectors
bool _clear_upper_avx; // Clear upper bits of ymm registers using vzeroupper
uint _trap_hist[trapHistLength]; // Cumulative traps
bool _trap_can_recompile; // Have we emitted a recompiling trap?
uint _decompile_count; // Cumulative decompilation counts.
bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
bool _has_monitors; // Metadata transferred to nmethod to enable Continuations lock-detection fastpath
bool _has_scoped_access; // For shared scope closure
bool _clinit_barrier_on_entry; // True if clinit barrier is needed on nmethod entry
int _loop_opts_cnt; // loop opts round
+ bool _has_flat_accesses; // Any known flat array accesses?
+ bool _flat_accesses_share_alias; // Initially all flat arrays share a single slice
+ bool _scalarize_in_safepoints; // Scalarize inline types in safepoint debug info
uint _stress_seed; // Seed for stress testing
// Compilation environment.
Arena _comp_arena; // Arena with lifetime equivalent to Compile
void* _barrier_set_state; // Potential GC barrier state for Compile
GrowableArray<ParsePredicateNode*> _parse_predicates; // List of Parse Predicates.
// List of OpaqueTemplateAssertionPredicateNode nodes for Template Assertion Predicates.
GrowableArray<Node*> _template_assertion_predicate_opaqs;
GrowableArray<Node*> _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
GrowableArray<Node*> _for_post_loop_igvn; // List of nodes for IGVN after loop opts are over
+ GrowableArray<Node*> _inline_type_nodes; // List of InlineType nodes
GrowableArray<UnstableIfTrap*> _unstable_if_traps; // List of ifnodes after IGVN
GrowableArray<Node_List*> _coarsened_locks; // List of coarsened Lock and Unlock nodes
ConnectionGraph* _congraph;
#ifndef PRODUCT
IdealGraphPrinter* _igv_printer;
void set_has_stringbuilder(bool z) { _has_stringbuilder = z; }
bool has_boxed_value() const { return _has_boxed_value; }
void set_has_boxed_value(bool z) { _has_boxed_value = z; }
bool has_reserved_stack_access() const { return _has_reserved_stack_access; }
void set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }
+ bool has_circular_inline_type() const { return _has_circular_inline_type; }
+ void set_has_circular_inline_type(bool z) { _has_circular_inline_type = z; }
uint max_vector_size() const { return _max_vector_size; }
void set_max_vector_size(uint s) { _max_vector_size = s; }
bool clear_upper_avx() const { return _clear_upper_avx; }
void set_clear_upper_avx(bool s) { _clear_upper_avx = s; }
void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
void set_print_intrinsics(bool z) { _print_intrinsics = z; }
uint max_node_limit() const { return (uint)_max_node_limit; }
void set_max_node_limit(uint n) { _max_node_limit = n; }
bool clinit_barrier_on_entry() { return _clinit_barrier_on_entry; }
void set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }
+ void set_flat_accesses() { _has_flat_accesses = true; }
+ bool flat_accesses_share_alias() const { return _flat_accesses_share_alias; }
+ void set_flat_accesses_share_alias(bool z) { _flat_accesses_share_alias = z; }
+ bool scalarize_in_safepoints() const { return _scalarize_in_safepoints; }
+ void set_scalarize_in_safepoints(bool z) { _scalarize_in_safepoints = z; }
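// A minimal usage sketch (editor's assumption, not part of this change): parsing
// marks the compilation as soon as a flat array access is emitted, and all such
// accesses initially share one alias slice until a later pass splits them:
//
//   C->set_flat_accesses();                        // seen a flat array access
//   if (C->flat_accesses_share_alias()) {
//     C->adjust_flat_array_access_aliases(igvn);   // may split into finer slices
//   }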
+
+ // Support for scalarized inline type calling convention
+ bool has_scalarized_args() const { return _method != nullptr && _method->has_scalarized_args(); }
+ bool needs_stack_repair() const { return _method != nullptr && _method->get_Method()->c2_needs_stack_repair(); }
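// Hedged sketch (the call sites are outside this diff): frame setup code can
// query these to decide whether inline type arguments arrive field-by-field and
// whether the entry frame must be extended to fit the scalarized signature:
//
//   if (C->needs_stack_repair()) {
//     // reserve extra stack space for the wider scalarized calling convention
//   }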
+
bool has_monitors() const { return _has_monitors; }
void set_has_monitors(bool v) { _has_monitors = v; }
bool has_scoped_access() const { return _has_scoped_access; }
void set_has_scoped_access(bool v) { _has_scoped_access = v; }
void record_for_post_loop_opts_igvn(Node* n);
void remove_from_post_loop_opts_igvn(Node* n);
void process_for_post_loop_opts_igvn(PhaseIterGVN& igvn);
+ // Keep track of inline type nodes for later processing
+ void add_inline_type(Node* n);
+ void remove_inline_type(Node* n);
+ void process_inline_types(PhaseIterGVN &igvn, bool remove = false);
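// Sketch of the intended lifecycle (editor's reading of the API above):
// InlineType nodes are registered on creation and processed late, once it is
// known whether they must be scalarized in safepoint debug info:
//
//   C->add_inline_type(vt);                          // during parsing
//   C->process_inline_types(igvn, /*remove=*/ true); // late, scalarize and drop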
+
+ void adjust_flat_array_access_aliases(PhaseIterGVN& igvn);
+
void record_unstable_if_trap(UnstableIfTrap* trap);
bool remove_unstable_if_trap(CallStaticJavaNode* unc, bool yield);
void remove_useless_unstable_if_traps(Unique_Node_List &useful);
void process_for_unstable_if_traps(PhaseIterGVN& igvn);
_last_tf_m = m;
_last_tf = tf;
}
AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
- AliasType* alias_type(const TypePtr* adr_type, ciField* field = nullptr) { return find_alias_type(adr_type, false, field); }
+ AliasType* alias_type(const TypePtr* adr_type, ciField* field = nullptr, bool uncached = false) { return find_alias_type(adr_type, false, field, uncached); }
bool have_alias_type(const TypePtr* adr_type);
AliasType* alias_type(ciField* field);
- int get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
+ int get_alias_index(const TypePtr* at, bool uncached = false) { return alias_type(at, nullptr, uncached)->index(); }
const TypePtr* get_adr_type(uint aidx) { return alias_type(aidx)->adr_type(); }
int get_general_index(uint aidx) { return alias_type(aidx)->general_index(); }
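// Illustrative use of the new 'uncached' flag (semantics inferred from
// find_alias_type() below, so treat this as an assumption): uncached = true
// bypasses the alias cache, allowing a fresh slice to be created, e.g. when
// flat array accesses stop sharing a single slice:
//
//   int idx = C->get_alias_index(adr_type, /*uncached=*/ true);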
// Building nodes
void rethrow_exceptions(JVMState* jvms);
// Management of the AliasType table.
void grow_alias_types();
AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
- AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);
+ AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field, bool uncached = false);
void verify_top(Node*) const PRODUCT_RETURN;
// Intrinsic setup.
CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual); // constructor
Node* ctrl = nullptr);
// Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency = false);
- // Auxiliary methods for randomized fuzzing/stressing
+ Node* optimize_acmp(PhaseGVN* phase, Node* a, Node* b);
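// Hedged sketch (editor's illustration, not from this change): with value
// objects, 'a == b' (acmp) is no longer a plain pointer compare, so a caller
// might try a simplified form first and fall back if none applies:
//
//   Node* cmp = optimize_acmp(phase, a, b);
//   if (cmp != nullptr)  return cmp;   // simplified comparison found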
+
+ // Auxiliary methods for randomized fuzzing/stressing
int random();
bool randomized_select(int count);
// Seed random number generation and log the seed for repeatability.
void initialize_stress_seed(const DirectiveSet* directive);
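// Minimal sketch of the stress-seed flow (illustrative; call sites are outside
// this diff): the seed is initialized once per compilation, logged for replay,
// and then drives reproducible pseudo-random choices:
//
//   initialize_stress_seed(directive);    // logs the seed for repeatability
//   if (randomized_select(count)) { ... } // reproducible random pick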