< prev index next >

src/hotspot/share/opto/compile.hpp

Print this page
@@ -49,10 +49,11 @@
  class AbstractLockNode;
  class AddPNode;
  class Block;
  class Bundle;
  class CallGenerator;
+ class CallNode;
  class CloneMap;
  class ConnectionGraph;
  class IdealGraphPrinter;
  class InlineTree;
  class Int_Array;

@@ -87,10 +88,11 @@
  class TypePtr;
  class TypeOopPtr;
  class TypeFunc;
  class TypeVect;
  class Unique_Node_List;
+ class InlineTypeBaseNode;
  class nmethod;
  class Node_Stack;
  struct Final_Reshape_Counts;
  
  enum LoopOptsMode {

@@ -303,10 +305,13 @@
    // JSR 292
    bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
    RTMState              _rtm_state;             // State of Restricted Transactional Memory usage
    int                   _loop_opts_cnt;         // loop opts round
    bool                  _clinit_barrier_on_entry; // True if clinit barrier is needed on nmethod entry
+   bool                  _has_flattened_accesses; // Any known flattened array accesses?
+   bool                  _flattened_accesses_share_alias; // Initially all flattened arrays share a single slice
+   bool                  _scalarize_in_safepoints; // Scalarize inline types in safepoint debug info
    uint                  _stress_seed;           // Seed for stress testing
  
    // Compilation environment.
    Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
    void*                 _barrier_set_state;     // Potential GC barrier state for Compile

@@ -318,10 +323,11 @@
    GrowableArray<Node*>  _macro_nodes;           // List of nodes which need to be expanded before matching.
    GrowableArray<Node*>  _predicate_opaqs;       // List of Opaque1 nodes for the loop predicates.
    GrowableArray<Node*>  _skeleton_predicate_opaqs; // List of Opaque4 nodes for the loop skeleton predicates.
    GrowableArray<Node*>  _expensive_nodes;       // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
    GrowableArray<Node*>  _for_post_loop_igvn;    // List of nodes for IGVN after loop opts are over
+   GrowableArray<Node*>  _inline_type_nodes;     // List of InlineType nodes
    GrowableArray<Node_List*> _coarsened_locks;   // List of coarsened Lock and Unlock nodes
    ConnectionGraph*      _congraph;
  #ifndef PRODUCT
    IdealGraphPrinter*    _printer;
    static IdealGraphPrinter* _debug_file_printer;

@@ -593,10 +599,19 @@
    bool          profile_rtm() const              { return _rtm_state == ProfileRTM; }
    uint              max_node_limit() const       { return (uint)_max_node_limit; }
    void          set_max_node_limit(uint n)       { _max_node_limit = n; }
    bool              clinit_barrier_on_entry()       { return _clinit_barrier_on_entry; }
    void          set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }
+   void          set_flattened_accesses()         { _has_flattened_accesses = true; }
+   bool          flattened_accesses_share_alias() const { return _flattened_accesses_share_alias; }
+   void          set_flattened_accesses_share_alias(bool z) { _flattened_accesses_share_alias = z; }
+   bool          scalarize_in_safepoints() const { return _scalarize_in_safepoints; }
+   void          set_scalarize_in_safepoints(bool z) { _scalarize_in_safepoints = z; }
+ 
+   // Support for scalarized inline type calling convention
+   bool              has_scalarized_args() const  { return _method != NULL && _method->has_scalarized_args(); }
+   bool              needs_stack_repair()  const  { return _method != NULL && _method->get_Method()->c2_needs_stack_repair(); }
  
    // check the CompilerOracle for special behaviours for this compile
    bool          method_has_option(enum CompileCommand option) {
      return method() != NULL && method()->has_option(option);
    }

@@ -717,10 +732,17 @@
  
    void record_for_post_loop_opts_igvn(Node* n);
    void remove_from_post_loop_opts_igvn(Node* n);
    void process_for_post_loop_opts_igvn(PhaseIterGVN& igvn);
  
+   // Keep track of inline type nodes for later processing
+   void add_inline_type(Node* n);
+   void remove_inline_type(Node* n);
+   void process_inline_types(PhaseIterGVN &igvn, bool remove = false);
+ 
+   void adjust_flattened_array_access_aliases(PhaseIterGVN& igvn);
+ 
    void sort_macro_nodes();
  
    // remove the opaque nodes that protect the predicates so that the unused checks and
    // uncommon traps will be eliminated from the graph.
    void cleanup_loop_predicates(PhaseIterGVN &igvn);

@@ -856,15 +878,15 @@
      _last_tf_m = m;
      _last_tf = tf;
    }
  
    AliasType*        alias_type(int                idx)  { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
-   AliasType*        alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
+   AliasType*        alias_type(const TypePtr* adr_type, ciField* field = NULL, bool uncached = false) { return find_alias_type(adr_type, false, field, uncached); }
    bool         have_alias_type(const TypePtr* adr_type);
    AliasType*        alias_type(ciField*         field);
  
-   int               get_alias_index(const TypePtr* at)  { return alias_type(at)->index(); }
+   int               get_alias_index(const TypePtr* at, bool uncached = false) { return alias_type(at, NULL, uncached)->index(); }
    const TypePtr*    get_adr_type(uint aidx)             { return alias_type(aidx)->adr_type(); }
    int               get_general_index(uint aidx)        { return alias_type(aidx)->general_index(); }
  
    // Building nodes
    void              rethrow_exceptions(JVMState* jvms);

@@ -930,11 +952,11 @@
    void gvn_replace_by(Node* n, Node* nn);
  
  
    void              identify_useful_nodes(Unique_Node_List &useful);
    void              update_dead_node_list(Unique_Node_List &useful);
-   void              remove_useless_nodes (Unique_Node_List &useful);
+   void              disconnect_useless_nodes(Unique_Node_List &useful, Unique_Node_List* worklist);
  
    void              remove_useless_node(Node* dead);
  
    // Record this CallGenerator for inlining at the end of parsing.
    void              add_late_inline(CallGenerator* cg)        {

@@ -1095,11 +1117,11 @@
  
    // Management of the AliasType table.
    void grow_alias_types();
    AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
    const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
-   AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);
+   AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field, bool uncached = false);
  
    void verify_top(Node*) const PRODUCT_RETURN;
  
    // Intrinsic setup.
    CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);          // constructor

@@ -1168,11 +1190,13 @@
                                Node* ctrl = NULL);
  
    // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
    static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency = false);
  
-   // Auxiliary methods for randomized fuzzing/stressing
+   Node* optimize_acmp(PhaseGVN* phase, Node* a, Node* b);
+ 
+   // Auxiliary methods for randomized fuzzing/stressing
    int random();
    bool randomized_select(int count);
  
    // supporting clone_map
    CloneMap&     clone_map();
< prev index next >