src/hotspot/share/opto/graphKit.hpp

@@ -31,10 +31,11 @@
  #include "opto/addnode.hpp"
  #include "opto/callnode.hpp"
  #include "opto/cfgnode.hpp"
  #include "opto/compile.hpp"
  #include "opto/divnode.hpp"
+ #include "opto/inlinetypenode.hpp"
  #include "opto/mulnode.hpp"
  #include "opto/phaseX.hpp"
  #include "opto/subnode.hpp"
  #include "opto/type.hpp"
  #include "runtime/deoptimization.hpp"

@@ -64,10 +65,13 @@
    SafePointNode*    _map;       // Parser map from JVM to Nodes
    SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
    int               _bci;       // JVM Bytecode Pointer
    ciMethod*         _method;    // JVM Current Method
    BarrierSetC2*     _barrier_set;
+ #ifdef ASSERT
+   uint              _worklist_size;
+ #endif
  
   private:
    int               _sp;        // JVM Expression Stack Pointer; don't modify directly!
  
   private:

@@ -76,27 +80,34 @@
      return _map;
    }
  
   public:
    GraphKit();                   // empty constructor
-   GraphKit(JVMState* jvms);     // the JVM state on which to operate
+   GraphKit(JVMState* jvms, PhaseGVN* gvn = nullptr);     // the JVM state on which to operate
  
  #ifdef ASSERT
    ~GraphKit() {
      assert(failing_internal() || !has_exceptions(),
             "unless compilation failed, user must call transfer_exceptions_into_jvms");
+ #if 0
+     // During incremental inlining, the Node_Array backing the C->for_igvn() worklist and the
+     // IGVN worklist is shared, but the _in_worklist VectorSet is not. To avoid inconsistencies,
+     // we should not add nodes to the _for_igvn worklist when using IGVN for the GraphKit.
+     assert((_gvn.is_IterGVN() == nullptr) || (_gvn.C->for_igvn()->size() == _worklist_size),
+            "GraphKit should not modify _for_igvn worklist after parsing");
+ #endif
    }
  #endif
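
The new `_worklist_size` field only makes sense if it snapshots the `_for_igvn` worklist when the kit is constructed, so the (currently disabled) destructor assert can detect stray additions. A minimal sketch of the constructor side, which is not part of this header and is therefore an assumption:

    #ifdef ASSERT
      // Remember how many nodes were queued for IGVN when this kit was created,
      // so ~GraphKit() can check that the kit did not grow the _for_igvn worklist.
      _worklist_size = C->for_igvn()->size();
    #endif
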
  
    virtual Parse*          is_Parse()          const { return nullptr; }
    virtual LibraryCallKit* is_LibraryCallKit() const { return nullptr; }
  
    ciEnv*        env()               const { return _env; }
    PhaseGVN&     gvn()               const { return _gvn; }
    void*         barrier_set_state() const { return C->barrier_set_state(); }
  
-   void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile
+   void record_for_igvn(Node* n) const { _gvn.record_for_igvn(n); }
    void remove_for_igvn(Node* n) const { C->remove_for_igvn(n); }
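
Together with the new `PhaseGVN* gvn` constructor argument, this delegation suggests `record_for_igvn` now routes through the active phase: a kit running during incremental inlining pushes onto the IGVN worklist instead of mutating `C->for_igvn()`, which is exactly the inconsistency the disabled destructor assert guards against. A rough sketch of the phase-side hook; the real definition is not in this file, so everything beyond `is_IterGVN` and the method name is assumed:

    // Hypothetical dispatch: under IGVN, push onto the iterative worklist;
    // during parsing, fall back to Compile's _for_igvn list as before.
    void PhaseGVN::record_for_igvn(Node* n) {
      if (is_IterGVN() != nullptr) {
        is_IterGVN()->_worklist.push(n);
      } else {
        C->record_for_igvn(n);
      }
    }
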
  
    // Handy well-known nodes:
    Node*         null()          const { return zerocon(T_OBJECT); }
    Node*         top()           const { return C->top(); }

@@ -360,16 +371,16 @@
    // Return the value cast to not-null.
    // Be clever about equivalent dominating null checks.
    Node* null_check_common(Node* value, BasicType type,
                            bool assert_null = false,
                            Node* *null_control = nullptr,
-                           bool speculative = false);
+                           bool speculative = false,
+                           bool is_init_check = false);
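
A hedged guess at the new flag: value-object code wants to test an "is-initialized" marker with the same dominating-check elision and trap bookkeeping as an ordinary null check, so the caller signals that the tested value is an init bit rather than an oop. Illustrative call; the variable name is made up:

    // Hypothetical: fold an init-marker test into the null-check machinery.
    Node* checked = null_check_common(is_init_bit, T_INT,
                                      /*assert_null*/   false,
                                      /*null_control*/  nullptr,
                                      /*speculative*/   false,
                                      /*is_init_check*/ true);
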
    Node* null_check(Node* value, BasicType type = T_OBJECT) {
      return null_check_common(value, type, false, nullptr, !_gvn.type(value)->speculative_maybe_null());
    }
    Node* null_check_receiver() {
-     assert(argument(0)->bottom_type()->isa_ptr(), "must be");
      return null_check(argument(0));
    }
    Node* zero_check_int(Node* value) {
      assert(value->bottom_type()->basic_type() == T_INT,
             "wrong type: %s", type2name(value->bottom_type()->basic_type()));

@@ -605,18 +616,20 @@
                          Node* adr,   // actual address to store val at
                          const TypePtr* adr_type,
                          Node* val,
                          const Type* val_type,
                          BasicType bt,
-                         DecoratorSet decorators);
+                         DecoratorSet decorators,
+                         bool safe_for_replace = true);
  
    Node* access_load_at(Node* obj,   // containing obj
                         Node* adr,   // actual address to load val at
                         const TypePtr* adr_type,
                         const Type* val_type,
                         BasicType bt,
-                        DecoratorSet decorators);
+                        DecoratorSet decorators,
+                        Node* ctl = nullptr);
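
The two new parameters look like plumbing for flat/null-restricted field access: `safe_for_replace` presumably gates whether the store's checked value may be fed back via replace_in_map, and `ctl` lets a load be pinned under an explicitly supplied control edge instead of the kit's current control. Illustrative load; the control-node name is made up:

    // Hypothetical: pin the load under one arm of a preceding array-kind
    // check rather than under control().
    Node* val = access_load_at(obj, adr, adr_type, val_type, bt,
                               decorators, non_flat_ctl);
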
  
    Node* access_load(Node* adr,   // actual address to load val at
                      const Type* val_type,
                      BasicType bt,
                      DecoratorSet decorators);

@@ -704,11 +717,11 @@
      return n;
    }
  
    // Fill in argument edges for the call from argument(0), argument(1), ...
    // (The next step is to call set_edges_for_java_call.)
-   void  set_arguments_for_java_call(CallJavaNode* call);
+   void  set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline = false);
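
The `is_late_inline` flag presumably tells the kit whether it is wiring a call during incremental inlining, where inline-type arguments may already be scalarized. Assumed usage from a late-inline call generator:

    kit.set_arguments_for_java_call(call, /*is_late_inline*/ true);
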
  
    // Fill in non-argument edges for the call.
    // Transform the call, and update the basics: control, i_o, memory.
    // (The next step is usually to call set_results_for_java_call.)
    void set_edges_for_java_call(CallJavaNode* call,

@@ -843,20 +856,27 @@
    // and the reflective instance-of call.
    Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);
  
    // Generate a check-cast idiom.  Used by both the check-cast bytecode
    // and the array-store bytecode
-   Node* gen_checkcast( Node *subobj, Node* superkls,
-                        Node* *failure_control = nullptr );
+   Node* gen_checkcast(Node *subobj, Node* superkls, Node* *failure_control = nullptr, bool null_free = false);
+ 
+   // Inline types
+   Node* mark_word_test(Node* obj, uintptr_t mask_val, bool eq, bool check_lock = true);
+   Node* inline_type_test(Node* obj, bool is_inline = true);
+   Node* flat_array_test(Node* array_or_klass, bool flat = true);
+   Node* null_free_array_test(Node* array, bool null_free = true);
+   Node* inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace = false);
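
Taken together, these helpers read like the building blocks for checkcast and array-store paths over flat and null-free arrays. A hedged sketch of how they might compose in an aastore-like sequence; only the signatures come from this header, the ordering is an assumption, and the test helpers return nodes to branch on rather than values:

    // Hypothetical aastore shape for a maybe-flat, maybe-null-free array:
    Node* flat_test      = flat_array_test(ary_klass);   // branch: flat storage?
    Node* null_free_test = null_free_array_test(ary);    // branch: null-free?
    // Trap or throw if a null is stored into a null-free array:
    val = inline_array_null_guard(ary, val, /*nargs*/ 3);
    // Element checkcast, requiring null-free when the array demands it:
    val = gen_checkcast(val, elem_klass, /*failure_control*/ nullptr,
                        /*null_free*/ true);
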
  
    Node* gen_subtype_check(Node* obj, Node* superklass);
  
    // Exact type check used for predicted calls and casts.
    // Rewrites (*casted_receiver) to be casted to the stronger type.
    // (Caller is responsible for doing replace_in_map.)
    Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                              Node* *casted_receiver);
+   Node* type_check(Node* recv_klass, const TypeKlassPtr* tklass, float prob);
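
The new `type_check` overload appears to factor the exact-klass compare out of `type_check_receiver` for callers that already hold a klass node; its return contract is not visible in this header. Assumed usage:

    // Hypothetical: branch on an exact klass match without re-loading the klass.
    Node* hit = type_check(recv_klass, tklass, PROB_MAX);
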
  
    // Inexact type check used for predicted calls.
    Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
                                 Node** casted_receiver);
  

@@ -866,11 +886,12 @@
                                    bool deoptimize_on_exception=false);
    Node* get_layout_helper(Node* klass_node, jint& constant_value);
    Node* new_instance(Node* klass_node,
                       Node* slow_test = nullptr,
                       Node* *return_size_val = nullptr,
-                      bool deoptimize_on_exception = false);
+                      bool deoptimize_on_exception = false,
+                      InlineTypeNode* inline_type_node = nullptr);
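
The extra `InlineTypeNode*` parameter (and the new inlinetypenode.hpp include at the top) presumably supports buffering a scalarized value object: the allocation can be initialized directly from the node's field values instead of going through a generic path. A hedged sketch, assuming `vk` is the value class and `vt` the scalarized node:

    // Hypothetical buffering path for a scalarized value object.
    Node* klass_node = makecon(TypeKlassPtr::make(vk));
    Node* buffer     = new_instance(klass_node,
                                    /*slow_test*/               nullptr,
                                    /*return_size_val*/         nullptr,
                                    /*deoptimize_on_exception*/ false,
                                    vt);
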
    Node* new_array(Node* klass_node, Node* count_val, int nargs,
                    Node* *return_size_val = nullptr,
                    bool deoptimize_on_exception = false);
  
    // java.lang.String helpers

@@ -903,10 +924,11 @@
  
    void add_parse_predicates(int nargs = 0);
    void add_parse_predicate(Deoptimization::DeoptReason reason, int nargs);
  
    Node* make_constant_from_field(ciField* field, Node* obj);
+   Node* load_mirror_from_klass(Node* klass);
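
`load_mirror_from_klass` looks like the usual mirror-load idiom promoted to GraphKit (mainline keeps an equivalent helper in LibraryCallKit). The likely shape, sketched from that idiom rather than from this patch:

    // Load Klass::_java_mirror and resolve the OopHandle indirection.
    Node* GraphKit::load_mirror_from_klass(Node* klass) {
      Node* p    = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
      Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS,
                             MemNode::unordered);
      return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
    }
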
  
    // Vector API support (implemented in vectorIntrinsics.cpp)
    Node* box_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool deoptimize_on_exception = false);
    Node* unbox_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool shuffle_to_vector = false);
    Node* vector_shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem);