src/hotspot/share/opto/graphKit.hpp

*** 31,10 ***
--- 31,11 ---
  #include "opto/addnode.hpp"
  #include "opto/callnode.hpp"
  #include "opto/cfgnode.hpp"
  #include "opto/compile.hpp"
  #include "opto/divnode.hpp"
+ #include "opto/inlinetypenode.hpp"
  #include "opto/mulnode.hpp"
  #include "opto/phaseX.hpp"
  #include "opto/subnode.hpp"
  #include "opto/type.hpp"
  #include "runtime/deoptimization.hpp"

*** 64,10 ***
--- 65,13 ---
    SafePointNode*    _map;       // Parser map from JVM to Nodes
    SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
    int               _bci;       // JVM Bytecode Pointer
    ciMethod*         _method;    // JVM Current Method
    BarrierSetC2*     _barrier_set;
+ #ifdef ASSERT
+   uint              _worklist_size;
+ #endif
  
   private:
    int               _sp;        // JVM Expression Stack Pointer; don't modify directly!
  
   private:

*** 76,26 ***
      return _map;
    }
  
   public:
    GraphKit();                   // empty constructor
!   GraphKit(JVMState* jvms);     // the JVM state on which to operate
  
  #ifdef ASSERT
    ~GraphKit() {
      assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
    }
  #endif
  
    virtual Parse*          is_Parse()          const { return NULL; }
    virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }
  
    ciEnv*        env()               const { return _env; }
    PhaseGVN&     gvn()               const { return _gvn; }
    void*         barrier_set_state() const { return C->barrier_set_state(); }
  
!   void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile
  
    // Handy well-known nodes:
    Node*         null()          const { return zerocon(T_OBJECT); }
    Node*         top()           const { return C->top(); }
    RootNode*     root()          const { return C->root(); }
--- 80,31 ---
      return _map;
    }
  
   public:
    GraphKit();                   // empty constructor
!   GraphKit(JVMState* jvms, PhaseGVN* gvn = NULL);     // the JVM state on which to operate
  
  #ifdef ASSERT
    ~GraphKit() {
      assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
+     // During incremental inlining, the Node_Array of the C->for_igvn() worklist and the IGVN
+     // worklist are shared, but the _in_worklist VectorSet is not. To avoid inconsistencies,
+     // we should not add nodes to the _for_igvn worklist when using IGVN for the GraphKit.
+     assert((_gvn.is_IterGVN() == NULL) || (_gvn.C->for_igvn()->size() == _worklist_size),
+            "GraphKit should not modify _for_igvn worklist after parsing");
    }
  #endif
  
    virtual Parse*          is_Parse()          const { return NULL; }
    virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }
  
    ciEnv*        env()               const { return _env; }
    PhaseGVN&     gvn()               const { return _gvn; }
    void*         barrier_set_state() const { return C->barrier_set_state(); }
  
!   void record_for_igvn(Node* n) const { _gvn.record_for_igvn(n); }
  
    // Handy well-known nodes:
    Node*         null()          const { return zerocon(T_OBJECT); }
    Node*         top()           const { return C->top(); }
    RootNode*     root()          const { return C->root(); }

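The delegation change above (record_for_igvn now going through _gvn rather than straight to Compile) only upholds the destructor's new invariant if PhaseGVN::record_for_igvn dispatches on the phase kind. A minimal sketch of the assumed behavior, not the actual opto implementation:

    // Sketch (assumption): route through the IGVN worklist when the phase
    // is already an IterGVN, so its _in_worklist VectorSet stays consistent
    // with the shared Node_Array; otherwise defer to Compile as before.
    void PhaseGVN::record_for_igvn(Node* n) {
      PhaseIterGVN* igvn = is_IterGVN();
      if (igvn != NULL) {
        igvn->_worklist.push(n);   // keeps Node_Array and VectorSet in sync
      } else {
        C->record_for_igvn(n);     // parse-time: grow the _for_igvn list
      }
    }
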
*** 356,11 ***
    // Return the value cast to not-null.
    // Be clever about equivalent dominating null checks.
    Node* null_check_common(Node* value, BasicType type,
                            bool assert_null = false,
                            Node* *null_control = NULL,
!                           bool speculative = false);
    Node* null_check(Node* value, BasicType type = T_OBJECT) {
      return null_check_common(value, type, false, NULL, !_gvn.type(value)->speculative_maybe_null());
    }
    Node* null_check_receiver() {
      assert(argument(0)->bottom_type()->isa_ptr(), "must be");
--- 365,12 ---
    // Return the value cast to not-null.
    // Be clever about equivalent dominating null checks.
    Node* null_check_common(Node* value, BasicType type,
                            bool assert_null = false,
                            Node* *null_control = NULL,
!                           bool speculative = false,
+                           bool is_init_check = false);
    Node* null_check(Node* value, BasicType type = T_OBJECT) {
      return null_check_common(value, type, false, NULL, !_gvn.type(value)->speculative_maybe_null());
    }
    Node* null_check_receiver() {
      assert(argument(0)->bottom_type()->isa_ptr(), "must be");

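Existing call sites are unaffected by the new flag since it defaults to false; for illustration, the inline null_check() wrapper above is equivalent to this explicit call (default-argument expansion only, no behavior change):

    // null_check(value) with all defaults spelled out:
    Node* cast = null_check_common(value, T_OBJECT,
                                   false,                                        // assert_null
                                   NULL,                                         // null_control
                                   !_gvn.type(value)->speculative_maybe_null(),  // speculative
                                   false);                                       // is_init_check
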
*** 596,18 ***
                        Node* adr,   // actual address to store val at
                          const TypePtr* adr_type,
                          Node* val,
                          const Type* val_type,
                          BasicType bt,
!                         DecoratorSet decorators);
  
    Node* access_load_at(Node* obj,   // containing obj
                       Node* adr,   // actual address to load val at
                         const TypePtr* adr_type,
                         const Type* val_type,
                         BasicType bt,
!                        DecoratorSet decorators);
  
    Node* access_load(Node* adr,   // actual address to load val at
                      const Type* val_type,
                      BasicType bt,
                      DecoratorSet decorators);
--- 606,20 ---
                        Node* adr,   // actual address to store val at
                          const TypePtr* adr_type,
                          Node* val,
                          const Type* val_type,
                          BasicType bt,
!                         DecoratorSet decorators,
+                         bool safe_for_replace = true);
  
    Node* access_load_at(Node* obj,   // containing obj
                       Node* adr,   // actual address to load val at
                         const TypePtr* adr_type,
                         const Type* val_type,
                         BasicType bt,
!                        DecoratorSet decorators,
+                        Node* ctl = NULL);
  
    Node* access_load(Node* adr,   // actual address to load val at
                      const Type* val_type,
                      BasicType bt,
                      DecoratorSet decorators);

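The new optional ctl argument of access_load_at lets a caller pin the load under an explicit control edge instead of the kit's current control(). A hedged usage sketch, where not_null_ctl is a hypothetical local (e.g. the not-null projection of an earlier check):

    // Sketch: issue the load on an explicit control edge; passing NULL
    // (the default) keeps the previous behavior of using control().
    Node* val = access_load_at(obj, adr, adr_type, val_type, T_OBJECT,
                               IN_HEAP | MO_UNORDERED,
                               not_null_ctl);
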
*** 681,25 ***
  
    //---------- help for generating calls --------------
  
    // Do a null check on the receiver as it would happen before the call to
    // callee (with all arguments still on the stack).
!   Node* null_check_receiver_before_call(ciMethod* callee) {
      assert(!callee->is_static(), "must be a virtual method");
      // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
      // Use callsite signature always.
      ciMethod* declared_method = method()->get_method_at_bci(bci());
      const int nargs = declared_method->arg_size();
      inc_sp(nargs);
      Node* n = null_check_receiver();
      dec_sp(nargs);
      return n;
    }
  
    // Fill in argument edges for the call from argument(0), argument(1), ...
    // (The next step is to call set_edges_for_java_call.)
!   void  set_arguments_for_java_call(CallJavaNode* call);
  
    // Fill in non-argument edges for the call.
    // Transform the call, and update the basics: control, i_o, memory.
    // (The next step is usually to call set_results_for_java_call.)
    void set_edges_for_java_call(CallJavaNode* call,
--- 693,44 ---
  
    //---------- help for generating calls --------------
  
    // Do a null check on the receiver as it would happen before the call to
    // callee (with all arguments still on the stack).
!   Node* null_check_receiver_before_call(ciMethod* callee, bool replace_value = true) {
      assert(!callee->is_static(), "must be a virtual method");
+     if (argument(0)->is_InlineType()) {
+       return argument(0);
+     }
      // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
      // Use callsite signature always.
      ciMethod* declared_method = method()->get_method_at_bci(bci());
      const int nargs = declared_method->arg_size();
      inc_sp(nargs);
      Node* n = null_check_receiver();
+     // TODO Remove this code once InlineTypeNodes are replaced by InlineTypePtrNodes
+     set_argument(0, n);
      dec_sp(nargs);
+     // Scalarize inline type receiver
+     const Type* recv_type = gvn().type(n);
+     if (recv_type->is_inlinetypeptr()) {
+       assert(!recv_type->maybe_null(), "should never be null");
+       Node* vt = InlineTypeNode::make_from_oop(this, n, recv_type->inline_klass());
+       set_argument(0, vt);
+       if (replace_value && is_Parse()) {
+         // Only replace in the map if we are not incrementally inlining, because we
+         // share a map with the caller, which might expect the inline type as an oop.
+         assert(!Compile::current()->inlining_incrementally(), "sanity");
+         replace_in_map(n, vt);
+       }
+       n = vt;
+     }
      return n;
    }
  
    // Fill in argument edges for the call from argument(0), argument(1), ...
    // (The next step is to call set_edges_for_java_call.)
!   void  set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline = false);
  
    // Fill in non-argument edges for the call.
    // Transform the call, and update the basics: control, i_o, memory.
    // (The next step is usually to call set_results_for_java_call.)
    void set_edges_for_java_call(CallJavaNode* call,

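A call generator that shares its map with the caller (late inlining) is expected to pass replace_value = false, per the comment and assert above. A hedged sketch of such a call site:

    // Sketch: during incremental inlining the shared map may still hold the
    // receiver as an oop, so suppress replace_in_map() via replace_value.
    Node* receiver = kit.null_check_receiver_before_call(callee, /*replace_value=*/ false);
    if (kit.stopped()) {
      return;   // receiver was provably NULL; this path is dead
    }
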
*** 840,20 ***
    // and the reflective instance-of call.
    Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);
  
    // Generate a check-cast idiom.  Used by both the check-cast bytecode
    // and the array-store bytecode
!   Node* gen_checkcast( Node *subobj, Node* superkls,
!                        Node* *failure_control = NULL );
  
    Node* gen_subtype_check(Node* obj, Node* superklass);
  
    // Exact type check used for predicted calls and casts.
    // Rewrites (*casted_receiver) to be casted to the stronger type.
    // (Caller is responsible for doing replace_in_map.)
    Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                              Node* *casted_receiver);
  
    // Inexact type check used for predicted calls.
    Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
                                 Node** casted_receiver);
  
--- 871,28 ---
    // and the reflective instance-of call.
    Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);
  
    // Generate a check-cast idiom.  Used by both the check-cast bytecode
    // and the array-store bytecode
!   Node* gen_checkcast(Node *subobj, Node* superkls, Node* *failure_control = NULL, bool null_free = false);
! 
+   // Inline types
+   Node* inline_type_test(Node* obj, bool is_inline = true);
+   Node* is_val_mirror(Node* mirror);
+   Node* array_lh_test(Node* kls, jint mask, jint val, bool eq = true);
+   Node* flat_array_test(Node* ary, bool flat = true);
+   Node* null_free_array_test(Node* klass, bool null_free = true);
+   Node* inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace = false);
  
    Node* gen_subtype_check(Node* obj, Node* superklass);
  
    // Exact type check used for predicted calls and casts.
    // Rewrites (*casted_receiver) to be casted to the stronger type.
    // (Caller is responsible for doing replace_in_map.)
    Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                              Node* *casted_receiver);
+   Node* type_check(Node* recv_klass, const TypeKlassPtr* tklass, float prob);
  
    // Inexact type check used for predicted calls.
    Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
                                 Node** casted_receiver);
  

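The new *_test() helpers presumably feed the usual branch-building idiom. A sketch under the assumption that flat_array_test() returns a BoolNode, as the other predicates suggest:

    // Sketch: branch on whether 'ary' is a flat inline-type array.
    Node* bol = flat_array_test(ary, /*flat=*/ true);
    IfNode* iff = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(2), COUNT_UNKNOWN);
    Node* is_flat_ctl  = IfTrue(iff);   // take the flat-array path here
    Node* not_flat_ctl = IfFalse(iff);  // fast path for non-flat arrays
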
*** 863,11 ***
                                    bool deoptimize_on_exception=false);
    Node* get_layout_helper(Node* klass_node, jint& constant_value);
    Node* new_instance(Node* klass_node,
                       Node* slow_test = NULL,
                       Node* *return_size_val = NULL,
!                      bool deoptimize_on_exception = false);
    Node* new_array(Node* klass_node, Node* count_val, int nargs,
                    Node* *return_size_val = NULL,
                    bool deoptimize_on_exception = false);
  
    // java.lang.String helpers
--- 902,12 ---
                                    bool deoptimize_on_exception=false);
    Node* get_layout_helper(Node* klass_node, jint& constant_value);
    Node* new_instance(Node* klass_node,
                       Node* slow_test = NULL,
                       Node* *return_size_val = NULL,
!                      bool deoptimize_on_exception = false,
+                      InlineTypeBaseNode* inline_type_node = NULL);
    Node* new_array(Node* klass_node, Node* count_val, int nargs,
                    Node* *return_size_val = NULL,
                    bool deoptimize_on_exception = false);
  
    // java.lang.String helpers

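The added inline_type_node parameter presumably ties a buffer allocation to the scalarized value it materializes. A hedged sketch ('buffer_inline_type' is a hypothetical helper, and the inline_klass() accessor on the node's type is an assumption):

    // Sketch: allocate a heap buffer for a scalarized inline type 'vt'.
    Node* buffer_inline_type(GraphKit& kit, InlineTypeBaseNode* vt) {
      ciInlineKlass* vk = vt->type()->inline_klass();   // assumed accessor
      Node* klass_node = kit.makecon(TypeKlassPtr::make(vk));
      return kit.new_instance(klass_node,
                              NULL,    // slow_test
                              NULL,    // return_size_val
                              true,    // deoptimize_on_exception
                              vt);     // links the allocation to its scalarized form
    }
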
*** 900,10 ***
--- 940,11 ---
  
    void add_empty_predicates(int nargs = 0);
    void add_empty_predicate_impl(Deoptimization::DeoptReason reason, int nargs);
  
    Node* make_constant_from_field(ciField* field, Node* obj);
+   Node* load_mirror_from_klass(Node* klass);
  
    // Vector API support (implemented in vectorIntrinsics.cpp)
    Node* box_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool deoptimize_on_exception = false);
    Node* unbox_vector(Node* in, const TypeInstPtr* vbox_type, BasicType elem_bt, int num_elem, bool shuffle_to_vector = false);
    Node* vector_shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem);