src/hotspot/share/opto/graphKit.hpp

*** 35,44 ****
--- 35,45 ----
  #include "opto/divnode.hpp"
  #include "opto/mulnode.hpp"
  #include "opto/phaseX.hpp"
  #include "opto/subnode.hpp"
  #include "opto/type.hpp"
+ #include "opto/valuetypenode.hpp"
  #include "runtime/deoptimization.hpp"
  
  class BarrierSetC2;
  class FastLockNode;
  class FastUnlockNode;
*** 64,73 ****
--- 65,77 ----
    SafePointNode* _map;       // Parser map from JVM to Nodes
    SafePointNode* _exceptions;// Parser map(s) for exception state(s)
    int            _bci;       // JVM Bytecode Pointer
    ciMethod*      _method;    // JVM Current Method
    BarrierSetC2*  _barrier_set;
+ #ifdef ASSERT
+   uint           _worklist_size;
+ #endif
  
   private:
    int            _sp;        // JVM Expression Stack Pointer; don't modify directly!
  
   private:
*** 76,101 ****
      return _map;
    }
  
   public:
    GraphKit();                   // empty constructor
!   GraphKit(JVMState* jvms);     // the JVM state on which to operate
  
  #ifdef ASSERT
    ~GraphKit() {
      assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
    }
  #endif
  
    virtual Parse*          is_Parse() const { return NULL; }
    virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }
  
    ciEnv*        env()               const { return _env; }
    PhaseGVN&     gvn()               const { return _gvn; }
    void*         barrier_set_state() const { return C->barrier_set_state(); }
  
!   void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile
  
    // Handy well-known nodes:
    Node*         null()          const { return zerocon(T_OBJECT); }
    Node*         top()           const { return C->top(); }
    RootNode*     root()          const { return C->root(); }
--- 80,110 ----
      return _map;
    }
  
   public:
    GraphKit();                   // empty constructor
!   GraphKit(JVMState* jvms, PhaseGVN* gvn = NULL);     // the JVM state on which to operate
  
  #ifdef ASSERT
    ~GraphKit() {
      assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
+     // During incremental inlining, the Node_Array of the C->for_igvn() worklist and the IGVN
+     // worklist are shared but the _in_worklist VectorSet is not. To avoid inconsistencies,
+     // we should not add nodes to the _for_igvn worklist when using IGVN for the GraphKit.
+     assert((_gvn.is_IterGVN() == NULL) || (_gvn.C->for_igvn()->size() == _worklist_size),
+            "GraphKit should not modify _for_igvn worklist after parsing");
    }
  #endif
  
    virtual Parse*          is_Parse() const { return NULL; }
    virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }
  
    ciEnv*        env()               const { return _env; }
    PhaseGVN&     gvn()               const { return _gvn; }
    void*         barrier_set_state() const { return C->barrier_set_state(); }
  
!   void record_for_igvn(Node* n) const { _gvn.record_for_igvn(n); }
  
    // Handy well-known nodes:
    Node*         null()          const { return zerocon(T_OBJECT); }
    Node*         top()           const { return C->top(); }
    RootNode*     root()          const { return C->root(); }
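Note: the effect of the new gvn constructor parameter and the re-routed record_for_igvn() is easiest to see from the caller's side. The following is a purely illustrative sketch, not code from this patch (the helper function and its arguments are assumptions): during incremental inlining a GraphKit is built on top of the compile-wide IGVN, so recorded nodes land on the IGVN worklist rather than on C->for_igvn(), which is exactly what the new destructor assertion checks.

    // Hypothetical late-inlining client (names are illustrative):
    void emit_late_inline_ir(JVMState* jvms, PhaseIterGVN* igvn) {
      GraphKit kit(jvms, igvn);        // non-NULL gvn: the kit runs over the IGVN
      Node* receiver = kit.argument(0);
      kit.record_for_igvn(receiver);   // now delegates to _gvn, not to Compile
    }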
*** 367,376 ****
--- 376,387 ----
    // Return the value cast to null, and be clever about dominating checks.
    Node* null_assert(Node* value, BasicType type = T_OBJECT) {
      return null_check_common(value, type, true, NULL, _gvn.type(value)->speculative_always_null());
    }
  
+   Node* null2default(Node* value, ciValueKlass* vk = NULL);
+ 
    // Check if value is null and abort if it is
    Node* must_be_not_null(Node* value, bool do_replace_in_map);
  
    // Null check oop.  Return null-path control into (*null_control).
    // Return a cast-not-null node which depends on the not-null control.
*** 583,600 ****
                          Node* adr,   // actual adress to store val at
                          const TypePtr* adr_type,
                          Node* val,
                          const Type* val_type,
                          BasicType bt,
!                         DecoratorSet decorators);
  
    Node* access_load_at(Node* obj,   // containing obj
                         Node* adr,   // actual adress to load val at
                         const TypePtr* adr_type,
                         const Type* val_type,
                         BasicType bt,
!                        DecoratorSet decorators);
  
    Node* access_load(Node* adr,   // actual adress to load val at
                      const Type* val_type,
                      BasicType bt,
                      DecoratorSet decorators);
--- 594,614 ----
                          Node* adr,   // actual adress to store val at
                          const TypePtr* adr_type,
                          Node* val,
                          const Type* val_type,
                          BasicType bt,
!                         DecoratorSet decorators,
!                         bool deoptimize_on_exception = false,
!                         bool safe_for_replace = true);
  
    Node* access_load_at(Node* obj,   // containing obj
                         Node* adr,   // actual adress to load val at
                         const TypePtr* adr_type,
                         const Type* val_type,
                         BasicType bt,
!                        DecoratorSet decorators,
!                        Node* ctl = NULL);
  
    Node* access_load(Node* adr,   // actual adress to load val at
                      const Type* val_type,
                      BasicType bt,
                      DecoratorSet decorators);
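Note: a hedged usage sketch for the widened access_store_at()/access_load_at() signatures; the variables and decorator values below are placeholders chosen for illustration, not taken from the patch.

    // Store that should deoptimize instead of throwing, without map replacement:
    kit.access_store_at(obj, adr, adr_type, val, val_type, T_VALUETYPE,
                        IN_HEAP | MO_UNORDERED,
                        true  /* deoptimize_on_exception */,
                        false /* safe_for_replace */);

    // Load pinned under an explicit control input via the new 'ctl' parameter:
    Node* ctl = kit.control();
    Node* v = kit.access_load_at(obj, adr, adr_type, val_type, T_VALUETYPE,
                                 IN_HEAP | MO_UNORDERED, ctl);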
*** 635,645 ****
                               Node* new_val,
                               const Type* value_type,
                               BasicType bt,
                               DecoratorSet decorators);
  
!   void access_clone(Node* src, Node* dst, Node* size, bool is_array);
  
    Node* access_resolve(Node* n, DecoratorSet decorators);
  
    // Return addressing for an array element.
    Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
--- 649,659 ----
                               Node* new_val,
                               const Type* value_type,
                               BasicType bt,
                               DecoratorSet decorators);
  
!   void access_clone(Node* src_base, Node* dst_base, Node* countx, bool is_array);
  
    Node* access_resolve(Node* n, DecoratorSet decorators);
  
    // Return addressing for an array element.
    Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
*** 670,694 ****
    //---------- help for generating calls --------------
  
    // Do a null check on the receiver as it would happen before the call to
    // callee (with all arguments still on the stack).
!   Node* null_check_receiver_before_call(ciMethod* callee) {
      assert(!callee->is_static(), "must be a virtual method");
      // Callsite signature can be different from actual method being called (i.e _linkTo* sites).
      // Use callsite signature always.
      ciMethod* declared_method = method()->get_method_at_bci(bci());
      const int nargs = declared_method->arg_size();
      inc_sp(nargs);
      Node* n = null_check_receiver();
      dec_sp(nargs);
      return n;
    }
  
    // Fill in argument edges for the call from argument(0), argument(1), ...
    // (The next step is to call set_edges_for_java_call.)
!   void  set_arguments_for_java_call(CallJavaNode* call);
  
    // Fill in non-argument edges for the call.
    // Transform the call, and update the basics: control, i_o, memory.
    // (The next step is usually to call set_results_for_java_call.)
    void set_edges_for_java_call(CallJavaNode* call,
--- 684,724 ----
    //---------- help for generating calls --------------
  
    // Do a null check on the receiver as it would happen before the call to
    // callee (with all arguments still on the stack).
!   Node* null_check_receiver_before_call(ciMethod* callee, bool replace_value = true) {
      assert(!callee->is_static(), "must be a virtual method");
+     if (argument(0)->is_ValueType()) {
+       return argument(0);
+     }
      // Callsite signature can be different from actual method being called (i.e _linkTo* sites).
      // Use callsite signature always.
      ciMethod* declared_method = method()->get_method_at_bci(bci());
      const int nargs = declared_method->arg_size();
      inc_sp(nargs);
      Node* n = null_check_receiver();
      dec_sp(nargs);
+     // Scalarize value type receiver
+     const Type* recv_type = gvn().type(n);
+     if (recv_type->is_valuetypeptr() && recv_type->value_klass()->is_scalarizable()) {
+       assert(!recv_type->maybe_null(), "should never be null");
+       ValueTypeNode* vt = ValueTypeNode::make_from_oop(this, n, recv_type->value_klass());
+       set_argument(0, vt);
+       if (replace_value && !Compile::current()->inlining_incrementally()) {
+         // Only replace in map if we are not incrementally inlining because we
+         // share a map with the caller which might expect the value type as oop.
+         replace_in_map(n, vt);
+       }
+       n = vt;
+     }
      return n;
    }
  
    // Fill in argument edges for the call from argument(0), argument(1), ...
    // (The next step is to call set_edges_for_java_call.)
!   void  set_arguments_for_java_call(CallJavaNode* call, bool incremental_inlining = false);
  
    // Fill in non-argument edges for the call.
    // Transform the call, and update the basics: control, i_o, memory.
    // (The next step is usually to call set_results_for_java_call.)
    void set_edges_for_java_call(CallJavaNode* call,
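Note: with the scalarization added above, a caller of null_check_receiver_before_call() may now get back a ValueTypeNode instead of an oop receiver. A small illustrative sketch (the surrounding names are assumptions, not code from the patch):

    Node* recv = kit.null_check_receiver_before_call(callee);
    if (recv->is_ValueType()) {
      // Receiver was scalarized: argument(0) now holds the ValueTypeNode and,
      // outside of incremental inlining, the oop was also replaced in the map.
    }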
*** 823,834 ****
    // and the reflective instance-of call.
    Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);
  
    // Generate a check-cast idiom.  Used by both the check-cast bytecode
    // and the array-store bytecode
!   Node* gen_checkcast( Node *subobj, Node* superkls,
!                        Node* *failure_control = NULL );
  
    Node* gen_subtype_check(Node* subklass, Node* superklass) {
      MergeMemNode* mem = merged_memory();
      Node* ctrl = control();
      Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, &_gvn);
--- 853,871 ----
    // and the reflective instance-of call.
    Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);
  
    // Generate a check-cast idiom.  Used by both the check-cast bytecode
    // and the array-store bytecode
!   Node* gen_checkcast(Node *subobj, Node* superkls, Node* *failure_control = NULL, bool never_null = false);
! 
!   Node* is_always_locked(Node* obj);
!   Node* is_value_mirror(Node* mirror);
!   void gen_value_type_guard(Node* obj, int nargs = 0);
!   Node* gen_null_free_array_check(Node* ary);
!   Node* gen_value_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace = false);
!   Node* load_lh_array_tag(Node* kls);
!   Node* gen_lh_array_test(Node* kls, unsigned int lh_value);
  
    Node* gen_subtype_check(Node* subklass, Node* superklass) {
      MergeMemNode* mem = merged_memory();
      Node* ctrl = control();
      Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, &_gvn);
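Note: a hedged sketch of how the new helpers might combine for a store into a possibly null-free value type array; the argument values are illustrative only and simply follow the declarations above:

    // Filter out a null value before storing into an array that may be
    // null-free, then check-cast with never_null set:
    val = kit.gen_value_array_null_guard(ary, val, /* nargs */ 3);
    val = kit.gen_checkcast(val, superkls, NULL /* failure_control */,
                            true /* never_null */);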
*** 839,848 ****
--- 876,886 ----
    // Exact type check used for predicted calls and casts.
    // Rewrites (*casted_receiver) to be casted to the stronger type.
    // (Caller is responsible for doing replace_in_map.)
    Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                              Node* *casted_receiver);
+   Node* type_check(Node* recv_klass, const TypeKlassPtr* tklass, float prob);
  
    // Inexact type check used for predicted calls.
    Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
                                 Node** casted_receiver);
*** 852,865 ****
                              bool deoptimize_on_exception=false);
    Node* get_layout_helper(Node* klass_node, jint& constant_value);
    Node* new_instance(Node* klass_node,
                       Node* slow_test = NULL,
                       Node* *return_size_val = NULL,
!                      bool deoptimize_on_exception = false);
    Node* new_array(Node* klass_node, Node* count_val, int nargs,
                    Node* *return_size_val = NULL,
!                   bool deoptimize_on_exception = false);
  
    // java.lang.String helpers
    Node* load_String_length(Node* str, bool set_ctrl);
    Node* load_String_value(Node* str, bool set_ctrl);
    Node* load_String_coder(Node* str, bool set_ctrl);
--- 890,905 ----
                              bool deoptimize_on_exception=false);
    Node* get_layout_helper(Node* klass_node, jint& constant_value);
    Node* new_instance(Node* klass_node,
                       Node* slow_test = NULL,
                       Node* *return_size_val = NULL,
!                      bool deoptimize_on_exception = false,
!                      ValueTypeBaseNode* value_node = NULL);
    Node* new_array(Node* klass_node, Node* count_val, int nargs,
                    Node* *return_size_val = NULL,
!                   bool deoptimize_on_exception = false,
!                   Node* elem_mirror = NULL);
  
    // java.lang.String helpers
    Node* load_String_length(Node* str, bool set_ctrl);
    Node* load_String_value(Node* str, bool set_ctrl);
    Node* load_String_coder(Node* str, bool set_ctrl);
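Note: a hedged sketch of the new allocation parameter; 'vk' and 'vt' are assumed to be a ciValueKlass* and a ValueTypeNode* already in scope, and are not names from the patch:

    // Allocate a buffer for a value type and let the allocation be
    // initialized from the scalarized field values carried by 'vt':
    Node* klass_node = kit.makecon(TypeKlassPtr::make(vk));
    Node* buffer = kit.new_instance(klass_node,
                                    NULL  /* slow_test */,
                                    NULL  /* return_size_val */,
                                    false /* deoptimize_on_exception */,
                                    vt    /* value_node */);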
*** 890,899 ****
--- 930,941 ----
    // Insert a loop predicate into the graph
    void add_predicate(int nargs = 0);
    void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);
  
    Node* make_constant_from_field(ciField* field, Node* obj);
+ 
+   Node* load_mirror_from_klass(Node* klass);
  };
  
  // Helper class to support building of control flow branches. Upon
  // creation the map and sp at bci are cloned and restored upon de-
  // struction. Typical use: